move IP/Network lists to the data provider

This is a backward incompatible change: all previous file-based IP/network lists will no longer work.

Signed-off-by: Nicola Murino <nicola.murino@gmail.com>

parent 2412a0a369
commit 1b1745b7f7

103 changed files with 4958 additions and 1284 deletions
.github/workflows/development.yml (vendored, 12 changes)

@@ -11,11 +11,11 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go: [1.19]
+        go: ['1.20']
         os: [ubuntu-latest, macos-latest]
         upload-coverage: [true]
         include:
-          - go: 1.19
+          - go: '1.20'
            os: windows-latest
            upload-coverage: false

@@ -232,7 +232,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: 1.19
+          go-version: '1.20'

       - name: Build
         run: |

@@ -252,7 +252,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: 1.19
+          go-version: '1.20'

       - name: Build
         run: |

@@ -326,7 +326,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: 1.19
+          go-version: '1.20'

       - name: Build
         run: |

@@ -546,7 +546,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: 1.19
+          go-version: '1.20'
       - uses: actions/checkout@v3
       - name: Run golangci-lint
         uses: golangci/golangci-lint-action@v3
.github/workflows/docker.yml (vendored, 2 changes)

@@ -160,7 +160,7 @@ jobs:
         if: ${{ github.event_name != 'pull_request' }}

       - name: Build and push
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v4
         with:
           context: .
           builder: ${{ steps.builder.outputs.name }}
.github/workflows/release.yml (vendored, 2 changes)

@@ -5,7 +5,7 @@ on:
     tags: 'v*'

 env:
-  GO_VERSION: 1.19.4
+  GO_VERSION: 1.20.0

 jobs:
   prepare-sources-with-deps:
@@ -1,4 +1,4 @@
-FROM golang:1.19-bullseye as builder
+FROM golang:1.20-bullseye as builder

 ENV GOFLAGS="-mod=readonly"


@@ -1,4 +1,4 @@
-FROM golang:1.19-alpine3.17 AS builder
+FROM golang:1.20-alpine3.17 AS builder

 ENV GOFLAGS="-mod=readonly"


@@ -1,4 +1,4 @@
-FROM golang:1.19-bullseye as builder
+FROM golang:1.20-bullseye as builder

 ENV CGO_ENABLED=0 GOFLAGS="-mod=readonly"

README.md (30 changes)

@@ -34,7 +34,7 @@ More [info](https://github.com/drakkan/sftpgo/issues/452).

 #### Silver sponsors

-[<img src="./img/Dendi_logo.png" alt="Dendi logo" width="202" height="63">](https://dendisoftware.com/)
+[<img src="./img/Dendi_logo.png" alt="Dendi logo" width="212" height="66">](https://dendisoftware.com/)

 #### Bronze sponsors


@@ -51,12 +51,12 @@ If you report an invalid issue or ask for step-by-step support, your issue will
 ## Features

 - Support for serving local filesystem, encrypted local filesystem, S3 Compatible Object Storage, Google Cloud Storage, Azure Blob Storage or other SFTP accounts over SFTP/SCP/FTP/WebDAV.
-- Virtual folders are supported: a virtual folder can use any of the supported storage backends. So you can have, for example, an S3 user that exposes a GCS bucket (or part of it) on a specified path and an encrypted local filesystem on another one. Virtual folders can be private or shared among multiple users, for shared virtual folders you can define different quota limits for each user.
+- Virtual folders are supported: a virtual folder can use any of the supported storage backends. So you can have, for example, a user with the S3 backend mapping a GCS bucket (or part of it) on a specified path and an encrypted local filesystem on another one. Virtual folders can be private or shared among multiple users, for shared virtual folders you can define different quota limits for each user.
 - Configurable [custom commands and/or HTTP hooks](./docs/custom-actions.md) on upload, pre-upload, download, pre-download, delete, pre-delete, rename, mkdir, rmdir on SSH commands and on user add, update and delete.
 - Virtual accounts stored within a "data provider".
 - SQLite, MySQL, PostgreSQL, CockroachDB, Bolt (key/value store in pure Go) and in-memory data providers are supported.
 - Chroot isolation for local accounts. Cloud-based accounts can be restricted to a certain base path.
-- Per-user and per-directory virtual permissions, for each exposed path you can allow or deny: directory listing, upload, overwrite, download, delete, rename, create directories, create symlinks, change owner/group/file mode and modification time.
+- Per-user and per-directory virtual permissions, for each path you can allow or deny: directory listing, upload, overwrite, download, delete, rename, create directories, create symlinks, change owner/group/file mode and modification time.
 - [REST API](./docs/rest-api.md) for users and folders management, data retention, backup, restore and real time reports of the active connections with possibility of forcibly closing a connection.
 - The [Event Manager](./docs/eventmanager.md) allows to define custom workflows based on server events or schedules.
 - [Web based administration interface](./docs/web-admin.md) to easily manage users, folders and connections.

@@ -92,7 +92,7 @@ If you report an invalid issue or ask for step-by-step support, your issue will
 - ACME protocol is supported. SFTPGo can obtain and automatically renew TLS certificates for HTTPS, WebDAV and FTPS from `Let's Encrypt` or other ACME compliant certificate authorities, using the the `HTTP-01` or `TLS-ALPN-01` [challenge types](https://letsencrypt.org/docs/challenge-types/).
 - Two-Way TLS authentication, aka TLS with client certificate authentication, is supported for REST API/Web Admin, FTPS and WebDAV over HTTPS.
 - Per-user protocols restrictions. You can configure the allowed protocols (SSH/HTTP/FTP/WebDAV) for each user.
-- [Prometheus metrics](./docs/metrics.md) are exposed.
+- [Prometheus metrics](./docs/metrics.md) are supported.
 - Support for HAProxy PROXY protocol: you can proxy and/or load balance the SFTP/SCP/FTP service without losing the information about the client's address.
 - Easy [migration](./examples/convertusers) from Linux system user accounts.
 - [Portable mode](./docs/portable-mode.md): a convenient way to share a single directory on demand.

@@ -222,12 +222,12 @@ To start using SFTPGo you need to create an admin user, you can do it in several
 SFTPGo supports upgrading from the previous release branch to the current one.
 Some examples for supported upgrade paths are:

-- from 1.2.x to 2.0.x
+- from 2.1.x to 2.2.x
-- from 2.0.x to 2.1.x and so on.
+- from 2.2.x to 2.3.x and so on.

-For supported upgrade paths, the data and schema are migrated automatically, alternately you can use the `initprovider` command.
+For supported upgrade paths, the data and schema are migrated automatically when SFTPGo starts, alternatively you can use the `initprovider` command before starting SFTPGo.

-So if, for example, you want to upgrade from a version before 1.2.x to 2.0.x, you must first install version 1.2.x, update the data provider and finally install the version 2.0.x. It is recommended to always install the latest available minor version, ie do not install 1.2.0 if 1.2.2 is available.
+So if, for example, you want to upgrade from 2.0.x to 2.2.x, you must first install version 2.1.x, update the data provider (automatically, by starting SFTPGo or manually using the `initprovider` command) and finally install the version 2.2.x. It is recommended to always install the latest available minor version, ie do not install 2.1.0 if 2.1.2 is available.

 Loading data from a provider independent JSON dump is supported from the previous release branch to the current one too. After upgrading SFTPGo it is advisable to regenerate the JSON dump from the new version.


@@ -237,13 +237,13 @@ If for some reason you want to downgrade SFTPGo, you may need to downgrade your

 As for upgrading, SFTPGo supports downgrading from the previous release branch to the current one.

-So, if you plan to downgrade from 2.0.x to 1.2.x, before uninstalling 2.0.x version, you can prepare your data provider executing the following command from the configuration directory:
+So, if you plan to downgrade from 2.3.x to 2.2.x, before uninstalling 2.3.x version, you can prepare your data provider executing the following command from the configuration directory:

 ```shell
-sftpgo revertprovider --to-version 4
+sftpgo revertprovider
 ```

-Take a look at the CLI usage to see the supported parameter for the `--to-version` argument and to learn how to specify a different configuration file:
+Take a look at the CLI usage to learn how to specify a configuration file:

 ```shell
 sftpgo revertprovider --help

@@ -253,11 +253,11 @@ The `revertprovider` command is not supported for the memory provider.

 Please note that we only support the current release branch and the current main branch, if you find a bug it is better to report it rather than downgrading to an older unsupported version.

-## Users, groups and folders management
+## Users, groups, folders and other resource management

 After starting SFTPGo you can manage users, groups, folders and other resources using:

-- the [web based administration interface](./docs/web-admin.md)
+- the [WebAdmin UI](./docs/web-admin.md)
 - the [REST API](./docs/rest-api.md)

 To support embedded data providers like `bolt` and `SQLite`, which do not support concurrent connections, we can't have a CLI that directly write users and other resources to the data provider, we always have to use the REST API.

@@ -299,7 +299,7 @@ More information about custom actions can be found [here](./docs/custom-actions.

 ## Virtual folders

-Directories outside the user home directory or based on a different storage provider can be exposed as virtual folders, more information [here](./docs/virtual-folders.md).
+Directories outside the user home directory or based on a different storage provider can be mapped as virtual folders, more information [here](./docs/virtual-folders.md).

 ## Other hooks


@@ -310,7 +310,7 @@ You can use your own hook to [check passwords](./docs/check-password-hook.md).

 ### S3/GCP/Azure

-Each user can be mapped with a [S3 Compatible Object Storage](./docs/s3.md) /[Google Cloud Storage](./docs/google-cloud-storage.md)/[Azure Blob Storage](./docs/azure-blob-storage.md) bucket or a bucket virtual folder that is exposed over SFTP/SCP/FTP/WebDAV.
+Each user can be mapped with a [S3 Compatible Object Storage](./docs/s3.md) /[Google Cloud Storage](./docs/google-cloud-storage.md)/[Azure Blob Storage](./docs/azure-blob-storage.md) bucket or a bucket virtual folder.

 ### SFTP backend

README (Chinese translation)

@@ -11,6 +11,8 @@
 A fully featured and highly configurable SFTP server that also supports HTTP/S, FTP/S and WebDAV.
 Several storage backends are supported: local filesystem, encrypted local filesystem, S3 (compatible) object storage, Google Cloud Storage, Azure Blob Storage, SFTP.

+:warning: I cannot maintain the Chinese translation myself, so this document may be out of date.
+
 ## Sponsors

 If you find SFTPGo useful, please consider supporting this open source project.

@@ -32,6 +34,10 @@

 [<img src="./img/Aledade_logo.png" alt="Aledade logo" width="202" height="70">](https://www.aledade.com/)

+#### Silver sponsors
+
+[<img src="./img/Dendi_logo.png" alt="Dendi logo" width="212" height="66">](https://dendisoftware.com/)
+
 #### Bronze sponsors

 [<img src="https://www.7digital.com/wp-content/themes/sevendigital/images/top_logo.png" alt="7digital logo">](https://www.7digital.com/)
@@ -4,12 +4,12 @@ SFTPGo provides an official Docker image, it is available on both [Docker Hub](h

 ## Supported tags and respective Dockerfile links

-- [v2.4.3, v2.4, v2, latest](https://github.com/drakkan/sftpgo/blob/v2.4.3/Dockerfile)
+- [v2.4.4, v2.4, v2, latest](https://github.com/drakkan/sftpgo/blob/v2.4.4/Dockerfile)
-- [v2.4.3-plugins, v2.4-plugins, v2-plugins, plugins](https://github.com/drakkan/sftpgo/blob/v2.4.3/Dockerfile)
+- [v2.4.4-plugins, v2.4-plugins, v2-plugins, plugins](https://github.com/drakkan/sftpgo/blob/v2.4.4/Dockerfile)
-- [v2.4.3-alpine, v2.4-alpine, v2-alpine, alpine](https://github.com/drakkan/sftpgo/blob/v2.4.3/Dockerfile.alpine)
+- [v2.4.4-alpine, v2.4-alpine, v2-alpine, alpine](https://github.com/drakkan/sftpgo/blob/v2.4.4/Dockerfile.alpine)
-- [v2.4.3-slim, v2.4-slim, v2-slim, slim](https://github.com/drakkan/sftpgo/blob/v2.4.3/Dockerfile)
+- [v2.4.4-slim, v2.4-slim, v2-slim, slim](https://github.com/drakkan/sftpgo/blob/v2.4.4/Dockerfile)
-- [v2.4.3-alpine-slim, v2.4-alpine-slim, v2-alpine-slim, alpine-slim](https://github.com/drakkan/sftpgo/blob/v2.4.3/Dockerfile.alpine)
+- [v2.4.4-alpine-slim, v2.4-alpine-slim, v2-alpine-slim, alpine-slim](https://github.com/drakkan/sftpgo/blob/v2.4.4/Dockerfile.alpine)
-- [v2.4.3-distroless-slim, v2.4-distroless-slim, v2-distroless-slim, distroless-slim](https://github.com/drakkan/sftpgo/blob/v2.4.3/Dockerfile.distroless)
+- [v2.4.4-distroless-slim, v2.4-distroless-slim, v2-distroless-slim, distroless-slim](https://github.com/drakkan/sftpgo/blob/v2.4.4/Dockerfile.distroless)
 - [edge](../Dockerfile)
 - [edge-plugins](../Dockerfile)
 - [edge-alpine](../Dockerfile.alpine)
@@ -58,7 +58,7 @@ The FTP service is now available on port 2121 and SFTP on port 2022.

 You can change the passive ports range (`50000-50100` by default) by setting the environment variables `SFTPGO_FTPD__PASSIVE_PORT_RANGE__START` and `SFTPGO_FTPD__PASSIVE_PORT_RANGE__END`.

-It is recommended that you provide a certificate and key file to expose FTP over TLS. You should prefer SFTP to FTP even if you configure TLS, please don't blindly enable the old FTP protocol.
+It is recommended that you provide a certificate and key file to enable FTP over TLS. You should prefer SFTP to FTP even if you configure TLS, please don't blindly enable the old FTP protocol.

 ### Enable WebDAV service

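For context, here is a sketch of how an FTP-enabled container combining the ports and environment variables mentioned above might be started. The image tag and the `SFTPGO_FTPD__BINDINGS__0__PORT` variable follow the naming pattern used elsewhere in these docs but are assumptions, not values taken from this commit.

```shell
# Hypothetical example: image tag and FTPD binding variable are assumptions;
# only the passive port range variables appear verbatim in the text above.
docker run --name some-sftpgo \
  -p 2022:2022 \
  -p 2121:2121 \
  -p 50000-50100:50000-50100 \
  -e SFTPGO_FTPD__BINDINGS__0__PORT=2121 \
  -e SFTPGO_FTPD__PASSIVE_PORT_RANGE__START=50000 \
  -e SFTPGO_FTPD__PASSIVE_PORT_RANGE__END=50100 \
  -d "drakkan/sftpgo:v2.4.4"
```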
@@ -75,7 +75,7 @@ docker run --name some-sftpgo \

 The WebDAV service is now available on port 10080 and SFTP on port 2022.

-It is recommended that you provide a certificate and key file to expose WebDAV over https.
+It is recommended that you provide a certificate and key file to enable WebDAV over https.

 ### Container shell access and viewing SFTPGo logs

@@ -116,7 +116,7 @@ Alternatively you can increase the default docker grace time to a value larger t
 Important note: There are several ways to store data used by applications that run in Docker containers. We encourage users of the SFTPGo images to familiarize themselves with the options available, including:

 - Let Docker manage the storage for SFTPGo data by [writing them to disk on the host system using its own internal volume management](https://docs.docker.com/engine/tutorials/dockervolumes/#adding-a-data-volume). This is the default and is easy and fairly transparent to the user. The downside is that the files may be hard to locate for tools and applications that run directly on the host system, i.e. outside containers.
-- Create a data directory on the host system (outside the container) and [mount this to a directory visible from inside the container]((https://docs.docker.com/engine/tutorials/dockervolumes/#mount-a-host-directory-as-a-data-volume)). This places the SFTPGo files in a known location on the host system, and makes it easy for tools and applications on the host system to access the files. The downside is that the user needs to make sure that the directory exists, and that e.g. directory permissions and other security mechanisms on the host system are set up correctly. The SFTPGo image runs using `1000` as UID/GID by default.
+- Create a data directory on the host system (outside the container) and [mount this to a directory visible from inside the container](https://docs.docker.com/engine/tutorials/dockervolumes/#mount-a-host-directory-as-a-data-volume). This places the SFTPGo files in a known location on the host system, and makes it easy for tools and applications on the host system to access the files. The downside is that the user needs to make sure that the directory exists, and that e.g. directory permissions and other security mechanisms on the host system are set up correctly. The SFTPGo image runs using `1000` as UID/GID by default.

 The Docker documentation is a good starting point for understanding the different storage options and variations, and there are multiple blogs and forum postings that discuss and give advice in this area. We will simply show the basic procedure here for the latter option above:

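A sketch of the bind-mount option described in the second bullet follows; the host path and the in-container data path are assumptions for illustration, only the UID/GID detail comes from the text above.

```shell
# Hypothetical paths: adjust to your setup.
mkdir -p /srv/sftpgo/data
chown -R 1000:1000 /srv/sftpgo/data   # the image runs as UID/GID 1000 by default
docker run --name some-sftpgo \
  -p 2022:2022 \
  --mount type=bind,source=/srv/sftpgo/data,target=/srv/sftpgo \
  -d "drakkan/sftpgo:v2.4.4"
```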
|
|
@ -42,31 +42,4 @@ Using the REST API you can:
|
||||||
- list hosts within the defender's lists
|
- list hosts within the defender's lists
|
||||||
- remove hosts from the defender's lists
|
- remove hosts from the defender's lists
|
||||||
|
|
||||||
The `defender` can also load a permanent block list and/or a safe list of ip addresses/networks from a file:
|
The `defender` can also check permanent block and safe lists of IP addresses/networks. You can define these lists using the WebAdmin UI or the REST API. In multi-nodes setups, the list entries propagation between nodes may take some minutes.
|
||||||
|
|
||||||
- `safelist_file`, defines the path to a file containing a list of ip addresses and/or networks to never ban.
|
|
||||||
- `blocklist_file`, defines the path to a file containing a list of ip addresses and/or networks to always ban.
|
|
||||||
|
|
||||||
These list must be stored as JSON conforming to the following schema:
|
|
||||||
|
|
||||||
- `addresses`, list of strings. Each string must be a valid IPv4/IPv6 address.
|
|
||||||
- `networks`, list of strings. Each string must be a valid IPv4/IPv6 CIDR address.
|
|
||||||
|
|
||||||
Here is a small example:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"addresses":[
|
|
||||||
"192.0.2.1",
|
|
||||||
"2001:db8::68"
|
|
||||||
],
|
|
||||||
"networks":[
|
|
||||||
"192.0.3.0/24",
|
|
||||||
"2001:db8:1234::/48"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Small lists can also be set using the `safelist`/`blocklist` configuration parameters and or using environment variables. These lists will be merged with the ones specified via files, if any, so that you can set both.
|
|
||||||
|
|
||||||
These list will be always loaded in memory (even if you use the `provider` driver) for faster lookups. The REST API queries "live" data and not these lists.
|
|
||||||
|
|
|
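To illustrate how the provider-backed lists replace the old files, here is a hypothetical sketch of adding a permanently banned network through the REST API. The `/api/v2/iplists/...` path and the payload fields are assumptions based on general SFTPGo REST API conventions, not taken from this commit; check the OpenAPI schema shipped with your version for the real endpoint and fields.

```shell
# Hypothetical endpoint and payload; obtain an admin JWT from /api/v2/token first.
curl -s -X POST "http://127.0.0.1:8080/api/v2/iplists/defender" \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"ipornet": "192.0.3.0/24", "mode": 2, "description": "always banned"}'
```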
@@ -79,9 +79,9 @@ The configuration file contains the following sections:
 - `post_connect_hook`, string. Absolute path to the command to execute or HTTP URL to notify. See [Post-connect hook](./post-connect-hook.md) for more details. Leave empty to disable
 - `post_disconnect_hook`, string. Absolute path to the command to execute or HTTP URL to notify. See [Post-disconnect hook](./post-disconnect-hook.md) for more details. Leave empty to disable
 - `data_retention_hook`, string. Absolute path to the command to execute or HTTP URL to notify. See [Data retention hook](./data-retention-hook.md) for more details. Leave empty to disable
-- `max_total_connections`, integer. Maximum number of concurrent client connections. 0 means unlimited. Default: 0.
+- `max_total_connections`, integer. Maximum number of concurrent client connections. 0 means unlimited. Default: `0`.
-- `max_per_host_connections`, integer. Maximum number of concurrent client connections from the same host (IP). If the defender is enabled, exceeding this limit will generate `score_limit_exceeded` events and thus hosts that repeatedly exceed the max allowed connections can be automatically blocked. 0 means unlimited. Default: 20.
+- `max_per_host_connections`, integer. Maximum number of concurrent client connections from the same host (IP). If the defender is enabled, exceeding this limit will generate `score_limit_exceeded` events and thus hosts that repeatedly exceed the max allowed connections can be automatically blocked. 0 means unlimited. Default: `20`.
-- `whitelist_file`, string. Path to a file containing a list of IP addresses and/or networks to allow. Only the listed IPs/networks can access the configured services, all other client connections will be dropped before they even try to authenticate. The whitelist must be a JSON file with the same structure documented for the [defenders's list](./defender.md). The whitelist can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows. Default: "".
+- `allowlist_status`, integer. Set to `1` to enable the allow list. The allow list can be populated using the WebAdmin or the REST API. If enabled, only the listed IPs/networks can access the configured services, all other client connections will be dropped before they even try to authenticate. Ensure to populate your allow list before enabling this setting. In multi-nodes setups, the list entries propagation between nodes may take some minutes. Default: `0`.
 - `allow_self_connections`, integer. Allow users on this instance to use other users/virtual folders on this instance as storage backend. Enable this setting if you know what you are doing. Set to `1` to enable. Default: `0`.
 - `defender`, struct containing the defender configuration. See [Defender](./defender.md) for more details.
   - `enabled`, boolean. Default `false`.
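A minimal sketch of turning the new allow list on. It assumes the option lives in the `common` section, as the surrounding hunk suggests; the JSON fragment below and the environment variable spelling are illustrative, not part of this commit.

```json
{
  "common": {
    "allowlist_status": 1
  }
}
```

Following the `SFTPGO_<SECTION>__<KEY>` pattern used elsewhere in these docs, the equivalent environment variable would presumably be `SFTPGO_COMMON__ALLOWLIST_STATUS=1`. Remember to populate the allow list before enabling it, otherwise every client is dropped before authentication.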
@@ -96,17 +96,12 @@ The configuration file contains the following sections:
   - `observation_time`, integer. Defines the time window, in minutes, for tracking client errors. A host is banned if it has exceeded the defined threshold during the last observation time minutes. Default: `30`.
   - `entries_soft_limit`, integer. Ignored for `provider` driver. Default: `100`.
   - `entries_hard_limit`, integer. The number of banned IPs and host scores kept in memory will vary between the soft and hard limit for `memory` driver. If you use the `provider` driver, this setting will limit the number of entries to return when you ask for the entire host list from the defender. Default: `150`.
-  - `safelist_file`, string. Path to a file containing a list of ip addresses and/or networks to never ban.
-  - `blocklist_file`, string. Path to a file containing a list of ip addresses and/or networks to always ban. The lists can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows. An host that is already banned will not be automatically unbanned if you put it inside the safe list, you have to unban it using the REST API.
-  - `safelist`, list of IP addresses and/or IP ranges and/or networks to never ban. Invalid entries will be silently ignored. For large lists prefer `safelist_file`. `safelist` and `safelist_file` will be merged so that you can set both.
-  - `blocklist`, list of IP addresses and/or IP ranges and/or networks to always ban. Invalid entries will be silently ignored.. For large lists prefer `blocklist_file`. `blocklist` and `blocklist_file` will be merged so that you can set both.
 - `rate_limiters`, list of structs containing the rate limiters configuration. Take a look [here](./rate-limiting.md) for more details. Each struct has the following fields:
   - `average`, integer. Average defines the maximum rate allowed. 0 means disabled. Default: 0
   - `period`, integer. Period defines the period as milliseconds. The rate is actually defined by dividing average by period Default: 1000 (1 second).
   - `burst`, integer. Burst defines the maximum number of requests allowed to go through in the same arbitrarily small period of time. Default: 1
   - `type`, integer. 1 means a global rate limiter, independent from the source host. 2 means a per-ip rate limiter. Default: 2
   - `protocols`, list of strings. Available protocols are `SSH`, `FTP`, `DAV`, `HTTP`. By default all supported protocols are enabled
-  - `allow_list`, list of IP addresses and IP ranges excluded from rate limiting. Default: empty
   - `generate_defender_events`, boolean. If `true`, the defender is enabled, and this is not a global rate limiter, a new defender event will be generated each time the configured limit is exceeded. Default `false`
   - `entries_soft_limit`, integer.
   - `entries_hard_limit`, integer. The number of per-ip rate limiters kept in memory will vary between the soft and hard limit
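For orientation, a hedged sketch of a defender block using only keys mentioned in these docs; the `driver` key name and the JSON nesting under `common` are assumptions, and other defender options (ban time, score thresholds) are omitted.

```json
{
  "common": {
    "defender": {
      "enabled": true,
      "driver": "provider",
      "observation_time": 30,
      "entries_soft_limit": 100,
      "entries_hard_limit": 150
    }
  }
}
```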
@@ -166,7 +161,7 @@ The configuration file contains the following sections:
   - `certificate_file`, string. Binding specific TLS certificate. This can be an absolute path or a path relative to the config dir.
   - `certificate_key_file`, string. Binding specific private key matching the above certificate. This can be an absolute path or a path relative to the config dir. If not set the global ones will be used, if any.
   - `min_tls_version`, integer. Defines the minimum version of TLS to be enabled. `12` means TLS 1.2 (and therefore TLS 1.2 and TLS 1.3 will be enabled),`13` means TLS 1.3. Default: `12`.
-  - `force_passive_ip`, ip address. External IP address to expose for passive connections. Leave empty to autodetect. If not empty, it must be a valid IPv4 address. Default: "".
+  - `force_passive_ip`, ip address. External IP address for passive connections. Leave empty to autodetect. If not empty, it must be a valid IPv4 address. Default: "".
   - `passive_ip_overrides`, list of struct that allows to return a different passive ip based on the client IP address. Each struct has the following fields:
     - `networks`, list of strings. Each string must define a network in CIDR notation, for example 192.168.1.0/24.
     - `ip`, string. Passive IP to return if the client IP address belongs to the defined networks. Empty means autodetect.

@@ -296,7 +291,7 @@ The configuration file contains the following sections:
 </details>
 <details><summary><font size=4>HTTP Server</font></summary>

-- **"httpd"**, the configuration for the HTTP server used to serve REST API and to expose the built-in web interface
+- **"httpd"**, the configuration for the HTTP server used to serve REST API and the built-in web interfaces
   - `bindings`, list of structs. Each struct has the following fields:
     - `port`, integer. The port used for serving HTTP requests. Default: 8080.
     - `address`, string. Leave blank to listen on all available network interfaces. On *NIX you can specify an absolute path to listen on a Unix-domain socket Default: blank.
@@ -441,7 +436,7 @@ The configuration file contains the following sections:

 - **mfa**, multi-factor authentication settings
   - `totp`, list of struct that define settings for time-based one time passwords (RFC 6238). Each struct has the following fields:
-    - `name`, string. Unique configuration name. This name should not be changed if there are users or admins using the configuration. The name is not exposed to the authentication apps. Default: `Default`.
+    - `name`, string. Unique configuration name. This name should not be changed if there are users or admins using the configuration. The name is not visible to the authentication apps. Default: `Default`.
     - `issuer`, string. Name of the issuing Organization/Company. Default: `SFTPGo`.
     - `algo`, string. Algorithm to use for HMAC. The supported algorithms are: `sha1`, `sha256`, `sha512`. Currently Google Authenticator app on iPhone seems to only support `sha1`, please check the compatibility with your target apps/device before setting a different algorithm. You can also define multiple configurations, for example one that uses `sha256` or `sha512` and another one that uses `sha1` and instruct your users to use the appropriate configuration for their devices/apps. The algorithm should not be changed if there are users or admins using the configuration. Default: `sha1`.

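As a quick illustration of the `totp` fields listed above, here is a sketch with the documented defaults plus a second profile; the JSON nesting and the second profile's values are illustrative assumptions.

```json
{
  "mfa": {
    "totp": [
      { "name": "Default", "issuer": "SFTPGo", "algo": "sha1" },
      { "name": "Default-sha256", "issuer": "SFTPGo", "algo": "sha256" }
    ]
  }
}
```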
@@ -462,7 +457,7 @@ The configuration file contains the following sections:
 </details>
 <details><summary><font size=4>Plugins</font></summary>

-- **plugins**, list of external plugins. :warning: Please note that the plugin system is experimental, the exposed configuration parameters and interfaces may change in a backward incompatible way in future. Each plugin is configured using a struct with the following fields:
+- **plugins**, list of external plugins. :warning: Please note that the plugin system is experimental, the configuration parameters and interfaces may change in a backward incompatible way in future. Each plugin is configured using a struct with the following fields:
   - `type`, string. Defines the plugin type. Supported types: `notifier`, `kms`, `auth`, `metadata`.
   - `notifier_options`, struct. Defines the options for notifier plugins.
     - `fs_events`, list of strings. Defines the filesystem events that will be notified to this plugin.

@@ -587,7 +582,7 @@ When users log in, if their passwords are stored with anything other than the pr

 ## Telemetry Server

-The telemetry server exposes the following endpoints:
+The telemetry server publishes the following endpoints:

 - `/healthz`, health information (for health checks)
 - `/metrics`, Prometheus metrics
@@ -2,7 +2,7 @@

 SFTPGo allows to securely share your files over SFTP and optionally FTP/S and WebDAV too.
 Several storage backends are supported and they are configurable per user, so you can serve a local directory for a user and an S3 bucket (or part of it) for another one.
-SFTPGo also supports virtual folders, a virtual folder can use any of the supported storage backends. So you can have, for example, an S3 user that exposes a GCS bucket (or part of it) on a specified path and an encrypted local filesystem on another one.
+SFTPGo also supports virtual folders, a virtual folder can use any of the supported storage backends. So you can have, for example, a user with the S3 backend mapping a GCS bucket (or part of it) on a specified path and an encrypted local filesystem on another one.
 Virtual folders can be private or shared among multiple users, for shared virtual folders you can define different quota limits for each user.

 In this tutorial we explore the main features and concepts using the built-in web admin interface. Advanced users can also use the SFTPGo [REST API](https://sftpgo.stoplight.io/docs/sftpgo/openapi.yaml)

@@ -11,7 +11,7 @@ In this tutorial we explore the main features and concepts using the built-in we
 - [Initial configuration](#initial-configuration)
 - [Creating users](#creating-users)
 - [Creating users with a Cloud Storage backend](#creating-users-with-a-cloud-storage-backend)
-- [Creating users with a local encrypted backend (Data At Rest Encryption)](#creating-users-with-a-local-encrypted-backend-data-at-rest-Encryption)
+- [Creating users with a local encrypted backend (Data At Rest Encryption)](#creating-users-with-a-local-encrypted-backend-data-at-rest-encryption)
 - [Virtual permissions](#virtual-permissions)
 - [Virtual folders](#virtual-folders)
 - [Groups](#groups)

@@ -202,11 +202,11 @@ Suppose we created two virtual folders name `localfolder` and `minio` as you can

 Now, click `Users`, on the left menu, select a user and click the `Edit` icon, to update the user and associate the virtual folders.

-Virtual folders must be referenced using their unique name and you can expose them on a configurable virtual path. Take a look at the following screenshot.
+Virtual folders must be referenced using their unique name and you can map them on a configurable virtual path. Take a look at the following screenshot.

 ![Virtual Folders](./img/virtual-folders.png)

-We exposed the folder named `localfolder` on the path `/vdirlocal` (this must be an absolute UNIX path on Windows too) and the folder named `minio` on the path `/vdirminio`. For `localfolder` the quota usage is included within the user quota, while for the `minio` folder we defined separate quota limits: at most 2 files and at most 100MB, whichever is reached first.
+We mapped the folder named `localfolder` on the path `/vdirlocal` (this must be an absolute UNIX path on Windows too) and the folder named `minio` on the path `/vdirminio`. For `localfolder` the quota usage is included within the user quota, while for the `minio` folder we defined separate quota limits: at most 2 files and at most 100MB, whichever is reached first.

 The folder `minio` can be shared with other users and we can define different quota limits on a per-user basis. The folder `localfolder` is considered private since we have included its quota limits within those of the user, if we share them with other users we will break quota calculation.


@@ -621,7 +621,7 @@ Restart SFTPGo to apply the changes. The FTP service is now available on port `2

 You can also configure the passive ports range (`50000-50100` by default), these ports must be reachable for passive FTP to work. If your FTP server is on the private network side of a NAT configuration you have to set `force_passive_ip` to your external IP address. You may also need to open the passive port range on your firewall.

-It is recommended that you provide a certificate and key file to expose FTP over TLS. You should prefer SFTP to FTP even if you configure TLS, please don't blindly enable the old FTP protocol.
+It is recommended that you provide a certificate and key file to allow FTP over TLS. You should prefer SFTP to FTP even if you configure TLS, please don't blindly enable the old FTP protocol.

 ### Enable WebDAV service


@@ -656,4 +656,4 @@ Alternatively (recommended), you can use environment variables by creating the f
 SFTPGO_WEBDAVD__BINDINGS__0__PORT=10080
 ```

-Restart SFTPGo to apply the changes. The WebDAV service is now available on port `10080`. It is recommended that you provide a certificate and key file to expose WebDAV over https.
+Restart SFTPGo to apply the changes. The WebDAV service is now available on port `10080`. It is recommended that you provide a certificate and key file to allow WebDAV over https.
@@ -29,9 +29,9 @@ Two-factor authentication is enabled by default with the following settings.
 },
 ```

-The `issuer` and `algo` are exposed to the authenticators apps. For example, you could set your company/organization name as `issuer` and an `algo` appropriate for your target apps/devices. The supported algorithms are: `sha1`, `sha256`, `sha512`. Currently Google Authenticator app on iPhone seems to only support `sha1`, please check the compatibility with your target apps/device before setting a different algorithm.
+The `issuer` and `algo` are visible/used in the authenticators apps. For example, you could set your company/organization name as `issuer` and an `algo` appropriate for your target apps/devices. The supported algorithms are: `sha1`, `sha256`, `sha512`. Currently Google Authenticator app on iPhone seems to only support `sha1`, please check the compatibility with your target apps/device before setting a different algorithm.

-You can also define multiple configurations, for example one that uses `sha256` or `sha512` and another one that uses `sha1` and instruct your users to use the appropriate configuration for their devices/apps. The algorithm should not be changed if there are users or admins using the configuration. The `name` is exposed to the users/admins when they select the 2FA configuration to use and it must be unique. A configuration name should not be changed if there are users or admins using it.
+You can also define multiple configurations, for example one that uses `sha256` or `sha512` and another one that uses `sha1` and instruct your users to use the appropriate configuration for their devices/apps. The algorithm should not be changed if there are users or admins using the configuration. The `name` is visible to the users/admins when they select the 2FA configuration to use and it must be unique. A configuration name should not be changed if there are users or admins using it.

 SFTPGo can use 2FA for `HTTP`, `SSH` (SFTP, SCP) and `FTP` protocols. If you plan to use 2FA with `SSH` you have to enable the keyboard interactive authentication which is disabled by default.

@@ -1,6 +1,6 @@
 # Metrics

-SFTPGo exposes [Prometheus](https://prometheus.io/) metrics at the `/metrics` HTTP endpoint of the telemetry server.
+SFTPGo supports [Prometheus](https://prometheus.io/) metrics at the `/metrics` HTTP endpoint of the telemetry server.
 Several counters and gauges are available, for example:

 - Total uploads and downloads
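A hedged sketch of scraping that endpoint from Prometheus follows; the target host and port are placeholders (the telemetry listener and its port are configured separately), not values taken from this commit.

```yaml
# Placeholder target: point it at your telemetry server address and port.
scrape_configs:
  - job_name: "sftpgo"
    metrics_path: /metrics
    static_configs:
      - targets: ["sftpgo.example.com:8090"]
```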
@@ -18,7 +18,7 @@ The following plugin types are supported:

 Full configuration details can be found [here](./full-configuration.md).

-:warning: Please note that the plugin system is experimental, the exposed configuration parameters and interfaces may change in a backward incompatible way in future.
+:warning: Please note that the plugin system is experimental, the configuration parameters and interfaces may change in a backward incompatible way in future.

 ## Available plugins

@@ -1,6 +1,6 @@
 # Post-connect hook

-This hook is executed as soon as a new connection is established. It notifies the connection's IP address and protocol. Based on the received response, the connection is accepted or rejected. Combining this hook with the [Post-login hook](./post-login-hook.md) you can implement your own (even for Protocol) blacklist/whitelist of IP addresses.
+This hook is executed as soon as a new connection is established. It notifies the connection's IP address and protocol. Based on the received response, the connection is accepted or rejected. Combining this hook with the [Post-login hook](./post-login-hook.md) you can implement your own (even for Protocol) blocklist/allowlist of IP addresses.

 The `post_connect_hook` can be defined as the absolute path of your program or an HTTP URL.


@@ -17,7 +17,7 @@ The program must finish within 20 seconds.
 If the hook defines an HTTP URL then this URL will be invoked as HTTP GET with the following query parameters:

 - `ip`
-- `protocol`, possible values are `SSH`, `FTP`, `DAV`, `HTTP`, `OIDC` (OpenID Connect)
+- `protocol`, possible values are `SSH`, `FTP`, `DAV`, `HTTP`, `HTTPShare`, `OIDC` (OpenID Connect)

 The connection is accepted if the HTTP response code is `200` otherwise rejected.

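To make the HTTP variant concrete, here is a minimal sketch of a hook endpoint that accepts or rejects connections based on the `ip` and `protocol` query parameters described above; the blocked network, route and listen address are illustrative assumptions.

```go
// Minimal sketch of an HTTP post-connect hook endpoint.
// SFTPGo calls it with GET ?ip=...&protocol=... and accepts the
// connection only when the response status is 200.
package main

import (
	"log"
	"net"
	"net/http"
)

func main() {
	// Illustrative blocklist; a real hook would consult your own data.
	_, blocked, _ := net.ParseCIDR("192.0.3.0/24")

	http.HandleFunc("/hook", func(w http.ResponseWriter, r *http.Request) {
		ip := net.ParseIP(r.URL.Query().Get("ip"))
		proto := r.URL.Query().Get("protocol")
		if ip == nil || blocked.Contains(ip) {
			log.Printf("rejecting %s connection from %s", proto, ip)
			w.WriteHeader(http.StatusForbidden) // any non-200 status rejects the connection
			return
		}
		w.WriteHeader(http.StatusOK) // 200 accepts the connection
	})

	log.Fatal(http.ListenAndServe("127.0.0.1:8000", nil))
}
```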
@@ -3,7 +3,7 @@
 The built-in profiler lets you collect CPU profiles, traces, allocations and heap profiles that allow to identify and correct specific bottlenecks.
 You can enable the built-in profiler using `telemetry` configuration section inside the configuration file.

-Profiling data are exposed via HTTP/HTTPS in the format expected by the [pprof](https://github.com/google/pprof/blob/main/doc/README.md) visualization tool. You can find the index page at the URL `/debug/pprof/`.
+Profiling data are available via HTTP/HTTPS in the format expected by the [pprof](https://github.com/google/pprof/blob/main/doc/README.md) visualization tool. You can find the index page at the URL `/debug/pprof/`.

 The following profiles are available, you can obtain them via HTTP GET requests:

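For example, once the telemetry server is enabled, the standard pprof tooling can be pointed at that path; the host and port below are placeholders, not values from this commit.

```shell
# Placeholder host/port: use your telemetry server address.
go tool pprof "http://127.0.0.1:8090/debug/pprof/profile?seconds=30"   # 30s CPU profile
go tool pprof "http://127.0.0.1:8090/debug/pprof/heap"                 # heap profile
```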
@ -22,19 +22,7 @@ You can also define two types of rate limiters:
|
||||||
|
|
||||||
If you configure a per-host rate limiter, SFTPGo will keep a rate limiter in memory for each host that connects to the service, you can limit the memory usage using the `entries_soft_limit` and `entries_hard_limit` configuration keys.
|
If you configure a per-host rate limiter, SFTPGo will keep a rate limiter in memory for each host that connects to the service, you can limit the memory usage using the `entries_soft_limit` and `entries_hard_limit` configuration keys.
|
||||||
|
|
||||||
For each rate limiter you can exclude a list of IP addresses and IP ranges by defining an `allow_list`.
|
You can exclude a list of IP addresses and IP ranges from rate limiters by adding them to rate limites allow list using the WebAdmin UI or the REST API. In multi-nodes setups, the list entries propagation between nodes may take some minutes.
|
||||||
The allow list supports IPv4/IPv6 address and CIDR networks, for example:
|
|
||||||
|
|
||||||
```json
|
|
||||||
...
|
|
||||||
"allow_list": [
|
|
||||||
"192.0.2.1",
|
|
||||||
"192.168.1.0/24",
|
|
||||||
"2001:db8::68",
|
|
||||||
"2001:db8:1234::/48"
|
|
||||||
],
|
|
||||||
...
|
|
||||||
```
|
|
||||||
|
|
||||||
You can define as many rate limiters as you want, but keep in mind that if you define multiple rate limiters each request will be checked against all the configured limiters and so it can potentially be delayed multiple times. Let's clarify with an example: here is a configuration that defines a global rate limiter and a per-host rate limiter for the FTP protocol:
|
You can define as many rate limiters as you want, but keep in mind that if you define multiple rate limiters each request will be checked against all the configured limiters and so it can potentially be delayed multiple times. Let's clarify with an example: here is a configuration that defines a global rate limiter and a per-host rate limiter for the FTP protocol:
|
||||||
|
|
||||||
|
|
|
@ -1,10 +1,10 @@
|
||||||
# REST API
|
# REST API
|
||||||
|
|
||||||
SFTPGo exposes REST API to manage, backup, and restore users and folders, data retention, and to get real time reports of the active connections with the ability to forcibly close a connection.
|
SFTPGo supports a REST API to manage, backup, and restore users and folders, to manage data retention, and to get real-time reports of the active connections with the ability to forcibly close a connection.
|
||||||
|
|
||||||
If quota tracking is enabled in the configuration file, then the used size and number of files are updated each time a file is added/removed. If files are added/removed without using SFTP/SCP, or if you change `track_quota` from `2` to `1`, you can rescan the users' home dirs and update the used quota using the REST API.
|
If quota tracking is enabled in the configuration file, then the used size and number of files are updated each time a file is added/removed. If files are added/removed without using SFTP/SCP, or if you change `track_quota` from `2` to `1`, you can rescan the users' home dirs and update the used quota using the REST API.
|
||||||
|
|
||||||
REST API are protected using JSON Web Tokens (JWT) authentication and can be exposed over HTTPS. You can also configure client certificate authentication in addition to JWT.
|
The REST API is protected using JSON Web Token (JWT) authentication and can be served over HTTPS. You can also configure client certificate authentication in addition to JWT.
|
||||||
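For instance, a client could obtain a token as sketched below (hypothetical code: the address and the credentials are placeholders); the `/api/v2/token` endpoint used here is described in the next paragraph.

```go
// Hypothetical client sketch: request a JWT from the /api/v2/token endpoint
// using HTTP Basic authentication, then print the JSON response.
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8080/api/v2/token", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.SetBasicAuth("admin", "password") // credentials of an active administrator

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body)) // JSON containing the token and its expiration
}
```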
|
|
||||||
You can get a JWT token using the `/api/v2/token` endpoint; you need to authenticate using HTTP Basic authentication and the credentials of an active administrator. Here is a sample response:
|
You can get a JWT token using the `/api/v2/token` endpoint; you need to authenticate using HTTP Basic authentication and the credentials of an active administrator. Here is a sample response:
|
||||||
|
|
||||||
|
@ -99,7 +99,7 @@ You can find an example script that shows how to manage data retention [here](..
|
||||||
|
|
||||||
:warning: Deleting files is an irreversible action; please make sure you fully understand what you are doing before using this feature. You may have users with overlapping home directories or virtual folders shared between multiple users, and it is relatively easy to inadvertently delete files you need.
|
:warning: Deleting files is an irreversible action; please make sure you fully understand what you are doing before using this feature. You may have users with overlapping home directories or virtual folders shared between multiple users, and it is relatively easy to inadvertently delete files you need.
|
||||||
|
|
||||||
The OpenAPI 3 schema for the exposed API can be found inside the source tree: [openapi.yaml](../openapi/openapi.yaml "OpenAPI 3 specs"). You can render the schema and try the API using the `/openapi` endpoint. SFTPGo uses by default [Swagger UI](https://github.com/swagger-api/swagger-ui), you can use another renderer just by copying it to the defined OpenAPI path.
|
The OpenAPI 3 schema for the supported APIs can be found inside the source tree: [openapi.yaml](../openapi/openapi.yaml "OpenAPI 3 specs"). You can render the schema and try the API using the `/openapi` endpoint. By default SFTPGo uses [Swagger UI](https://github.com/swagger-api/swagger-ui); you can use another renderer just by copying it to the defined OpenAPI path.
|
||||||
|
|
||||||
You can also explore the schema on [Stoplight](https://sftpgo.stoplight.io/docs/sftpgo/openapi.yaml).
|
You can also explore the schema on [Stoplight](https://sftpgo.stoplight.io/docs/sftpgo/openapi.yaml).
|
||||||
|
|
||||||
|
|
|
@ -8,9 +8,9 @@ SFTPGo will try to automatically create any missing parent directory for the con
|
||||||
|
|
||||||
For each virtual folder, the following properties can be configured:
|
For each virtual folder, the following properties can be configured:
|
||||||
|
|
||||||
- `folder_name`, is the ID for an existing folder. The folder structure contains the absolute filesystem path to expose as virtual folder
|
- `folder_name`, the ID of an existing folder. The folder structure contains the absolute filesystem path to map as a virtual folder
|
||||||
- `filesystem`, this way you can map a local path or a Cloud backend to mount as a virtual folder
|
- `filesystem`, this way you can map a local path or a Cloud backend to mount as a virtual folder
|
||||||
- `virtual_path`, the SFTPGo absolute path to use to expose the mapped path
|
- `virtual_path`, absolute path seen by SFTPGo users where the mapped path is accessible
|
||||||
- `quota_size`, maximum size allowed in bytes. 0 means unlimited, -1 means included in the user quota
|
- `quota_size`, maximum size allowed in bytes. 0 means unlimited, -1 means included in the user quota
|
||||||
- `quota_files`, maximum number of files allowed. 0 means unlimited, -1 means included in the user quota
|
- `quota_files`, maximum number of files allowed. 0 means unlimited, -1 means included in the user quota
|
||||||
|
|
||||||
|
|
|
@ -1,10 +1,10 @@
|
||||||
# Web Admin
|
# Web Admin
|
||||||
|
|
||||||
You can easily build your own interface using the exposed [REST API](./rest-api.md). Anyway, SFTPGo also provides a basic built-in web interface that allows you to manage users, virtual folders, admins and connections.
|
You can easily build your own interface using the SFTPGo [REST API](./rest-api.md). However, SFTPGo also provides a basic built-in web interface that allows you to manage users, virtual folders, admins and connections.
|
||||||
With the default `httpd` configuration, the web admin is available at the following URL:
|
With the default `httpd` configuration, the web admin is available at the following URL:
|
||||||
|
|
||||||
[http://127.0.0.1:8080/web/admin](http://127.0.0.1:8080/web/admin)
|
[http://127.0.0.1:8080/web/admin](http://127.0.0.1:8080/web/admin)
|
||||||
|
|
||||||
If no admin user is found within the data provider, typically after the initial installation, SFTPGo will ask you to create the first admin. You can also pre-create an admin user by loading initial data or by enabling the `create_default_admin` configuration key. Please take a look [here](./full-configuration.md) for more details.
|
If no admin user is found within the data provider, typically after the initial installation, SFTPGo will ask you to create the first admin. You can also pre-create an admin user by loading initial data or by enabling the `create_default_admin` configuration key. Please take a look [here](./full-configuration.md) for more details.
|
||||||
|
|
||||||
The web interface can be exposed via HTTPS and may require mutual TLS authentication in addition to administrator credentials.
|
The web interface can be served over HTTPS and can be configured to require mutual TLS authentication in addition to administrator credentials.
|
||||||
|
|
62
go.mod
62
go.mod
|
@ -1,22 +1,22 @@
|
||||||
module github.com/drakkan/sftpgo/v2
|
module github.com/drakkan/sftpgo/v2
|
||||||
|
|
||||||
go 1.19
|
go 1.20
|
||||||
|
|
||||||
require (
|
require (
|
||||||
cloud.google.com/go/storage v1.29.0
|
cloud.google.com/go/storage v1.29.0
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.0
|
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1
|
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1
|
||||||
github.com/GehirnInc/crypt v0.0.0-20200316065508-bb7000b8a962
|
github.com/GehirnInc/crypt v0.0.0-20200316065508-bb7000b8a962
|
||||||
github.com/alexedwards/argon2id v0.0.0-20211130144151-3585854a6387
|
github.com/alexedwards/argon2id v0.0.0-20211130144151-3585854a6387
|
||||||
github.com/aws/aws-sdk-go-v2 v1.17.3
|
github.com/aws/aws-sdk-go-v2 v1.17.4
|
||||||
github.com/aws/aws-sdk-go-v2/config v1.18.10
|
github.com/aws/aws-sdk-go-v2/config v1.18.12
|
||||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.10
|
github.com/aws/aws-sdk-go-v2/credentials v1.13.12
|
||||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21
|
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.22
|
||||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.49
|
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.51
|
||||||
github.com/aws/aws-sdk-go-v2/service/marketplacemetering v1.14.1
|
github.com/aws/aws-sdk-go-v2/service/marketplacemetering v1.14.2
|
||||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.1
|
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.2
|
||||||
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.18.2
|
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.18.3
|
||||||
github.com/aws/aws-sdk-go-v2/service/sts v1.18.2
|
github.com/aws/aws-sdk-go-v2/service/sts v1.18.3
|
||||||
github.com/bmatcuk/doublestar/v4 v4.6.0
|
github.com/bmatcuk/doublestar/v4 v4.6.0
|
||||||
github.com/cockroachdb/cockroach-go/v2 v2.2.20
|
github.com/cockroachdb/cockroach-go/v2 v2.2.20
|
||||||
github.com/coreos/go-oidc/v3 v3.5.0
|
github.com/coreos/go-oidc/v3 v3.5.0
|
||||||
|
@ -53,28 +53,28 @@ require (
|
||||||
github.com/rs/xid v1.4.0
|
github.com/rs/xid v1.4.0
|
||||||
github.com/rs/zerolog v1.29.0
|
github.com/rs/zerolog v1.29.0
|
||||||
github.com/sftpgo/sdk v0.1.3-0.20221217110036-383c1bb50fa0
|
github.com/sftpgo/sdk v0.1.3-0.20221217110036-383c1bb50fa0
|
||||||
github.com/shirou/gopsutil/v3 v3.22.12
|
github.com/shirou/gopsutil/v3 v3.23.1
|
||||||
github.com/spf13/afero v1.9.3
|
github.com/spf13/afero v1.9.3
|
||||||
github.com/spf13/cobra v1.6.1
|
github.com/spf13/cobra v1.6.1
|
||||||
github.com/spf13/viper v1.15.0
|
github.com/spf13/viper v1.15.0
|
||||||
github.com/stretchr/testify v1.8.1
|
github.com/stretchr/testify v1.8.1
|
||||||
github.com/studio-b12/gowebdav v0.0.0-20221109171924-60ec5ad56012
|
github.com/studio-b12/gowebdav v0.0.0-20230203202212-3282f94193f2
|
||||||
github.com/subosito/gotenv v1.4.2
|
github.com/subosito/gotenv v1.4.2
|
||||||
github.com/unrolled/secure v1.13.0
|
github.com/unrolled/secure v1.13.0
|
||||||
github.com/wagslane/go-password-validator v0.3.0
|
github.com/wagslane/go-password-validator v0.3.0
|
||||||
github.com/wneessen/go-mail v0.3.8
|
github.com/wneessen/go-mail v0.3.8
|
||||||
github.com/yl2chen/cidranger v1.0.3-0.20210928021809-d1cb2c52f37a
|
github.com/yl2chen/cidranger v1.0.3-0.20210928021809-d1cb2c52f37a
|
||||||
go.etcd.io/bbolt v1.3.6
|
go.etcd.io/bbolt v1.3.7
|
||||||
go.uber.org/automaxprocs v1.5.1
|
go.uber.org/automaxprocs v1.5.1
|
||||||
gocloud.dev v0.28.0
|
gocloud.dev v0.28.0
|
||||||
golang.org/x/crypto v0.5.0
|
golang.org/x/crypto v0.5.0
|
||||||
golang.org/x/net v0.5.0
|
golang.org/x/net v0.5.0
|
||||||
golang.org/x/oauth2 v0.4.0
|
golang.org/x/oauth2 v0.4.0
|
||||||
golang.org/x/sys v0.4.0
|
golang.org/x/sys v0.5.0
|
||||||
golang.org/x/term v0.4.0
|
golang.org/x/term v0.5.0
|
||||||
golang.org/x/time v0.3.0
|
golang.org/x/time v0.3.0
|
||||||
google.golang.org/api v0.108.0
|
google.golang.org/api v0.109.0
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0
|
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
|
@ -85,16 +85,16 @@ require (
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect
|
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect
|
||||||
github.com/ajg/form v1.5.1 // indirect
|
github.com/ajg/form v1.5.1 // indirect
|
||||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
|
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
|
||||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 // indirect
|
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.28 // indirect
|
||||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21 // indirect
|
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.22 // indirect
|
||||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28 // indirect
|
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.29 // indirect
|
||||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.18 // indirect
|
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.19 // indirect
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect
|
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.22 // indirect
|
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.23 // indirect
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21 // indirect
|
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.22 // indirect
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.21 // indirect
|
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.22 // indirect
|
||||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.0 // indirect
|
github.com/aws/aws-sdk-go-v2/service/sso v1.12.1 // indirect
|
||||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0 // indirect
|
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.1 // indirect
|
||||||
github.com/aws/smithy-go v1.13.5 // indirect
|
github.com/aws/smithy-go v1.13.5 // indirect
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
|
github.com/beorn7/perks v1.0.1 // indirect
|
||||||
github.com/boombuler/barcode v1.0.1 // indirect
|
github.com/boombuler/barcode v1.0.1 // indirect
|
||||||
|
@ -152,13 +152,13 @@ require (
|
||||||
github.com/tklauser/numcpus v0.6.0 // indirect
|
github.com/tklauser/numcpus v0.6.0 // indirect
|
||||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||||
go.opencensus.io v0.24.0 // indirect
|
go.opencensus.io v0.24.0 // indirect
|
||||||
golang.org/x/mod v0.7.0 // indirect
|
golang.org/x/mod v0.8.0 // indirect
|
||||||
golang.org/x/text v0.6.0 // indirect
|
golang.org/x/text v0.7.0 // indirect
|
||||||
golang.org/x/tools v0.5.0 // indirect
|
golang.org/x/tools v0.5.0 // indirect
|
||||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
|
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
|
||||||
google.golang.org/appengine v1.6.7 // indirect
|
google.golang.org/appengine v1.6.7 // indirect
|
||||||
google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa // indirect
|
google.golang.org/genproto v0.0.0-20230202175211-008b39050e57 // indirect
|
||||||
google.golang.org/grpc v1.52.3 // indirect
|
google.golang.org/grpc v1.53.0 // indirect
|
||||||
google.golang.org/protobuf v1.28.1 // indirect
|
google.golang.org/protobuf v1.28.1 // indirect
|
||||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
|
|
116
go.sum
116
go.sum
|
@ -405,8 +405,8 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0/go.mod h1:uGG2W01BaETf0Ozp+Q
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.1/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
|
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.1/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.4/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
|
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.4/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
|
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.0 h1:VuHAcMq8pU1IWNT/m5yRaGqbK0BiQKHT8X4DTp9CHdI=
|
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 h1:gVXuXcWd1i4C2Ruxe321aU+IKGaStvGB/S90PUPB/W8=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.0/go.mod h1:tZoQYdDZNOiIjdSn0dVWVfl0NEPGOJqVLzSrcFk4Is0=
|
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1/go.mod h1:DffdKW9RFqa5VgmsjUOsS7UE7eiA5iAvYUs63bhKQ0M=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.0.0/go.mod h1:+6sju8gk8FRmSajX3Oz4G5Gm7P+mbqE9FVaXXFYTkCM=
|
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.0.0/go.mod h1:+6sju8gk8FRmSajX3Oz4G5Gm7P+mbqE9FVaXXFYTkCM=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0=
|
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 h1:t/W5MYAuQy81cvM8VUNfRLzhtKpXhVUAN7Cd7KVbTyc=
|
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 h1:t/W5MYAuQy81cvM8VUNfRLzhtKpXhVUAN7Cd7KVbTyc=
|
||||||
|
@ -453,7 +453,6 @@ github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9s
|
||||||
github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 h1:VgSJlZH5u0k2qxSpqyghcFQKmvYckj46uymKK5XzkBM=
|
github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 h1:VgSJlZH5u0k2qxSpqyghcFQKmvYckj46uymKK5XzkBM=
|
||||||
github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU=
|
github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU=
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
|
|
||||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||||
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||||
github.com/GehirnInc/crypt v0.0.0-20200316065508-bb7000b8a962 h1:KeNholpO2xKjgaaSyd+DyQRrsQjhbSeS7qe4nEw8aQw=
|
github.com/GehirnInc/crypt v0.0.0-20200316065508-bb7000b8a962 h1:KeNholpO2xKjgaaSyd+DyQRrsQjhbSeS7qe4nEw8aQw=
|
||||||
|
@ -533,68 +532,68 @@ github.com/aws/aws-sdk-go v1.44.128/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX
|
||||||
github.com/aws/aws-sdk-go v1.44.151/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
github.com/aws/aws-sdk-go v1.44.151/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
||||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||||
github.com/aws/aws-sdk-go-v2 v1.17.1/go.mod h1:JLnGeGONAyi2lWXI1p0PCIOIy333JMVK1U7Hf0aRFLw=
|
github.com/aws/aws-sdk-go-v2 v1.17.1/go.mod h1:JLnGeGONAyi2lWXI1p0PCIOIy333JMVK1U7Hf0aRFLw=
|
||||||
github.com/aws/aws-sdk-go-v2 v1.17.3 h1:shN7NlnVzvDUgPQ+1rLMSxY8OWRNDRYtiqe0p/PgrhY=
|
github.com/aws/aws-sdk-go-v2 v1.17.4 h1:wyC6p9Yfq6V2y98wfDsj6OnNQa4w2BLGCLIxzNhwOGY=
|
||||||
github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
|
github.com/aws/aws-sdk-go-v2 v1.17.4/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
|
||||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.9/go.mod h1:vCmV1q1VK8eoQJ5+aYE7PkK1K6v41qJ5pJdK3ggCDvg=
|
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.9/go.mod h1:vCmV1q1VK8eoQJ5+aYE7PkK1K6v41qJ5pJdK3ggCDvg=
|
||||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
|
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
|
||||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno=
|
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno=
|
||||||
github.com/aws/aws-sdk-go-v2/config v1.18.3/go.mod h1:BYdrbeCse3ZnOD5+2/VE/nATOK8fEUpBtmPMdKSyhMU=
|
github.com/aws/aws-sdk-go-v2/config v1.18.3/go.mod h1:BYdrbeCse3ZnOD5+2/VE/nATOK8fEUpBtmPMdKSyhMU=
|
||||||
github.com/aws/aws-sdk-go-v2/config v1.18.10 h1:Znce11DWswdh+5kOsIp+QaNfY9igp1QUN+fZHCKmeCI=
|
github.com/aws/aws-sdk-go-v2/config v1.18.12 h1:fKs/I4wccmfrNRO9rdrbMO1NgLxct6H9rNMiPdBxHWw=
|
||||||
github.com/aws/aws-sdk-go-v2/config v1.18.10/go.mod h1:VATKco+pl+Qe1WW+RzvZTlPPe/09Gg9+vM0ZXsqb16k=
|
github.com/aws/aws-sdk-go-v2/config v1.18.12/go.mod h1:J36fOhj1LQBr+O4hJCiT8FwVvieeoSGOtPuvhKlsNu8=
|
||||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.3/go.mod h1:/rOMmqYBcFfNbRPU0iN9IgGqD5+V2yp3iWNmIlz0wI4=
|
github.com/aws/aws-sdk-go-v2/credentials v1.13.3/go.mod h1:/rOMmqYBcFfNbRPU0iN9IgGqD5+V2yp3iWNmIlz0wI4=
|
||||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.10 h1:T4Y39IhelTLg1f3xiKJssThnFxsndS8B6OnmcXtKK+8=
|
github.com/aws/aws-sdk-go-v2/credentials v1.13.12 h1:Cb+HhuEnV19zHRaYYVglwvdHGMJWbdsyP4oHhw04xws=
|
||||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.10/go.mod h1:tqAm4JmQaShel+Qi38hmd1QglSnnxaYt50k/9yGQzzc=
|
github.com/aws/aws-sdk-go-v2/credentials v1.13.12/go.mod h1:37HG2MBroXK3jXfxVGtbM2J48ra2+Ltu+tmwr/jO0KA=
|
||||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19/go.mod h1:VihW95zQpeKQWVPGkwT+2+WJNQV8UXFfMTWdU6VErL8=
|
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19/go.mod h1:VihW95zQpeKQWVPGkwT+2+WJNQV8UXFfMTWdU6VErL8=
|
||||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21 h1:j9wi1kQ8b+e0FBVHxCqCGo4kxDU175hoDHcWAi0sauU=
|
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.22 h1:3aMfcTmoXtTZnaT86QlVaYh+BRMbvrrmZwIQ5jWqCZQ=
|
||||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21/go.mod h1:ugwW57Z5Z48bpvUyZuaPy4Kv+vEfJWnIrky7RmkBvJg=
|
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.22/go.mod h1:YGSIJyQ6D6FjKMQh16hVFSIUD54L4F7zTGePqYMYYJU=
|
||||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.42/go.mod h1:LHOsygMiW/14CkFxdXxvzKyMh3jbk/QfZVaDtCbLkl8=
|
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.42/go.mod h1:LHOsygMiW/14CkFxdXxvzKyMh3jbk/QfZVaDtCbLkl8=
|
||||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.49 h1:zPFhadkmXbXu3RVXTPU4HVW+g2DStMY+01cJaj//+Cw=
|
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.51 h1:iTFYCAdKzSAjGnVIUe88Hxvix0uaBqr0Rv7qJEOX5hE=
|
||||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.49/go.mod h1:N9gSChQkKpdAj7vRpfKma4ND88zoZM+v6W2lJgWrDh4=
|
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.51/go.mod h1:7Grl2gV+dx9SWrUIgwwlUvU40t7+lOSbx34XwfmsTkY=
|
||||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25/go.mod h1:Zb29PYkf42vVYQY6pvSyJCJcFHlPIiY+YKdPtwnvMkY=
|
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25/go.mod h1:Zb29PYkf42vVYQY6pvSyJCJcFHlPIiY+YKdPtwnvMkY=
|
||||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 h1:I3cakv2Uy1vNmmhRQmFptYDxOvBnwCdNwyw63N0RaRU=
|
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.28 h1:r+XwaCLpIvCKjBIYy/HVZujQS9tsz5ohHG3ZIe0wKoE=
|
||||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27/go.mod h1:a1/UpzeyBBerajpnP5nGZa9mGzsBn5cOKxm6NWQsvoI=
|
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.28/go.mod h1:3lwChorpIM/BhImY/hy+Z6jekmN92cXGPI1QJasVPYY=
|
||||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19/go.mod h1:6Q0546uHDp421okhmmGfbxzq2hBqbXFNpi4k+Q1JnQA=
|
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19/go.mod h1:6Q0546uHDp421okhmmGfbxzq2hBqbXFNpi4k+Q1JnQA=
|
||||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21 h1:5NbbMrIzmUn/TXFqAle6mgrH5m9cOvMLRGL7pnG8tRE=
|
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.22 h1:7AwGYXDdqRQYsluvKFmWoqpcOQJ4bH634SkYf3FNj/A=
|
||||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21/go.mod h1:+Gxn8jYn5k9ebfHEqlhrMirFjSW0v0C9fI+KN5vk2kE=
|
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.22/go.mod h1:EqK7gVrIGAHyZItrD1D8B0ilgwMD1GiWAmbU4u/JHNk=
|
||||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26/go.mod h1:Y2OJ+P+MC1u1VKnavT+PshiEuGPyh/7DqxoDNij4/bg=
|
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26/go.mod h1:Y2OJ+P+MC1u1VKnavT+PshiEuGPyh/7DqxoDNij4/bg=
|
||||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28 h1:KeTxcGdNnQudb46oOl4d90f2I33DF/c6q3RnZAmvQdQ=
|
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.29 h1:J4xhFd6zHhdF9jPP0FQJ6WknzBboGMBNjKOv4iTuw4A=
|
||||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28/go.mod h1:yRZVr/iT0AqyHeep00SZ4YfBAKojXz08w3XMBscdi0c=
|
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.29/go.mod h1:TwuqRBGzxjQJIwH16/fOZodwXt2Zxa9/cwJC5ke4j7s=
|
||||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.16/go.mod h1:XH+3h395e3WVdd6T2Z3mPxuI+x/HVtdqVOREkTiyubs=
|
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.16/go.mod h1:XH+3h395e3WVdd6T2Z3mPxuI+x/HVtdqVOREkTiyubs=
|
||||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.18 h1:H/mF2LNWwX00lD6FlYfKpLLZgUW7oIzCBkig78x4Xok=
|
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.19 h1:FGvpyTg2LKEmMrLlpjOgkoNp9XF5CGeyAyo33LdqZW8=
|
||||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.18/go.mod h1:T2Ku+STrYQ1zIkL1wMvj8P3wWQaaCMKNdz70MT2FLfE=
|
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.19/go.mod h1:8W88sW3PjamQpKFUQvHWWKay6ARsNvZnzU7+a4apubw=
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.10/go.mod h1:9cBNUHI2aW4ho0A5T87O294iPDuuUOSIEDjnd1Lq/z0=
|
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.10/go.mod h1:9cBNUHI2aW4ho0A5T87O294iPDuuUOSIEDjnd1Lq/z0=
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA=
|
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA=
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8=
|
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8=
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.20/go.mod h1:Mp4XI/CkWGD79AQxZ5lIFlgvC0A+gl+4BmyG1F+SfNc=
|
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.20/go.mod h1:Mp4XI/CkWGD79AQxZ5lIFlgvC0A+gl+4BmyG1F+SfNc=
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.22 h1:kv5vRAl00tozRxSnI0IszPWGXsJOyA7hmEUHFYqsyvw=
|
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.23 h1:c5+bNdV8E4fIPteWx4HZSkqI07oY9exbfQ7JH7Yx4PI=
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.22/go.mod h1:Od+GU5+Yx41gryN/ZGZzAJMZ9R1yn6lgA0fD5Lo5SkQ=
|
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.23/go.mod h1:1jcUfF+FAOEwtIcNiHPaV4TSoZqkUIPzrohmD7fb95c=
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19/go.mod h1:02CP6iuYP+IVnBX5HULVdSAku/85eHB2Y9EsFhrkEwU=
|
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19/go.mod h1:02CP6iuYP+IVnBX5HULVdSAku/85eHB2Y9EsFhrkEwU=
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21 h1:5C6XgTViSb0bunmU57b3CT+MhxULqHH2721FVA+/kDM=
|
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.22 h1:LjFQf8hFuMO22HkV5VWGLBvmCLBCLPivUAmpdpnp4Vs=
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21/go.mod h1:lRToEJsn+DRA9lW4O9L9+/3hjTkUzlzyzHqn8MTds5k=
|
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.22/go.mod h1:xt0Au8yPIwYXf/GYPy/vl4K3CgwhfQMYbrH7DlUUIws=
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.19/go.mod h1:BmQWRVkLTmyNzYPFAZgon53qKLWBNSvonugD1MrSWUs=
|
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.19/go.mod h1:BmQWRVkLTmyNzYPFAZgon53qKLWBNSvonugD1MrSWUs=
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.21 h1:vY5siRXvW5TrOKm2qKEf9tliBfdLxdfy0i02LOcmqUo=
|
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.22 h1:ISLJ2BKXe4zzyZ7mp5ewKECiw0U7KpLgS3S6OxY9Cm0=
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.21/go.mod h1:WZvNXT1XuH8dnJM0HvOlvk+RNn7NbAPvA/ACO0QarSc=
|
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.22/go.mod h1:QFVbqK54XArazLvn2wvWMRBi/jGrWii46qbr5DyPGjc=
|
||||||
github.com/aws/aws-sdk-go-v2/service/kms v1.19.0/go.mod h1:kZodDPTQjSH/qM6/OvyTfM5mms5JHB/EKYp5dhn/vI4=
|
github.com/aws/aws-sdk-go-v2/service/kms v1.19.0/go.mod h1:kZodDPTQjSH/qM6/OvyTfM5mms5JHB/EKYp5dhn/vI4=
|
||||||
github.com/aws/aws-sdk-go-v2/service/marketplacemetering v1.14.1 h1:IOjpqwEHMYPVfiqnH/auHvhz69/SGHYo/tFBkax5O0o=
|
github.com/aws/aws-sdk-go-v2/service/marketplacemetering v1.14.2 h1:7vuSkPqVqwBwSV0OJD71qqWOEFr3Hh1K0e2yOQ/JWwQ=
|
||||||
github.com/aws/aws-sdk-go-v2/service/marketplacemetering v1.14.1/go.mod h1:DSuypbY6jb7WZSxrLuCgd7ouB5uRQ+Hg5wbt0GmgRcc=
|
github.com/aws/aws-sdk-go-v2/service/marketplacemetering v1.14.2/go.mod h1:vrZVsmrC7QRNBK/W8nplI0tfJDvMl6DZAUT/pkFJiws=
|
||||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.29.4/go.mod h1:/NHbqPRiwxSPVOB2Xr+StDEH+GWV/64WwnUjv4KYzV0=
|
github.com/aws/aws-sdk-go-v2/service/s3 v1.29.4/go.mod h1:/NHbqPRiwxSPVOB2Xr+StDEH+GWV/64WwnUjv4KYzV0=
|
||||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.1 h1:kIgvVY7PHx4gIb0na/Q9gTWJWauTwhKdaqJjX8PkIY8=
|
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.2 h1:5EQWIFO+Hc8E2hFcXQJ1vm6ufl/PMt/6RVRDZRju2vM=
|
||||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.1/go.mod h1:L2l2/q76teehcW7YEsgsDjqdsDTERJeX3nOMIFlgGUE=
|
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.2/go.mod h1:SXDHd6fI2RhqB7vmAzyYQCTQnpZrIprVJvYxpzW3JAM=
|
||||||
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.16.8/go.mod h1:k6CPuxyzO247nYEM1baEwHH1kRtosRCvgahAepaaShw=
|
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.16.8/go.mod h1:k6CPuxyzO247nYEM1baEwHH1kRtosRCvgahAepaaShw=
|
||||||
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.18.2 h1:QDVKb2VpuwzIslzshumxksayV5GkpqT+rkVvdPVrA9E=
|
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.18.3 h1:Zod/h9QcDvbrrG3jjTUp4lctRb6Qg2nj7ARC/xMsUc4=
|
||||||
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.18.2/go.mod h1:jAeo/PdIJZuDSwsvxJS94G4d6h8tStj7WXVuKwLHWU8=
|
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.18.3/go.mod h1:hqPcyOuLU6yWIbLy3qMnQnmidgKuIEwqIlW6+chYnog=
|
||||||
github.com/aws/aws-sdk-go-v2/service/sns v1.18.6/go.mod h1:2cPUjR63iE9MPMPJtSyzYmsTFCNrN/Xi9j0v9BL5OU0=
|
github.com/aws/aws-sdk-go-v2/service/sns v1.18.6/go.mod h1:2cPUjR63iE9MPMPJtSyzYmsTFCNrN/Xi9j0v9BL5OU0=
|
||||||
github.com/aws/aws-sdk-go-v2/service/sqs v1.19.15/go.mod h1:DKX/7/ZiAzHO6p6AhArnGdrV4r+d461weby8KeVtvC4=
|
github.com/aws/aws-sdk-go-v2/service/sqs v1.19.15/go.mod h1:DKX/7/ZiAzHO6p6AhArnGdrV4r+d461weby8KeVtvC4=
|
||||||
github.com/aws/aws-sdk-go-v2/service/ssm v1.33.1/go.mod h1:rEsqsZrOp9YvSGPOrcL3pR9+i/QJaWRkAYbuxMa7yCU=
|
github.com/aws/aws-sdk-go-v2/service/ssm v1.33.1/go.mod h1:rEsqsZrOp9YvSGPOrcL3pR9+i/QJaWRkAYbuxMa7yCU=
|
||||||
github.com/aws/aws-sdk-go-v2/service/sso v1.11.25/go.mod h1:IARHuzTXmj1C0KS35vboR0FeJ89OkEy1M9mWbK2ifCI=
|
github.com/aws/aws-sdk-go-v2/service/sso v1.11.25/go.mod h1:IARHuzTXmj1C0KS35vboR0FeJ89OkEy1M9mWbK2ifCI=
|
||||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.0 h1:/2gzjhQowRLarkkBOGPXSRnb8sQ2RVsjdG1C/UliK/c=
|
github.com/aws/aws-sdk-go-v2/service/sso v1.12.1 h1:lQKN/LNa3qqu2cDOQZybP7oL4nMGGiFqob0jZJaR8/4=
|
||||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.0/go.mod h1:wo/B7uUm/7zw/dWhBJ4FXuw1sySU5lyIhVg1Bu2yL9A=
|
github.com/aws/aws-sdk-go-v2/service/sso v1.12.1/go.mod h1:IgV8l3sj22nQDd5qcAGY0WenwCzCphqdbFOpfktZPrI=
|
||||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8/go.mod h1:er2JHN+kBY6FcMfcBBKNGCT3CarImmdFzishsqBmSRI=
|
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8/go.mod h1:er2JHN+kBY6FcMfcBBKNGCT3CarImmdFzishsqBmSRI=
|
||||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0 h1:Jfly6mRxk2ZOSlbCvZfKNS7TukSx1mIzhSsqZ/IGSZI=
|
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.1 h1:0bLhH6DRAqox+g0LatcjGKjjhU6Eudyys6HB6DJVPj8=
|
||||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0/go.mod h1:TZSH7xLO7+phDtViY/KUp9WGCJMQkLJ/VpgkTFd5gh8=
|
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.1/go.mod h1:O1YSOg3aekZibh2SngvCRRG+cRHKKlYgxf/JBF/Kr/k=
|
||||||
github.com/aws/aws-sdk-go-v2/service/sts v1.17.5/go.mod h1:bXcN3koeVYiJcdDU89n3kCYILob7Y34AeLopUbZgLT4=
|
github.com/aws/aws-sdk-go-v2/service/sts v1.17.5/go.mod h1:bXcN3koeVYiJcdDU89n3kCYILob7Y34AeLopUbZgLT4=
|
||||||
github.com/aws/aws-sdk-go-v2/service/sts v1.18.2 h1:J/4wIaGInCEYCGhTSruxCxeoA5cy91a+JT7cHFKFSHQ=
|
github.com/aws/aws-sdk-go-v2/service/sts v1.18.3 h1:s49mSnsBZEXjfGBkRfmK+nPqzT7Lt3+t2SmAKNyHblw=
|
||||||
github.com/aws/aws-sdk-go-v2/service/sts v1.18.2/go.mod h1:+lGbb3+1ugwKrNTWcf2RT05Xmp543B06zDFTwiTLp7I=
|
github.com/aws/aws-sdk-go-v2/service/sts v1.18.3/go.mod h1:b+psTJn33Q4qGoDaM7ZiOVVG8uVjGI6HaZ8WBHdgDgU=
|
||||||
github.com/aws/smithy-go v1.13.4/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
github.com/aws/smithy-go v1.13.4/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||||
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
|
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
|
||||||
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||||
|
@ -1802,8 +1801,8 @@ github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod
|
||||||
github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4 h1:PT+ElG/UUFMfqy5HrxJxNzj3QBOf7dZwupeVC+mG1Lo=
|
github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4 h1:PT+ElG/UUFMfqy5HrxJxNzj3QBOf7dZwupeVC+mG1Lo=
|
||||||
github.com/sftpgo/sdk v0.1.3-0.20221217110036-383c1bb50fa0 h1:e1OQroqX8SWV06Z270CxG2/v//Wx1026iXKTDRn5J1E=
|
github.com/sftpgo/sdk v0.1.3-0.20221217110036-383c1bb50fa0 h1:e1OQroqX8SWV06Z270CxG2/v//Wx1026iXKTDRn5J1E=
|
||||||
github.com/sftpgo/sdk v0.1.3-0.20221217110036-383c1bb50fa0/go.mod h1:3GpW3Qy8IHH6kex0ny+Y6ayeYb9OJxz8Pxh3IZgAs2E=
|
github.com/sftpgo/sdk v0.1.3-0.20221217110036-383c1bb50fa0/go.mod h1:3GpW3Qy8IHH6kex0ny+Y6ayeYb9OJxz8Pxh3IZgAs2E=
|
||||||
github.com/shirou/gopsutil/v3 v3.22.12 h1:oG0ns6poeUSxf78JtOsfygNWuEHYYz8hnnNg7P04TJs=
|
github.com/shirou/gopsutil/v3 v3.23.1 h1:a9KKO+kGLKEvcPIs4W62v0nu3sciVDOOOPUD0Hz7z/4=
|
||||||
github.com/shirou/gopsutil/v3 v3.22.12/go.mod h1:Xd7P1kwZcp5VW52+9XsirIKd/BROzbb2wdX3Kqlz9uI=
|
github.com/shirou/gopsutil/v3 v3.23.1/go.mod h1:NN6mnm5/0k8jw4cBfCnJtr5L7ErOTg18tMNpgFkn0hA=
|
||||||
github.com/shoenig/test v0.4.3/go.mod h1:xYtyGBC5Q3kzCNyJg/SjgNpfAa2kvmgA0i5+lQso8x0=
|
github.com/shoenig/test v0.4.3/go.mod h1:xYtyGBC5Q3kzCNyJg/SjgNpfAa2kvmgA0i5+lQso8x0=
|
||||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||||
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||||
|
@ -1880,8 +1879,8 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F
|
||||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||||
github.com/studio-b12/gowebdav v0.0.0-20221109171924-60ec5ad56012 h1:ZC+dlnsjxqrcB68nEFbIEfo4iXsog3Sg8FlXKytAjhY=
|
github.com/studio-b12/gowebdav v0.0.0-20230203202212-3282f94193f2 h1:VsBj3UD2xyAOu7kJw6O/2jjG2UXLFoBzihqDU9Ofg9M=
|
||||||
github.com/studio-b12/gowebdav v0.0.0-20221109171924-60ec5ad56012/go.mod h1:bHA7t77X/QFExdeAnDzK6vKM34kEZAcE1OX4MfiwjkE=
|
github.com/studio-b12/gowebdav v0.0.0-20230203202212-3282f94193f2/go.mod h1:bHA7t77X/QFExdeAnDzK6vKM34kEZAcE1OX4MfiwjkE=
|
||||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||||
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
|
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
|
||||||
github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
|
github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
|
||||||
|
@ -1954,8 +1953,9 @@ github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxt
|
||||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||||
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||||
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
|
|
||||||
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
|
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
|
||||||
|
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
|
||||||
|
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
|
||||||
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
||||||
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
|
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
|
||||||
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
||||||
|
@ -2095,8 +2095,8 @@ golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||||
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
|
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
|
||||||
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
|
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
|
||||||
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
@ -2384,8 +2384,9 @@ golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
|
|
||||||
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
|
||||||
|
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
|
@ -2393,8 +2394,9 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
|
||||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||||
golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
|
|
||||||
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
|
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
|
||||||
|
golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
|
||||||
|
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
@ -2407,8 +2409,9 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||||
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
|
|
||||||
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||||
|
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
|
||||||
|
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
@ -2576,8 +2579,8 @@ google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91
|
||||||
google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70=
|
google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70=
|
||||||
google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo=
|
google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo=
|
||||||
google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=
|
google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=
|
||||||
google.golang.org/api v0.108.0 h1:WVBc/faN0DkKtR43Q/7+tPny9ZoLZdIiAyG5Q9vFClg=
|
google.golang.org/api v0.109.0 h1:sW9hgHyX497PP5//NUM7nqfV8D0iDfBApqq7sOh1XR8=
|
||||||
google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
|
google.golang.org/api v0.109.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
|
||||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
@ -2711,8 +2714,8 @@ google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZV
|
||||||
google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
|
google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
|
||||||
google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
|
google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
|
||||||
google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
|
google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
|
||||||
google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa h1:GZXdWYIKckxQE2EcLHLvF+KLF+bIwoxGdMUxTZizueg=
|
google.golang.org/genproto v0.0.0-20230202175211-008b39050e57 h1:vArvWooPH749rNHpBGgVl+U9B9dATjiEhJzcWGlovNs=
|
||||||
google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
|
google.golang.org/genproto v0.0.0-20230202175211-008b39050e57/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
|
||||||
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
|
@ -2758,8 +2761,8 @@ google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCD
|
||||||
google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
|
google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
|
||||||
google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
|
google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
|
||||||
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
|
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
|
||||||
google.golang.org/grpc v1.52.3 h1:pf7sOysg4LdgBqduXveGKrcEwbStiK2rtfghdzlUYDQ=
|
google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
|
||||||
google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
|
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
|
||||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||||
|
@ -2798,8 +2801,9 @@ gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||||
gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||||
|
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||||
|
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
||||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||||
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||||
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||||
|
|
|
@ -85,15 +85,6 @@ Command-line flags should be specified in the Subsystem declaration.
|
||||||
logger.Error(logSender, connectionID, "unable to load configuration: %v", err)
|
logger.Error(logSender, connectionID, "unable to load configuration: %v", err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
dataProviderConf := config.GetProviderConf()
|
|
||||||
commonConfig := config.GetCommonConfig()
|
|
||||||
// idle connection are managed externally
|
|
||||||
commonConfig.IdleTimeout = 0
|
|
||||||
config.SetCommonConfig(commonConfig)
|
|
||||||
if err := common.Initialize(config.GetCommonConfig(), dataProviderConf.GetShared()); err != nil {
|
|
||||||
logger.Error(logSender, connectionID, "%v", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
kmsConfig := config.GetKMSConfig()
|
kmsConfig := config.GetKMSConfig()
|
||||||
if err := kmsConfig.Initialize(); err != nil {
|
if err := kmsConfig.Initialize(); err != nil {
|
||||||
logger.Error(logSender, connectionID, "unable to initialize KMS: %v", err)
|
logger.Error(logSender, connectionID, "unable to initialize KMS: %v", err)
|
||||||
|
@ -115,8 +106,9 @@ Command-line flags should be specified in the Subsystem declaration.
|
||||||
logger.Error(logSender, connectionID, "unable to initialize SMTP configuration: %v", err)
|
logger.Error(logSender, connectionID, "unable to initialize SMTP configuration: %v", err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
dataProviderConf := config.GetProviderConf()
|
||||||
if dataProviderConf.Driver == dataprovider.SQLiteDataProviderName || dataProviderConf.Driver == dataprovider.BoltDataProviderName {
|
if dataProviderConf.Driver == dataprovider.SQLiteDataProviderName || dataProviderConf.Driver == dataprovider.BoltDataProviderName {
|
||||||
logger.Debug(logSender, connectionID, "data provider %#v not supported in subsystem mode, using %#v provider",
|
logger.Debug(logSender, connectionID, "data provider %q not supported in subsystem mode, using %q provider",
|
||||||
dataProviderConf.Driver, dataprovider.MemoryDataProviderName)
|
dataProviderConf.Driver, dataprovider.MemoryDataProviderName)
|
||||||
dataProviderConf.Driver = dataprovider.MemoryDataProviderName
|
dataProviderConf.Driver = dataprovider.MemoryDataProviderName
|
||||||
dataProviderConf.Name = ""
|
dataProviderConf.Name = ""
|
||||||
|
@ -127,6 +119,14 @@ Command-line flags should be specified in the Subsystem declaration.
|
||||||
logger.Error(logSender, connectionID, "unable to initialize the data provider: %v", err)
|
logger.Error(logSender, connectionID, "unable to initialize the data provider: %v", err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
commonConfig := config.GetCommonConfig()
|
||||||
|
// idle connection are managed externally
|
||||||
|
commonConfig.IdleTimeout = 0
|
||||||
|
config.SetCommonConfig(commonConfig)
|
||||||
|
if err := common.Initialize(config.GetCommonConfig(), dataProviderConf.GetShared()); err != nil {
|
||||||
|
logger.Error(logSender, connectionID, "%v", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
httpConfig := config.GetHTTPConfig()
|
httpConfig := config.GetHTTPConfig()
|
||||||
if err := httpConfig.Initialize(configDir); err != nil {
|
if err := httpConfig.Initialize(configDir); err != nil {
|
||||||
logger.Error(logSender, connectionID, "unable to initialize http client: %v", err)
|
logger.Error(logSender, connectionID, "unable to initialize http client: %v", err)
|
||||||
|
|
|
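Taken together, the hunks above reorder the subsystem startup: because the allow list and the rate limiter safe list now live in the data provider, common.Initialize can only run after the data provider has been initialized. A minimal sketch of the resulting order, assuming it sits inside the existing subsystem command file so the imports used in this commit are already available; the boolean passed to dataprovider.Initialize is an assumption, the real value is outside this hunk.

// Sketch only: subsystem startup order after this change, error details trimmed.
dataProviderConf := config.GetProviderConf()
if err := dataprovider.Initialize(dataProviderConf, configDir, false); err != nil {
	logger.Error(logSender, connectionID, "unable to initialize the data provider: %v", err)
	os.Exit(1)
}
// Only now can the common package build its provider-backed IP lists.
commonConfig := config.GetCommonConfig()
commonConfig.IdleTimeout = 0 // idle connections are managed externally
config.SetCommonConfig(commonConfig)
if err := common.Initialize(config.GetCommonConfig(), dataProviderConf.GetShared()); err != nil {
	logger.Error(logSender, connectionID, "%v", err)
	os.Exit(1)
}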
@ -172,24 +172,27 @@ func Initialize(c Configuration, isShared int) error {
|
||||||
Config.idleTimeoutAsDuration = time.Duration(Config.IdleTimeout) * time.Minute
|
Config.idleTimeoutAsDuration = time.Duration(Config.IdleTimeout) * time.Minute
|
||||||
startPeriodicChecks(periodicTimeoutCheckInterval)
|
startPeriodicChecks(periodicTimeoutCheckInterval)
|
||||||
Config.defender = nil
|
Config.defender = nil
|
||||||
Config.whitelist = nil
|
Config.allowList = nil
|
||||||
|
Config.rateLimitersList = nil
|
||||||
rateLimiters = make(map[string][]*rateLimiter)
|
rateLimiters = make(map[string][]*rateLimiter)
|
||||||
for _, rlCfg := range c.RateLimitersConfig {
|
for _, rlCfg := range c.RateLimitersConfig {
|
||||||
if rlCfg.isEnabled() {
|
if rlCfg.isEnabled() {
|
||||||
if err := rlCfg.validate(); err != nil {
|
if err := rlCfg.validate(); err != nil {
|
||||||
return fmt.Errorf("rate limiters initialization error: %w", err)
|
return fmt.Errorf("rate limiters initialization error: %w", err)
|
||||||
}
|
}
|
||||||
allowList, err := util.ParseAllowedIPAndRanges(rlCfg.AllowList)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("unable to parse rate limiter allow list %v: %v", rlCfg.AllowList, err)
|
|
||||||
}
|
|
||||||
rateLimiter := rlCfg.getLimiter()
|
rateLimiter := rlCfg.getLimiter()
|
||||||
rateLimiter.allowList = allowList
|
|
||||||
for _, protocol := range rlCfg.Protocols {
|
for _, protocol := range rlCfg.Protocols {
|
||||||
rateLimiters[protocol] = append(rateLimiters[protocol], rateLimiter)
|
rateLimiters[protocol] = append(rateLimiters[protocol], rateLimiter)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if len(rateLimiters) > 0 {
|
||||||
|
rateLimitersList, err := dataprovider.NewIPList(dataprovider.IPListTypeRateLimiterSafeList)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to initialize ratelimiters list: %w", err)
|
||||||
|
}
|
||||||
|
Config.rateLimitersList = rateLimitersList
|
||||||
|
}
|
||||||
if c.DefenderConfig.Enabled {
|
if c.DefenderConfig.Enabled {
|
||||||
if !util.Contains(supportedDefenderDrivers, c.DefenderConfig.Driver) {
|
if !util.Contains(supportedDefenderDrivers, c.DefenderConfig.Driver) {
|
||||||
return fmt.Errorf("unsupported defender driver %q", c.DefenderConfig.Driver)
|
return fmt.Errorf("unsupported defender driver %q", c.DefenderConfig.Driver)
|
||||||
|
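The per rate limiter AllowList parsing is gone from this code path: all enabled limiters now share one provider-backed safe list of type IPListTypeRateLimiterSafeList. A hedged sketch of how such a list can be seeded through the data provider, reusing only calls that appear in the tests of this commit; the three trailing string arguments are left empty as in those tests, they look like audit metadata but their exact meaning is not shown here.

// Sketch: add a network to the rate limiter safe list via the data provider.
entry := dataprovider.IPListEntry{
	IPOrNet: "172.16.0.0/16",
	Type:    dataprovider.IPListTypeRateLimiterSafeList,
	Mode:    dataprovider.ListModeAllow,
}
if err := dataprovider.AddIPListEntry(&entry, "", "", ""); err != nil {
	return fmt.Errorf("unable to add rate limiter safe list entry: %w", err)
}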
@ -208,15 +211,13 @@ func Initialize(c Configuration, isShared int) error {
|
||||||
logger.Info(logSender, "", "defender initialized with config %+v", c.DefenderConfig)
|
logger.Info(logSender, "", "defender initialized with config %+v", c.DefenderConfig)
|
||||||
Config.defender = defender
|
Config.defender = defender
|
||||||
}
|
}
|
||||||
if c.WhiteListFile != "" {
|
if c.AllowListStatus > 0 {
|
||||||
whitelist := &whitelist{
|
allowList, err := dataprovider.NewIPList(dataprovider.IPListTypeAllowList)
|
||||||
fileName: c.WhiteListFile,
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to initialize the allow list: %w", err)
|
||||||
}
|
}
|
||||||
if err := whitelist.reload(); err != nil {
|
logger.Info(logSender, "", "allow list initialized")
|
||||||
return fmt.Errorf("whitelist initialization error: %w", err)
|
Config.allowList = allowList
|
||||||
}
|
|
||||||
logger.Info(logSender, "", "whitelist initialized from file: %#v", c.WhiteListFile)
|
|
||||||
Config.whitelist = whitelist
|
|
||||||
}
|
}
|
||||||
vfs.SetTempPath(c.TempPath)
|
vfs.SetTempPath(c.TempPath)
|
||||||
dataprovider.SetTempPath(c.TempPath)
|
dataprovider.SetTempPath(c.TempPath)
|
||||||
|
@ -293,9 +294,15 @@ func getActiveConnections() int {
|
||||||
// It returns an error if the time to wait exceeds the max
|
// It returns an error if the time to wait exceeds the max
|
||||||
// allowed delay
|
// allowed delay
|
||||||
func LimitRate(protocol, ip string) (time.Duration, error) {
|
func LimitRate(protocol, ip string) (time.Duration, error) {
|
||||||
|
if Config.rateLimitersList != nil {
|
||||||
|
isListed, _, err := Config.rateLimitersList.IsListed(ip, protocol)
|
||||||
|
if err == nil && isListed {
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
for _, limiter := range rateLimiters[protocol] {
|
for _, limiter := range rateLimiters[protocol] {
|
||||||
if delay, err := limiter.Wait(ip); err != nil {
|
if delay, err := limiter.Wait(ip, protocol); err != nil {
|
||||||
logger.Debug(logSender, "", "protocol %v ip %v: %v", protocol, ip, err)
|
logger.Debug(logSender, "", "protocol %s ip %s: %v", protocol, ip, err)
|
||||||
return delay, err
|
return delay, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
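LimitRate is now protocol aware: before consulting any limiter it checks the provider-backed safe list and returns immediately for listed sources. A small caller-side sketch, assuming a caller inside the sftpgo module with the common package already initialized; the function name is hypothetical.

// Sketch: gate an incoming SSH connection with the protocol-aware rate limiter.
func checkRate(ip string) error {
	if _, err := common.LimitRate(common.ProtocolSSH, ip); err != nil {
		// the required wait exceeds the configured maximum delay
		return err
	}
	return nil
}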
@ -305,21 +312,11 @@ func LimitRate(protocol, ip string) (time.Duration, error) {
|
||||||
// Reload reloads the whitelist, the IP filter plugin and the defender's block and safe lists
|
// Reload reloads the IP filter plugin
|
||||||
func Reload() error {
|
func Reload() error {
|
||||||
plugin.Handler.ReloadFilter()
|
plugin.Handler.ReloadFilter()
|
||||||
var errWithelist error
|
return nil
|
||||||
if Config.whitelist != nil {
|
|
||||||
errWithelist = Config.whitelist.reload()
|
|
||||||
}
|
|
||||||
if Config.defender == nil {
|
|
||||||
return errWithelist
|
|
||||||
}
|
|
||||||
if err := Config.defender.Reload(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return errWithelist
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsBanned returns true if the specified IP address is banned
|
// IsBanned returns true if the specified IP address is banned
|
||||||
func IsBanned(ip string) bool {
|
func IsBanned(ip, protocol string) bool {
|
||||||
if plugin.Handler.IsIPBanned(ip) {
|
if plugin.Handler.IsIPBanned(ip) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
@ -327,7 +324,7 @@ func IsBanned(ip string) bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
return Config.defender.IsBanned(ip)
|
return Config.defender.IsBanned(ip, protocol)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetDefenderBanTime returns the ban time for the given IP
|
// GetDefenderBanTime returns the ban time for the given IP
|
||||||
|
@ -377,12 +374,12 @@ func GetDefenderScore(ip string) (int, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddDefenderEvent adds the specified defender event for the given IP
|
// AddDefenderEvent adds the specified defender event for the given IP
|
||||||
func AddDefenderEvent(ip string, event HostEvent) {
|
func AddDefenderEvent(ip, protocol string, event HostEvent) {
|
||||||
if Config.defender == nil {
|
if Config.defender == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
Config.defender.AddEvent(ip, event)
|
Config.defender.AddEvent(ip, protocol, event)
|
||||||
}
|
}
|
||||||
|
|
||||||
func startPeriodicChecks(duration time.Duration) {
|
func startPeriodicChecks(duration time.Duration) {
|
||||||
|
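AddDefenderEvent and IsBanned now take the protocol, so defender list entries can be scoped per protocol. A caller-side sketch under the same assumptions as the previous snippets; the helper name is hypothetical.

// Sketch: record a failed login and check whether the host is now banned for SSH.
func recordFailedLogin(ip string) bool {
	common.AddDefenderEvent(ip, common.ProtocolSSH, common.HostEventLoginFailed)
	// true means the client should be disconnected before any authentication attempt
	return common.IsBanned(ip, common.ProtocolSSH)
}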
@ -449,7 +446,7 @@ type StatAttributes struct {
|
||||||
Size int64
|
Size int64
|
||||||
}
|
}
|
||||||
|
|
||||||
// ConnectionTransfer defines the transfer details to expose
|
// ConnectionTransfer defines the transfer details
|
||||||
type ConnectionTransfer struct {
|
type ConnectionTransfer struct {
|
||||||
ID int64 `json:"-"`
|
ID int64 `json:"-"`
|
||||||
OperationType string `json:"operation_type"`
|
OperationType string `json:"operation_type"`
|
||||||
|
@ -479,35 +476,6 @@ func (t *ConnectionTransfer) getConnectionTransferAsString() string {
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
type whitelist struct {
|
|
||||||
fileName string
|
|
||||||
sync.RWMutex
|
|
||||||
list HostList
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *whitelist) reload() error {
|
|
||||||
list, err := loadHostListFromFile(l.fileName)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if list == nil {
|
|
||||||
return errors.New("cannot accept a nil whitelist")
|
|
||||||
}
|
|
||||||
|
|
||||||
l.Lock()
|
|
||||||
defer l.Unlock()
|
|
||||||
|
|
||||||
l.list = *list
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *whitelist) isAllowed(ip string) bool {
|
|
||||||
l.RLock()
|
|
||||||
defer l.RUnlock()
|
|
||||||
|
|
||||||
return l.list.isListed(ip)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Configuration defines configuration parameters common to all supported protocols
|
// Configuration defines configuration parameters common to all supported protocols
|
||||||
type Configuration struct {
|
type Configuration struct {
|
||||||
// Maximum idle timeout as minutes. If a client is idle for a time that exceeds this setting it will be disconnected.
|
// Maximum idle timeout as minutes. If a client is idle for a time that exceeds this setting it will be disconnected.
|
||||||
|
@ -578,10 +546,11 @@ type Configuration struct {
|
||||||
MaxTotalConnections int `json:"max_total_connections" mapstructure:"max_total_connections"`
|
MaxTotalConnections int `json:"max_total_connections" mapstructure:"max_total_connections"`
|
||||||
// Maximum number of concurrent client connections from the same host (IP). 0 means unlimited
|
// Maximum number of concurrent client connections from the same host (IP). 0 means unlimited
|
||||||
MaxPerHostConnections int `json:"max_per_host_connections" mapstructure:"max_per_host_connections"`
|
MaxPerHostConnections int `json:"max_per_host_connections" mapstructure:"max_per_host_connections"`
|
||||||
// Path to a file containing a list of IP addresses and/or networks to allow.
|
// Defines the status of the global allow list. 0 means disabled, 1 enabled.
|
||||||
// Only the listed IPs/networks can access the configured services, all other client connections
|
// If enabled, only the listed IPs/networks can access the configured services, all other
|
||||||
// will be dropped before they even try to authenticate.
|
// client connections will be dropped before they even try to authenticate.
|
||||||
WhiteListFile string `json:"whitelist_file" mapstructure:"whitelist_file"`
|
// Ensure to enable this setting only after adding some allowed ip/networks from the WebAdmin/REST API
|
||||||
|
AllowListStatus int `json:"allowlist_status" mapstructure:"allowlist_status"`
|
||||||
// Allow users on this instance to use other users/virtual folders on this instance as storage backend.
|
// Allow users on this instance to use other users/virtual folders on this instance as storage backend.
|
||||||
// Enable this setting if you know what you are doing.
|
// Enable this setting if you know what you are doing.
|
||||||
AllowSelfConnections int `json:"allow_self_connections" mapstructure:"allow_self_connections"`
|
AllowSelfConnections int `json:"allow_self_connections" mapstructure:"allow_self_connections"`
|
||||||
|
@ -592,7 +561,8 @@ type Configuration struct {
|
||||||
idleTimeoutAsDuration time.Duration
|
idleTimeoutAsDuration time.Duration
|
||||||
idleLoginTimeout time.Duration
|
idleLoginTimeout time.Duration
|
||||||
defender Defender
|
defender Defender
|
||||||
whitelist *whitelist
|
allowList *dataprovider.IPList
|
||||||
|
rateLimitersList *dataprovider.IPList
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsAtomicUploadEnabled returns true if atomic upload is enabled
|
// IsAtomicUploadEnabled returns true if atomic upload is enabled
|
||||||
|
@ -633,6 +603,24 @@ func (c *Configuration) GetProxyListener(listener net.Listener) (*proxyproto.Lis
|
||||||
return nil, errors.New("proxy protocol not configured")
|
return nil, errors.New("proxy protocol not configured")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetRateLimitersStatus returns the rate limiters status
|
||||||
|
func (c *Configuration) GetRateLimitersStatus() (bool, []string) {
|
||||||
|
enabled := false
|
||||||
|
var protocols []string
|
||||||
|
for _, rlCfg := range c.RateLimitersConfig {
|
||||||
|
if rlCfg.isEnabled() {
|
||||||
|
enabled = true
|
||||||
|
protocols = append(protocols, rlCfg.Protocols...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return enabled, util.RemoveDuplicates(protocols, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsAllowListEnabled returns true if the global allow list is enabled
|
||||||
|
func (c *Configuration) IsAllowListEnabled() bool {
|
||||||
|
return c.AllowListStatus > 0
|
||||||
|
}
|
||||||
|
|
||||||
// ExecuteStartupHook runs the startup hook if defined
|
// ExecuteStartupHook runs the startup hook if defined
|
||||||
func (c *Configuration) ExecuteStartupHook() error {
|
func (c *Configuration) ExecuteStartupHook() error {
|
||||||
if c.StartupHook == "" {
|
if c.StartupHook == "" {
|
||||||
|
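The two helpers added above give other packages, for example the WebAdmin/REST layer mentioned in the configuration comments, a way to know whether the provider-backed lists matter at all. A hedged usage sketch, assuming common.Config is the exported package-level Configuration used elsewhere in this commit.

// Sketch: decide whether list management pages are relevant.
enabled, protocols := common.Config.GetRateLimitersStatus()
if enabled {
	fmt.Printf("rate limiters active for protocols: %v\n", protocols)
}
if common.Config.IsAllowListEnabled() {
	fmt.Println("global allow list enabled, only listed IPs/networks can connect")
}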
@ -941,7 +929,7 @@ func (conns *ActiveConnections) Remove(connectionID string) {
|
||||||
logger.ConnectionFailedLog("", ip, dataprovider.LoginMethodNoAuthTryed, conn.GetProtocol(),
|
logger.ConnectionFailedLog("", ip, dataprovider.LoginMethodNoAuthTryed, conn.GetProtocol(),
|
||||||
dataprovider.ErrNoAuthTryed.Error())
|
dataprovider.ErrNoAuthTryed.Error())
|
||||||
metric.AddNoAuthTryed()
|
metric.AddNoAuthTryed()
|
||||||
AddDefenderEvent(ip, HostEventNoLoginTried)
|
AddDefenderEvent(ip, ProtocolFTP, HostEventNoLoginTried)
|
||||||
dataprovider.ExecutePostLoginHook(&dataprovider.User{}, dataprovider.LoginMethodNoAuthTryed, ip,
|
dataprovider.ExecutePostLoginHook(&dataprovider.User{}, dataprovider.LoginMethodNoAuthTryed, ip,
|
||||||
conn.GetProtocol(), dataprovider.ErrNoAuthTryed)
|
conn.GetProtocol(), dataprovider.ErrNoAuthTryed)
|
||||||
}
|
}
|
||||||
|
@ -1130,12 +1118,18 @@ func (conns *ActiveConnections) GetClientConnections() int32 {
|
||||||
// IsNewConnectionAllowed returns an error if the maximum number of concurrent allowed
|
// IsNewConnectionAllowed returns an error if the maximum number of concurrent allowed
|
||||||
// connections is exceeded or a whitelist is defined and the specified ipAddr is not listed
|
// connections is exceeded or an allow list is defined and the specified ipAddr is not listed
|
||||||
// or the service is shutting down
|
// or the service is shutting down
|
||||||
func (conns *ActiveConnections) IsNewConnectionAllowed(ipAddr string) error {
|
func (conns *ActiveConnections) IsNewConnectionAllowed(ipAddr, protocol string) error {
|
||||||
if isShuttingDown.Load() {
|
if isShuttingDown.Load() {
|
||||||
return ErrShuttingDown
|
return ErrShuttingDown
|
||||||
}
|
}
|
||||||
if Config.whitelist != nil {
|
if Config.allowList != nil {
|
||||||
if !Config.whitelist.isAllowed(ipAddr) {
|
isListed, _, err := Config.allowList.IsListed(ipAddr, protocol)
|
||||||
|
if err != nil {
|
||||||
|
logger.Error(logSender, "", "unable to query allow list, connection denied, ip %q, protocol %s, err: %v",
|
||||||
|
ipAddr, protocol, err)
|
||||||
|
return ErrConnectionDenied
|
||||||
|
}
|
||||||
|
if !isListed {
|
||||||
return ErrConnectionDenied
|
return ErrConnectionDenied
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
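IsNewConnectionAllowed now needs the protocol too, and a provider error during the allow list lookup is treated as a denial. A caller-side sketch for a freshly accepted connection; the listener helper is hypothetical and not part of this commit.

// Sketch: gate an accepted connection before any protocol handshake.
func acceptConn(conn net.Conn) error {
	ip, _, err := net.SplitHostPort(conn.RemoteAddr().String())
	if err != nil {
		conn.Close()
		return err
	}
	if err := common.Connections.IsNewConnectionAllowed(ip, common.ProtocolFTP); err != nil {
		// denied by the allow list, by connection limits or because of a shutdown
		conn.Close()
		return err
	}
	return nil
}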
@ -1146,7 +1140,7 @@ func (conns *ActiveConnections) IsNewConnectionAllowed(ipAddr string) error {
|
||||||
if Config.MaxPerHostConnections > 0 {
|
if Config.MaxPerHostConnections > 0 {
|
||||||
if total := conns.clients.getTotalFrom(ipAddr); total > Config.MaxPerHostConnections {
|
if total := conns.clients.getTotalFrom(ipAddr); total > Config.MaxPerHostConnections {
|
||||||
logger.Info(logSender, "", "active connections from %s %d/%d", ipAddr, total, Config.MaxPerHostConnections)
|
logger.Info(logSender, "", "active connections from %s %d/%d", ipAddr, total, Config.MaxPerHostConnections)
|
||||||
AddDefenderEvent(ipAddr, HostEventLimitExceeded)
|
AddDefenderEvent(ipAddr, protocol, HostEventLimitExceeded)
|
||||||
return ErrConnectionDenied
|
return ErrConnectionDenied
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -215,6 +215,66 @@ func TestConnections(t *testing.T) {
|
||||||
Connections.RUnlock()
|
Connections.RUnlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestInitializationClosedProvider(t *testing.T) {
|
||||||
|
configCopy := Config
|
||||||
|
|
||||||
|
providerConf := dataprovider.GetProviderConfig()
|
||||||
|
err := dataprovider.Close()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
config := Configuration{
|
||||||
|
AllowListStatus: 1,
|
||||||
|
}
|
||||||
|
err = Initialize(config, 0)
|
||||||
|
if assert.Error(t, err) {
|
||||||
|
assert.Contains(t, err.Error(), "unable to initialize the allow list")
|
||||||
|
}
|
||||||
|
|
||||||
|
config.AllowListStatus = 0
|
||||||
|
config.RateLimitersConfig = []RateLimiterConfig{
|
||||||
|
{
|
||||||
|
Average: 100,
|
||||||
|
Period: 1000,
|
||||||
|
Burst: 5,
|
||||||
|
Type: int(rateLimiterTypeGlobal),
|
||||||
|
Protocols: rateLimiterProtocolValues,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
err = Initialize(config, 0)
|
||||||
|
if assert.Error(t, err) {
|
||||||
|
assert.Contains(t, err.Error(), "unable to initialize ratelimiters list")
|
||||||
|
}
|
||||||
|
|
||||||
|
config.RateLimitersConfig = nil
|
||||||
|
config.DefenderConfig = DefenderConfig{
|
||||||
|
Enabled: true,
|
||||||
|
Driver: DefenderDriverProvider,
|
||||||
|
BanTime: 10,
|
||||||
|
BanTimeIncrement: 50,
|
||||||
|
Threshold: 10,
|
||||||
|
ScoreInvalid: 2,
|
||||||
|
ScoreValid: 1,
|
||||||
|
ScoreNoAuth: 2,
|
||||||
|
ObservationTime: 15,
|
||||||
|
EntriesSoftLimit: 100,
|
||||||
|
EntriesHardLimit: 150,
|
||||||
|
}
|
||||||
|
err = Initialize(config, 0)
|
||||||
|
if assert.Error(t, err) {
|
||||||
|
assert.Contains(t, err.Error(), "defender initialization error")
|
||||||
|
}
|
||||||
|
config.DefenderConfig.Driver = DefenderDriverMemory
|
||||||
|
err = Initialize(config, 0)
|
||||||
|
if assert.Error(t, err) {
|
||||||
|
assert.Contains(t, err.Error(), "defender initialization error")
|
||||||
|
}
|
||||||
|
|
||||||
|
err = dataprovider.Initialize(providerConf, configDir, true)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
Config = configCopy
|
||||||
|
}
|
||||||
|
|
||||||
func TestSSHConnections(t *testing.T) {
|
func TestSSHConnections(t *testing.T) {
|
||||||
conn1, conn2 := net.Pipe()
|
conn1, conn2 := net.Pipe()
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
|
@ -298,10 +358,10 @@ func TestDefenderIntegration(t *testing.T) {
|
||||||
|
|
||||||
assert.Nil(t, Reload())
|
assert.Nil(t, Reload())
|
||||||
// 192.168.1.12 is banned from the ipfilter plugin
|
// 192.168.1.12 is banned from the ipfilter plugin
|
||||||
assert.True(t, IsBanned("192.168.1.12"))
|
assert.True(t, IsBanned("192.168.1.12", ProtocolFTP))
|
||||||
|
|
||||||
AddDefenderEvent(ip, HostEventNoLoginTried)
|
AddDefenderEvent(ip, ProtocolFTP, HostEventNoLoginTried)
|
||||||
assert.False(t, IsBanned(ip))
|
assert.False(t, IsBanned(ip, ProtocolFTP))
|
||||||
|
|
||||||
banTime, err := GetDefenderBanTime(ip)
|
banTime, err := GetDefenderBanTime(ip)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
@ -342,21 +402,13 @@ func TestDefenderIntegration(t *testing.T) {
|
||||||
// ScoreInvalid cannot be greater than threshold
|
// ScoreInvalid cannot be greater than threshold
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
Config.DefenderConfig.Threshold = 3
|
Config.DefenderConfig.Threshold = 3
|
||||||
Config.DefenderConfig.SafeListFile = filepath.Join(os.TempDir(), "sl.json")
|
|
||||||
err = os.WriteFile(Config.DefenderConfig.SafeListFile, []byte(`{}`), 0644)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
defer os.Remove(Config.DefenderConfig.SafeListFile)
|
|
||||||
|
|
||||||
err = Initialize(Config, 0)
|
err = Initialize(Config, 0)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Nil(t, Reload())
|
assert.Nil(t, Reload())
|
||||||
err = os.WriteFile(Config.DefenderConfig.SafeListFile, []byte(`{`), 0644)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
err = Reload()
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
AddDefenderEvent(ip, HostEventNoLoginTried)
|
AddDefenderEvent(ip, ProtocolSSH, HostEventNoLoginTried)
|
||||||
assert.False(t, IsBanned(ip))
|
assert.False(t, IsBanned(ip, ProtocolSSH))
|
||||||
score, err = GetDefenderScore(ip)
|
score, err = GetDefenderScore(ip)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, 2, score)
|
assert.Equal(t, 2, score)
|
||||||
|
@ -370,9 +422,9 @@ func TestDefenderIntegration(t *testing.T) {
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Nil(t, banTime)
|
assert.Nil(t, banTime)
|
||||||
|
|
||||||
AddDefenderEvent(ip, HostEventLoginFailed)
|
AddDefenderEvent(ip, ProtocolHTTP, HostEventLoginFailed)
|
||||||
AddDefenderEvent(ip, HostEventNoLoginTried)
|
AddDefenderEvent(ip, ProtocolHTTP, HostEventNoLoginTried)
|
||||||
assert.True(t, IsBanned(ip))
|
assert.True(t, IsBanned(ip, ProtocolHTTP))
|
||||||
score, err = GetDefenderScore(ip)
|
score, err = GetDefenderScore(ip)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, 0, score)
|
assert.Equal(t, 0, score)
|
||||||
|
@ -398,9 +450,31 @@ func TestDefenderIntegration(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRateLimitersIntegration(t *testing.T) {
|
func TestRateLimitersIntegration(t *testing.T) {
|
||||||
// by default defender is nil
|
|
||||||
configCopy := Config
|
configCopy := Config
|
||||||
|
|
||||||
|
enabled, protocols := Config.GetRateLimitersStatus()
|
||||||
|
assert.False(t, enabled)
|
||||||
|
assert.Len(t, protocols, 0)
|
||||||
|
|
||||||
|
entries := []dataprovider.IPListEntry{
|
||||||
|
{
|
||||||
|
IPOrNet: "172.16.24.7/32",
|
||||||
|
Type: dataprovider.IPListTypeRateLimiterSafeList,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "172.16.0.0/16",
|
||||||
|
Type: dataprovider.IPListTypeRateLimiterSafeList,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for idx := range entries {
|
||||||
|
e := entries[idx]
|
||||||
|
err := dataprovider.AddIPListEntry(&e, "", "", "")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
Config.RateLimitersConfig = []RateLimiterConfig{
|
Config.RateLimitersConfig = []RateLimiterConfig{
|
||||||
{
|
{
|
||||||
Average: 100,
|
Average: 100,
|
||||||
|
@ -423,16 +497,10 @@ func TestRateLimitersIntegration(t *testing.T) {
|
||||||
err := Initialize(Config, 0)
|
err := Initialize(Config, 0)
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
Config.RateLimitersConfig[0].Period = 1000
|
Config.RateLimitersConfig[0].Period = 1000
|
||||||
Config.RateLimitersConfig[0].AllowList = []string{"1.1.1", "1.1.1.2"}
|
|
||||||
err = Initialize(Config, 0)
|
|
||||||
if assert.Error(t, err) {
|
|
||||||
assert.Contains(t, err.Error(), "unable to parse rate limiter allow list")
|
|
||||||
}
|
|
||||||
Config.RateLimitersConfig[0].AllowList = []string{"172.16.24.7"}
|
|
||||||
Config.RateLimitersConfig[1].AllowList = []string{"172.16.0.0/16"}
|
|
||||||
|
|
||||||
err = Initialize(Config, 0)
|
err = Initialize(Config, 0)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
assert.NotNil(t, Config.rateLimitersList)
|
||||||
|
|
||||||
assert.Len(t, rateLimiters, 4)
|
assert.Len(t, rateLimiters, 4)
|
||||||
assert.Len(t, rateLimiters[ProtocolSSH], 1)
|
assert.Len(t, rateLimiters[ProtocolSSH], 1)
|
||||||
|
@ -440,9 +508,17 @@ func TestRateLimitersIntegration(t *testing.T) {
|
||||||
assert.Len(t, rateLimiters[ProtocolWebDAV], 2)
|
assert.Len(t, rateLimiters[ProtocolWebDAV], 2)
|
||||||
assert.Len(t, rateLimiters[ProtocolHTTP], 1)
|
assert.Len(t, rateLimiters[ProtocolHTTP], 1)
|
||||||
|
|
||||||
|
enabled, protocols = Config.GetRateLimitersStatus()
|
||||||
|
assert.True(t, enabled)
|
||||||
|
assert.Len(t, protocols, 4)
|
||||||
|
assert.Contains(t, protocols, ProtocolFTP)
|
||||||
|
assert.Contains(t, protocols, ProtocolSSH)
|
||||||
|
assert.Contains(t, protocols, ProtocolHTTP)
|
||||||
|
assert.Contains(t, protocols, ProtocolWebDAV)
|
||||||
|
|
||||||
source1 := "127.1.1.1"
|
source1 := "127.1.1.1"
|
||||||
source2 := "127.1.1.2"
|
source2 := "127.1.1.2"
|
||||||
source3 := "172.16.24.7" // whitelisted
|
source3 := "172.16.24.7" // in safelist
|
||||||
|
|
||||||
_, err = LimitRate(ProtocolSSH, source1)
|
_, err = LimitRate(ProtocolSSH, source1)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
@ -465,59 +541,12 @@ func TestRateLimitersIntegration(t *testing.T) {
|
||||||
_, err = LimitRate(ProtocolWebDAV, source3)
|
_, err = LimitRate(ProtocolWebDAV, source3)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
for _, e := range entries {
|
||||||
Config = configCopy
|
err := dataprovider.DeleteIPListEntry(e.IPOrNet, e.Type, "", "", "")
|
||||||
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWhitelist(t *testing.T) {
|
assert.Nil(t, configCopy.rateLimitersList)
|
||||||
configCopy := Config
|
|
||||||
|
|
||||||
Config.whitelist = &whitelist{}
|
|
||||||
err := Config.whitelist.reload()
|
|
||||||
if assert.Error(t, err) {
|
|
||||||
assert.Contains(t, err.Error(), "cannot accept a nil whitelist")
|
|
||||||
}
|
|
||||||
wlFile := filepath.Join(os.TempDir(), "wl.json")
|
|
||||||
Config.WhiteListFile = wlFile
|
|
||||||
|
|
||||||
err = os.WriteFile(wlFile, []byte(`invalid list file`), 0664)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
err = Initialize(Config, 0)
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
wl := HostListFile{
|
|
||||||
IPAddresses: []string{"172.18.1.1", "172.18.1.2"},
|
|
||||||
CIDRNetworks: []string{"10.8.7.0/24"},
|
|
||||||
}
|
|
||||||
data, err := json.Marshal(wl)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
err = os.WriteFile(wlFile, data, 0664)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
defer os.Remove(wlFile)
|
|
||||||
|
|
||||||
err = Initialize(Config, 0)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
assert.NoError(t, Connections.IsNewConnectionAllowed("172.18.1.1"))
|
|
||||||
assert.Error(t, Connections.IsNewConnectionAllowed("172.18.1.3"))
|
|
||||||
assert.NoError(t, Connections.IsNewConnectionAllowed("10.8.7.3"))
|
|
||||||
assert.Error(t, Connections.IsNewConnectionAllowed("10.8.8.2"))
|
|
||||||
|
|
||||||
wl.IPAddresses = append(wl.IPAddresses, "172.18.1.3")
|
|
||||||
wl.CIDRNetworks = append(wl.CIDRNetworks, "10.8.8.0/24")
|
|
||||||
data, err = json.Marshal(wl)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
err = os.WriteFile(wlFile, data, 0664)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Error(t, Connections.IsNewConnectionAllowed("10.8.8.3"))
|
|
||||||
|
|
||||||
err = Reload()
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.NoError(t, Connections.IsNewConnectionAllowed("10.8.8.3"))
|
|
||||||
assert.NoError(t, Connections.IsNewConnectionAllowed("172.18.1.3"))
|
|
||||||
assert.NoError(t, Connections.IsNewConnectionAllowed("172.18.1.2"))
|
|
||||||
assert.Error(t, Connections.IsNewConnectionAllowed("172.18.1.12"))
|
|
||||||
|
|
||||||
Config = configCopy
|
Config = configCopy
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -551,12 +580,12 @@ func TestMaxConnections(t *testing.T) {
|
||||||
Config.MaxPerHostConnections = 0
|
Config.MaxPerHostConnections = 0
|
||||||
|
|
||||||
ipAddr := "192.168.7.8"
|
ipAddr := "192.168.7.8"
|
||||||
assert.NoError(t, Connections.IsNewConnectionAllowed(ipAddr))
|
assert.NoError(t, Connections.IsNewConnectionAllowed(ipAddr, ProtocolFTP))
|
||||||
|
|
||||||
Config.MaxTotalConnections = 1
|
Config.MaxTotalConnections = 1
|
||||||
Config.MaxPerHostConnections = perHost
|
Config.MaxPerHostConnections = perHost
|
||||||
|
|
||||||
assert.NoError(t, Connections.IsNewConnectionAllowed(ipAddr))
|
assert.NoError(t, Connections.IsNewConnectionAllowed(ipAddr, ProtocolHTTP))
|
||||||
c := NewBaseConnection("id", ProtocolSFTP, "", "", dataprovider.User{})
|
c := NewBaseConnection("id", ProtocolSFTP, "", "", dataprovider.User{})
|
||||||
fakeConn := &fakeConnection{
|
fakeConn := &fakeConnection{
|
||||||
BaseConnection: c,
|
BaseConnection: c,
|
||||||
|
@ -564,18 +593,18 @@ func TestMaxConnections(t *testing.T) {
|
||||||
err := Connections.Add(fakeConn)
|
err := Connections.Add(fakeConn)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Len(t, Connections.GetStats(""), 1)
|
assert.Len(t, Connections.GetStats(""), 1)
|
||||||
assert.Error(t, Connections.IsNewConnectionAllowed(ipAddr))
|
assert.Error(t, Connections.IsNewConnectionAllowed(ipAddr, ProtocolSSH))
|
||||||
|
|
||||||
res := Connections.Close(fakeConn.GetID(), "")
|
res := Connections.Close(fakeConn.GetID(), "")
|
||||||
assert.True(t, res)
|
assert.True(t, res)
|
||||||
assert.Eventually(t, func() bool { return len(Connections.GetStats("")) == 0 }, 300*time.Millisecond, 50*time.Millisecond)
|
assert.Eventually(t, func() bool { return len(Connections.GetStats("")) == 0 }, 300*time.Millisecond, 50*time.Millisecond)
|
||||||
|
|
||||||
assert.NoError(t, Connections.IsNewConnectionAllowed(ipAddr))
|
assert.NoError(t, Connections.IsNewConnectionAllowed(ipAddr, ProtocolSSH))
|
||||||
Connections.AddClientConnection(ipAddr)
|
Connections.AddClientConnection(ipAddr)
|
||||||
Connections.AddClientConnection(ipAddr)
|
Connections.AddClientConnection(ipAddr)
|
||||||
assert.Error(t, Connections.IsNewConnectionAllowed(ipAddr))
|
assert.Error(t, Connections.IsNewConnectionAllowed(ipAddr, ProtocolSSH))
|
||||||
Connections.RemoveClientConnection(ipAddr)
|
Connections.RemoveClientConnection(ipAddr)
|
||||||
assert.NoError(t, Connections.IsNewConnectionAllowed(ipAddr))
|
assert.NoError(t, Connections.IsNewConnectionAllowed(ipAddr, ProtocolWebDAV))
|
||||||
Connections.RemoveClientConnection(ipAddr)
|
Connections.RemoveClientConnection(ipAddr)
|
||||||
|
|
||||||
Config.MaxTotalConnections = oldValue
|
Config.MaxTotalConnections = oldValue
|
||||||
|
@ -615,13 +644,13 @@ func TestMaxConnectionPerHost(t *testing.T) {
|
||||||
|
|
||||||
ipAddr := "192.168.9.9"
|
ipAddr := "192.168.9.9"
|
||||||
Connections.AddClientConnection(ipAddr)
|
Connections.AddClientConnection(ipAddr)
|
||||||
assert.NoError(t, Connections.IsNewConnectionAllowed(ipAddr))
|
assert.NoError(t, Connections.IsNewConnectionAllowed(ipAddr, ProtocolSSH))
|
||||||
|
|
||||||
Connections.AddClientConnection(ipAddr)
|
Connections.AddClientConnection(ipAddr)
|
||||||
assert.NoError(t, Connections.IsNewConnectionAllowed(ipAddr))
|
assert.NoError(t, Connections.IsNewConnectionAllowed(ipAddr, ProtocolWebDAV))
|
||||||
|
|
||||||
Connections.AddClientConnection(ipAddr)
|
Connections.AddClientConnection(ipAddr)
|
||||||
assert.Error(t, Connections.IsNewConnectionAllowed(ipAddr))
|
assert.Error(t, Connections.IsNewConnectionAllowed(ipAddr, ProtocolFTP))
|
||||||
assert.Equal(t, int32(3), Connections.GetClientConnections())
|
assert.Equal(t, int32(3), Connections.GetClientConnections())
|
||||||
|
|
||||||
Connections.RemoveClientConnection(ipAddr)
|
Connections.RemoveClientConnection(ipAddr)
|
||||||
|
@ -725,7 +754,7 @@ func TestCloseConnection(t *testing.T) {
|
||||||
fakeConn := &fakeConnection{
|
fakeConn := &fakeConnection{
|
||||||
BaseConnection: c,
|
BaseConnection: c,
|
||||||
}
|
}
|
||||||
assert.NoError(t, Connections.IsNewConnectionAllowed("127.0.0.1"))
|
assert.NoError(t, Connections.IsNewConnectionAllowed("127.0.0.1", ProtocolHTTP))
|
||||||
err := Connections.Add(fakeConn)
|
err := Connections.Add(fakeConn)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Len(t, Connections.GetStats(""), 1)
|
assert.Len(t, Connections.GetStats(""), 1)
|
||||||
|
@ -1440,6 +1469,118 @@ func TestMetadataAPIRole(t *testing.T) {
|
||||||
require.Len(t, ActiveMetadataChecks.Get(""), 0)
|
require.Len(t, ActiveMetadataChecks.Get(""), 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestIPList(t *testing.T) {
|
||||||
|
type test struct {
|
||||||
|
ip string
|
||||||
|
protocol string
|
||||||
|
expectedMatch bool
|
||||||
|
expectedMode int
|
||||||
|
expectedErr bool
|
||||||
|
}
|
||||||
|
|
||||||
|
entries := []dataprovider.IPListEntry{
|
||||||
|
{
|
||||||
|
IPOrNet: "192.168.0.0/25",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "192.168.0.128/25",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeDeny,
|
||||||
|
Protocols: 3,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "192.168.2.128/32",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
Protocols: 5,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "::/0",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeDeny,
|
||||||
|
Protocols: 4,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "2001:4860:4860::8888/120",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeDeny,
|
||||||
|
Protocols: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "2001:4860:4860::8988/120",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
Protocols: 3,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "::1/128",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
Protocols: 0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
ipList, err := dataprovider.NewIPList(dataprovider.IPListTypeDefender)
|
||||||
|
require.NoError(t, err)
|
||||||
|
for idx := range entries {
|
||||||
|
e := entries[idx]
|
||||||
|
err := dataprovider.AddIPListEntry(&e, "", "", "")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
tests := []test{
|
||||||
|
{ip: "1.1.1.1", protocol: ProtocolSSH, expectedMatch: false, expectedMode: 0, expectedErr: false},
|
||||||
|
{ip: "invalid ip", protocol: ProtocolSSH, expectedMatch: false, expectedMode: 0, expectedErr: true},
|
||||||
|
{ip: "192.168.0.1", protocol: ProtocolFTP, expectedMatch: true, expectedMode: dataprovider.ListModeAllow, expectedErr: false},
|
||||||
|
{ip: "192.168.0.2", protocol: ProtocolHTTP, expectedMatch: true, expectedMode: dataprovider.ListModeAllow, expectedErr: false},
|
||||||
|
{ip: "192.168.0.3", protocol: ProtocolWebDAV, expectedMatch: true, expectedMode: dataprovider.ListModeAllow, expectedErr: false},
|
||||||
|
{ip: "192.168.0.4", protocol: ProtocolSSH, expectedMatch: true, expectedMode: dataprovider.ListModeAllow, expectedErr: false},
|
||||||
|
{ip: "192.168.0.156", protocol: ProtocolSSH, expectedMatch: true, expectedMode: dataprovider.ListModeDeny, expectedErr: false},
|
||||||
|
{ip: "192.168.0.158", protocol: ProtocolFTP, expectedMatch: true, expectedMode: dataprovider.ListModeDeny, expectedErr: false},
|
||||||
|
{ip: "192.168.0.158", protocol: ProtocolHTTP, expectedMatch: false, expectedMode: 0, expectedErr: false},
|
||||||
|
{ip: "192.168.2.128", protocol: ProtocolHTTP, expectedMatch: false, expectedMode: 0, expectedErr: false},
|
||||||
|
{ip: "192.168.2.128", protocol: ProtocolSSH, expectedMatch: true, expectedMode: dataprovider.ListModeAllow, expectedErr: false},
|
||||||
|
{ip: "::2", protocol: ProtocolSSH, expectedMatch: false, expectedMode: 0, expectedErr: false},
|
||||||
|
{ip: "::2", protocol: ProtocolWebDAV, expectedMatch: true, expectedMode: dataprovider.ListModeDeny, expectedErr: false},
|
||||||
|
{ip: "::1", protocol: ProtocolSSH, expectedMatch: true, expectedMode: dataprovider.ListModeAllow, expectedErr: false},
|
||||||
|
{ip: "::1", protocol: ProtocolHTTP, expectedMatch: true, expectedMode: dataprovider.ListModeAllow, expectedErr: false},
|
||||||
|
{ip: "2001:4860:4860:0000:0000:0000:0000:8889", protocol: ProtocolSSH, expectedMatch: true, expectedMode: dataprovider.ListModeDeny, expectedErr: false},
|
||||||
|
{ip: "2001:4860:4860:0000:0000:0000:0000:8889", protocol: ProtocolFTP, expectedMatch: false, expectedMode: 0, expectedErr: false},
|
||||||
|
{ip: "2001:4860:4860:0000:0000:0000:0000:8989", protocol: ProtocolFTP, expectedMatch: true, expectedMode: dataprovider.ListModeAllow, expectedErr: false},
|
||||||
|
{ip: "2001:4860:4860:0000:0000:0000:0000:89F1", protocol: ProtocolSSH, expectedMatch: true, expectedMode: dataprovider.ListModeAllow, expectedErr: false},
|
||||||
|
{ip: "2001:4860:4860:0000:0000:0000:0000:89F1", protocol: ProtocolHTTP, expectedMatch: false, expectedMode: 0, expectedErr: false},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
match, mode, err := ipList.IsListed(tc.ip, tc.protocol)
|
||||||
|
if tc.expectedErr {
|
||||||
|
assert.Error(t, err, "ip %s, protocol %s", tc.ip, tc.protocol)
|
||||||
|
} else {
|
||||||
|
assert.NoError(t, err, "ip %s, protocol %s", tc.ip, tc.protocol)
|
||||||
|
}
|
||||||
|
assert.Equal(t, tc.expectedMatch, match, "ip %s, protocol %s", tc.ip, tc.protocol)
|
||||||
|
assert.Equal(t, tc.expectedMode, mode, "ip %s, protocol %s", tc.ip, tc.protocol)
|
||||||
|
}
|
||||||
|
|
||||||
|
ipList.DisableMemoryMode()
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
match, mode, err := ipList.IsListed(tc.ip, tc.protocol)
|
||||||
|
if tc.expectedErr {
|
||||||
|
assert.Error(t, err, "ip %s, protocol %s", tc.ip, tc.protocol)
|
||||||
|
} else {
|
||||||
|
assert.NoError(t, err, "ip %s, protocol %s", tc.ip, tc.protocol)
|
||||||
|
}
|
||||||
|
assert.Equal(t, tc.expectedMatch, match, "ip %s, protocol %s", tc.ip, tc.protocol)
|
||||||
|
assert.Equal(t, tc.expectedMode, mode, "ip %s, protocol %s", tc.ip, tc.protocol)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, e := range entries {
|
||||||
|
err := dataprovider.DeleteIPListEntry(e.IPOrNet, e.Type, "", "", "")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func BenchmarkBcryptHashing(b *testing.B) {
|
func BenchmarkBcryptHashing(b *testing.B) {
|
||||||
bcryptPassword := "bcryptpassword"
|
bcryptPassword := "bcryptpassword"
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
|
|
|
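TestIPList above exercises the provider-backed defender list with per-protocol entries. A condensed sketch of the same flow, written as if it sat next to that test in package common and using only calls that appear in this commit; judging from the test cases, a Protocols value of 0 appears to match any protocol, so that value is used here.

// Sketch: create, query and remove a defender list entry.
func defenderListExample() error {
	entry := dataprovider.IPListEntry{
		IPOrNet:   "192.0.2.0/24",
		Type:      dataprovider.IPListTypeDefender,
		Mode:      dataprovider.ListModeDeny,
		Protocols: 0, // 0 seems to mean "any protocol" in the tests above
	}
	if err := dataprovider.AddIPListEntry(&entry, "", "", ""); err != nil {
		return err
	}
	ipList, err := dataprovider.NewIPList(dataprovider.IPListTypeDefender)
	if err != nil {
		return err
	}
	listed, mode, err := ipList.IsListed("192.0.2.15", ProtocolSSH)
	if err != nil {
		return err
	}
	fmt.Println(listed, mode == dataprovider.ListModeDeny) // with the entry above: true true
	return dataprovider.DeleteIPListEntry(entry.IPOrNet, entry.Type, "", "", "")
}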
@ -15,19 +15,10 @@
|
||||||
package common
|
package common
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/yl2chen/cidranger"
|
|
||||||
|
|
||||||
"github.com/drakkan/sftpgo/v2/internal/dataprovider"
|
"github.com/drakkan/sftpgo/v2/internal/dataprovider"
|
||||||
"github.com/drakkan/sftpgo/v2/internal/logger"
|
|
||||||
"github.com/drakkan/sftpgo/v2/internal/util"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// HostEvent is the enumerable for the supported host events
|
// HostEvent is the enumerable for the supported host events
|
||||||
|
@ -55,12 +46,12 @@ var (
|
||||||
type Defender interface {
|
type Defender interface {
|
||||||
GetHosts() ([]dataprovider.DefenderEntry, error)
|
GetHosts() ([]dataprovider.DefenderEntry, error)
|
||||||
GetHost(ip string) (dataprovider.DefenderEntry, error)
|
GetHost(ip string) (dataprovider.DefenderEntry, error)
|
||||||
AddEvent(ip string, event HostEvent)
|
AddEvent(ip, protocol string, event HostEvent)
|
||||||
IsBanned(ip string) bool
|
IsBanned(ip, protocol string) bool
|
||||||
|
IsSafe(ip, protocol string) bool
|
||||||
GetBanTime(ip string) (*time.Time, error)
|
GetBanTime(ip string) (*time.Time, error)
|
||||||
GetScore(ip string) (int, error)
|
GetScore(ip string) (int, error)
|
||||||
DeleteHost(ip string) bool
|
DeleteHost(ip string) bool
|
||||||
Reload() error
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// DefenderConfig defines the "defender" configuration
|
// DefenderConfig defines the "defender" configuration
|
||||||
|
@ -98,59 +89,33 @@ type DefenderConfig struct {
|
||||||
// to return when you request for the entire host list from the defender
|
// to return when you request for the entire host list from the defender
|
||||||
EntriesSoftLimit int `json:"entries_soft_limit" mapstructure:"entries_soft_limit"`
|
EntriesSoftLimit int `json:"entries_soft_limit" mapstructure:"entries_soft_limit"`
|
||||||
EntriesHardLimit int `json:"entries_hard_limit" mapstructure:"entries_hard_limit"`
|
EntriesHardLimit int `json:"entries_hard_limit" mapstructure:"entries_hard_limit"`
|
||||||
// Path to a file containing a list of IP addresses and/or networks to never ban
|
|
||||||
SafeListFile string `json:"safelist_file" mapstructure:"safelist_file"`
|
|
||||||
// Path to a file containing a list of IP addresses and/or networks to always ban
|
|
||||||
BlockListFile string `json:"blocklist_file" mapstructure:"blocklist_file"`
|
|
||||||
// List of IP addresses and/or networks to never ban.
|
|
||||||
// For large lists prefer SafeListFile
|
|
||||||
SafeList []string `json:"safelist" mapstructure:"safelist"`
|
|
||||||
// List of IP addresses and/or networks to always ban.
|
|
||||||
// For large lists prefer BlockListFile
|
|
||||||
BlockList []string `json:"blocklist" mapstructure:"blocklist"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type baseDefender struct {
|
type baseDefender struct {
|
||||||
config *DefenderConfig
|
config *DefenderConfig
|
||||||
sync.RWMutex
|
ipList *dataprovider.IPList
|
||||||
safeList *HostList
|
|
||||||
blockList *HostList
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reload reloads block and safe lists
|
func (d *baseDefender) isBanned(ip, protocol string) bool {
|
||||||
func (d *baseDefender) Reload() error {
|
isListed, mode, err := d.ipList.IsListed(ip, protocol)
|
||||||
blockList, err := loadHostListFromFile(d.config.BlockListFile)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return false
|
||||||
}
|
}
|
||||||
blockList = addEntriesToList(d.config.BlockList, blockList, "blocklist")
|
if isListed && mode == dataprovider.ListModeDeny {
|
||||||
|
|
||||||
d.Lock()
|
|
||||||
d.blockList = blockList
|
|
||||||
d.Unlock()
|
|
||||||
|
|
||||||
safeList, err := loadHostListFromFile(d.config.SafeListFile)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
safeList = addEntriesToList(d.config.SafeList, safeList, "safelist")
|
|
||||||
|
|
||||||
d.Lock()
|
|
||||||
d.safeList = safeList
|
|
||||||
d.Unlock()
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *baseDefender) isBanned(ip string) bool {
|
|
||||||
if d.blockList != nil && d.blockList.isListed(ip) {
|
|
||||||
// permanent ban
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *baseDefender) IsSafe(ip, protocol string) bool {
|
||||||
|
isListed, mode, err := d.ipList.IsListed(ip, protocol)
|
||||||
|
if err == nil && isListed && mode == dataprovider.ListModeAllow {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
func (d *baseDefender) getScore(event HostEvent) int {
|
func (d *baseDefender) getScore(event HostEvent) int {
|
||||||
var score int
|
var score int
|
||||||
|
|
||||||
|
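The rewritten baseDefender condenses the list semantics: a matching Deny entry is a permanent ban, a matching Allow entry marks the host as safe, and a lookup error leaves the host in the neutral state so it is scored like any other. A small sketch of that decision table, using only the IPList API shown in this commit; the helper name is hypothetical.

// Sketch: classify a host against a defender IP list, mirroring isBanned/IsSafe above.
func classify(l *dataprovider.IPList, ip, protocol string) string {
	listed, mode, err := l.IsListed(ip, protocol)
	switch {
	case err != nil || !listed:
		return "neutral" // scored by events, may still be banned later
	case mode == dataprovider.ListModeDeny:
		return "banned" // permanent ban, as in baseDefender.isBanned
	case mode == dataprovider.ListModeAllow:
		return "safe" // exempt, as in baseDefender.IsSafe
	default:
		return "neutral"
	}
}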
@ -167,31 +132,6 @@ func (d *baseDefender) getScore(event HostEvent) int {
|
||||||
return score
|
return score
|
||||||
}
|
}
|
||||||
|
|
||||||
// HostListFile defines the structure expected for safe/block list files
|
|
||||||
type HostListFile struct {
|
|
||||||
IPAddresses []string `json:"addresses"`
|
|
||||||
CIDRNetworks []string `json:"networks"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// HostList defines the structure used to keep the HostListFile in memory
|
|
||||||
type HostList struct {
|
|
||||||
IPAddresses map[string]bool
|
|
||||||
Ranges cidranger.Ranger
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *HostList) isListed(ip string) bool {
|
|
||||||
if _, ok := h.IPAddresses[ip]; ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
ok, err := h.Ranges.Contains(net.ParseIP(ip))
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
type hostEvent struct {
|
type hostEvent struct {
|
||||||
dateTime time.Time
|
dateTime time.Time
|
||||||
score int
|
score int
|
||||||
|
@ -259,113 +199,3 @@ func (c *DefenderConfig) validate() error {
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func loadHostListFromFile(name string) (*HostList, error) {
|
|
||||||
if name == "" {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
if !util.IsFileInputValid(name) {
|
|
||||||
return nil, fmt.Errorf("invalid host list file name %#v", name)
|
|
||||||
}
|
|
||||||
|
|
||||||
info, err := os.Stat(name)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// opinionated max size, you should avoid big host lists
|
|
||||||
if info.Size() > 1048576*5 { // 5MB
|
|
||||||
return nil, fmt.Errorf("host list file %#v is too big: %v bytes", name, info.Size())
|
|
||||||
}
|
|
||||||
|
|
||||||
content, err := os.ReadFile(name)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to read input file %#v: %v", name, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var hostList HostListFile
|
|
||||||
|
|
||||||
err = json.Unmarshal(content, &hostList)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(hostList.CIDRNetworks) > 0 || len(hostList.IPAddresses) > 0 {
|
|
||||||
result := &HostList{
|
|
||||||
IPAddresses: make(map[string]bool),
|
|
||||||
Ranges: cidranger.NewPCTrieRanger(),
|
|
||||||
}
|
|
||||||
ipCount := 0
|
|
||||||
cdrCount := 0
|
|
||||||
for _, ip := range hostList.IPAddresses {
|
|
||||||
if net.ParseIP(ip) == nil {
|
|
||||||
logger.Warn(logSender, "", "unable to parse IP %#v", ip)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
result.IPAddresses[ip] = true
|
|
||||||
ipCount++
|
|
||||||
}
|
|
||||||
for _, cidrNet := range hostList.CIDRNetworks {
|
|
||||||
_, network, err := net.ParseCIDR(cidrNet)
|
|
||||||
if err != nil {
|
|
||||||
logger.Warn(logSender, "", "unable to parse CIDR network %#v: %v", cidrNet, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
err = result.Ranges.Insert(cidranger.NewBasicRangerEntry(*network))
|
|
||||||
if err == nil {
|
|
||||||
cdrCount++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.Info(logSender, "", "list %#v loaded, ip addresses loaded: %v/%v networks loaded: %v/%v",
|
|
||||||
name, ipCount, len(hostList.IPAddresses), cdrCount, len(hostList.CIDRNetworks))
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func addEntriesToList(entries []string, hostList *HostList, listName string) *HostList {
|
|
||||||
if len(entries) == 0 {
|
|
||||||
return hostList
|
|
||||||
}
|
|
||||||
|
|
||||||
if hostList == nil {
|
|
||||||
hostList = &HostList{
|
|
||||||
IPAddresses: make(map[string]bool),
|
|
||||||
Ranges: cidranger.NewPCTrieRanger(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ipCount := 0
|
|
||||||
ipLoaded := 0
|
|
||||||
cdrCount := 0
|
|
||||||
cdrLoaded := 0
|
|
||||||
|
|
||||||
for _, entry := range entries {
|
|
||||||
entry = strings.TrimSpace(entry)
|
|
||||||
if strings.LastIndex(entry, "/") > 0 {
|
|
||||||
cdrCount++
|
|
||||||
_, network, err := net.ParseCIDR(entry)
|
|
||||||
if err != nil {
|
|
||||||
logger.Warn(logSender, "", "unable to parse CIDR network %#v: %v", entry, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
err = hostList.Ranges.Insert(cidranger.NewBasicRangerEntry(*network))
|
|
||||||
if err == nil {
|
|
||||||
cdrLoaded++
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
ipCount++
|
|
||||||
if net.ParseIP(entry) == nil {
|
|
||||||
logger.Warn(logSender, "", "unable to parse IP %#v", entry)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
hostList.IPAddresses[entry] = true
|
|
||||||
ipLoaded++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
logger.Info(logSender, "", "%s from config loaded, ip addresses loaded: %v/%v networks loaded: %v/%v",
|
|
||||||
listName, ipLoaded, ipCount, cdrLoaded, cdrCount)
|
|
||||||
|
|
||||||
return hostList
|
|
||||||
}
|
|
||||||
|
|
|
@ -15,45 +15,88 @@
|
||||||
package common
|
package common
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/rand"
|
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"github.com/yl2chen/cidranger"
|
"github.com/yl2chen/cidranger"
|
||||||
|
|
||||||
|
"github.com/drakkan/sftpgo/v2/internal/dataprovider"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestBasicDefender(t *testing.T) {
|
func TestBasicDefender(t *testing.T) {
|
||||||
bl := HostListFile{
|
entries := []dataprovider.IPListEntry{
|
||||||
IPAddresses: []string{"172.16.1.1", "172.16.1.2"},
|
{
|
||||||
CIDRNetworks: []string{"10.8.0.0/24"},
|
IPOrNet: "172.16.1.1/32",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeDeny,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "172.16.1.2/32",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeDeny,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "10.8.0.0/24",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeDeny,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "192.168.1.1/32",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeDeny,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "192.168.1.2/32",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeDeny,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "10.8.9.0/24",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeDeny,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "172.16.1.3/32",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "172.16.1.4/32",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "192.168.8.0/24",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "192.168.1.3/32",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "192.168.1.4/32",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "192.168.9.0/24",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
sl := HostListFile{
|
|
||||||
IPAddresses: []string{"172.16.1.3", "172.16.1.4"},
|
for idx := range entries {
|
||||||
CIDRNetworks: []string{"192.168.8.0/24"},
|
e := entries[idx]
|
||||||
|
err := dataprovider.AddIPListEntry(&e, "", "", "")
|
||||||
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
blFile := filepath.Join(os.TempDir(), "bl.json")
|
|
||||||
slFile := filepath.Join(os.TempDir(), "sl.json")
|
|
||||||
|
|
||||||
data, err := json.Marshal(bl)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
err = os.WriteFile(blFile, data, os.ModePerm)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
data, err = json.Marshal(sl)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
err = os.WriteFile(slFile, data, os.ModePerm)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
config := &DefenderConfig{
|
config := &DefenderConfig{
|
||||||
Enabled: true,
|
Enabled: true,
|
||||||
|
@ -67,31 +110,21 @@ func TestBasicDefender(t *testing.T) {
|
||||||
ObservationTime: 15,
|
ObservationTime: 15,
|
||||||
EntriesSoftLimit: 1,
|
EntriesSoftLimit: 1,
|
||||||
EntriesHardLimit: 2,
|
EntriesHardLimit: 2,
|
||||||
SafeListFile: "slFile",
|
|
||||||
BlockListFile: "blFile",
|
|
||||||
SafeList: []string{"192.168.1.3", "192.168.1.4", "192.168.9.0/24"},
|
|
||||||
BlockList: []string{"192.168.1.1", "192.168.1.2", "10.8.9.0/24"},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = newInMemoryDefender(config)
|
|
||||||
assert.Error(t, err)
|
|
||||||
config.BlockListFile = blFile
|
|
||||||
_, err = newInMemoryDefender(config)
|
|
||||||
assert.Error(t, err)
|
|
||||||
config.SafeListFile = slFile
|
|
||||||
d, err := newInMemoryDefender(config)
|
d, err := newInMemoryDefender(config)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
defender := d.(*memoryDefender)
|
defender := d.(*memoryDefender)
|
||||||
assert.True(t, defender.IsBanned("172.16.1.1"))
|
assert.True(t, defender.IsBanned("172.16.1.1", ProtocolSSH))
|
||||||
assert.True(t, defender.IsBanned("192.168.1.1"))
|
assert.True(t, defender.IsBanned("192.168.1.1", ProtocolFTP))
|
||||||
assert.False(t, defender.IsBanned("172.16.1.10"))
|
assert.False(t, defender.IsBanned("172.16.1.10", ProtocolSSH))
|
||||||
assert.False(t, defender.IsBanned("192.168.1.10"))
|
assert.False(t, defender.IsBanned("192.168.1.10", ProtocolSSH))
|
||||||
assert.False(t, defender.IsBanned("10.8.2.3"))
|
assert.False(t, defender.IsBanned("10.8.2.3", ProtocolSSH))
|
||||||
assert.False(t, defender.IsBanned("10.9.2.3"))
|
assert.False(t, defender.IsBanned("10.9.2.3", ProtocolSSH))
|
||||||
assert.True(t, defender.IsBanned("10.8.0.3"))
|
assert.True(t, defender.IsBanned("10.8.0.3", ProtocolSSH))
|
||||||
assert.True(t, defender.IsBanned("10.8.9.3"))
|
assert.True(t, defender.IsBanned("10.8.9.3", ProtocolSSH))
|
||||||
assert.False(t, defender.IsBanned("invalid ip"))
|
assert.False(t, defender.IsBanned("invalid ip", ProtocolSSH))
|
||||||
assert.Equal(t, 0, defender.countBanned())
|
assert.Equal(t, 0, defender.countBanned())
|
||||||
assert.Equal(t, 0, defender.countHosts())
|
assert.Equal(t, 0, defender.countHosts())
|
||||||
hosts, err := defender.GetHosts()
|
hosts, err := defender.GetHosts()
|
||||||
|
@ -100,15 +133,15 @@ func TestBasicDefender(t *testing.T) {
|
||||||
_, err = defender.GetHost("10.8.0.4")
|
_, err = defender.GetHost("10.8.0.4")
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
|
|
||||||
defender.AddEvent("172.16.1.4", HostEventLoginFailed)
|
defender.AddEvent("172.16.1.4", ProtocolSSH, HostEventLoginFailed)
|
||||||
defender.AddEvent("192.168.1.4", HostEventLoginFailed)
|
defender.AddEvent("192.168.1.4", ProtocolSSH, HostEventLoginFailed)
|
||||||
defender.AddEvent("192.168.8.4", HostEventUserNotFound)
|
defender.AddEvent("192.168.8.4", ProtocolSSH, HostEventUserNotFound)
|
||||||
defender.AddEvent("172.16.1.3", HostEventLimitExceeded)
|
defender.AddEvent("172.16.1.3", ProtocolSSH, HostEventLimitExceeded)
|
||||||
defender.AddEvent("192.168.1.3", HostEventLimitExceeded)
|
defender.AddEvent("192.168.1.3", ProtocolSSH, HostEventLimitExceeded)
|
||||||
assert.Equal(t, 0, defender.countHosts())
|
assert.Equal(t, 0, defender.countHosts())
|
||||||
|
|
||||||
testIP := "12.34.56.78"
|
testIP := "12.34.56.78"
|
||||||
defender.AddEvent(testIP, HostEventLoginFailed)
|
defender.AddEvent(testIP, ProtocolSSH, HostEventLoginFailed)
|
||||||
assert.Equal(t, 1, defender.countHosts())
|
assert.Equal(t, 1, defender.countHosts())
|
||||||
assert.Equal(t, 0, defender.countBanned())
|
assert.Equal(t, 0, defender.countBanned())
|
||||||
score, err := defender.GetScore(testIP)
|
score, err := defender.GetScore(testIP)
|
||||||
|
@ -128,7 +161,7 @@ func TestBasicDefender(t *testing.T) {
|
||||||
banTime, err := defender.GetBanTime(testIP)
|
banTime, err := defender.GetBanTime(testIP)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Nil(t, banTime)
|
assert.Nil(t, banTime)
|
||||||
defender.AddEvent(testIP, HostEventLimitExceeded)
|
defender.AddEvent(testIP, ProtocolSSH, HostEventLimitExceeded)
|
||||||
assert.Equal(t, 1, defender.countHosts())
|
assert.Equal(t, 1, defender.countHosts())
|
||||||
assert.Equal(t, 0, defender.countBanned())
|
assert.Equal(t, 0, defender.countBanned())
|
||||||
score, err = defender.GetScore(testIP)
|
score, err = defender.GetScore(testIP)
|
||||||
|
@ -141,8 +174,8 @@ func TestBasicDefender(t *testing.T) {
|
||||||
assert.True(t, hosts[0].BanTime.IsZero())
|
assert.True(t, hosts[0].BanTime.IsZero())
|
||||||
assert.Empty(t, hosts[0].GetBanTime())
|
assert.Empty(t, hosts[0].GetBanTime())
|
||||||
}
|
}
|
||||||
defender.AddEvent(testIP, HostEventUserNotFound)
|
defender.AddEvent(testIP, ProtocolSSH, HostEventUserNotFound)
|
||||||
defender.AddEvent(testIP, HostEventNoLoginTried)
|
defender.AddEvent(testIP, ProtocolSSH, HostEventNoLoginTried)
|
||||||
assert.Equal(t, 0, defender.countHosts())
|
assert.Equal(t, 0, defender.countHosts())
|
||||||
assert.Equal(t, 1, defender.countBanned())
|
assert.Equal(t, 1, defender.countBanned())
|
||||||
score, err = defender.GetScore(testIP)
|
score, err = defender.GetScore(testIP)
|
||||||
|
@ -169,11 +202,11 @@ func TestBasicDefender(t *testing.T) {
|
||||||
testIP2 := "12.34.56.80"
|
testIP2 := "12.34.56.80"
|
||||||
testIP3 := "12.34.56.81"
|
testIP3 := "12.34.56.81"
|
||||||
|
|
||||||
defender.AddEvent(testIP1, HostEventNoLoginTried)
|
defender.AddEvent(testIP1, ProtocolSSH, HostEventNoLoginTried)
|
||||||
defender.AddEvent(testIP2, HostEventNoLoginTried)
|
defender.AddEvent(testIP2, ProtocolSSH, HostEventNoLoginTried)
|
||||||
assert.Equal(t, 2, defender.countHosts())
|
assert.Equal(t, 2, defender.countHosts())
|
||||||
time.Sleep(20 * time.Millisecond)
|
time.Sleep(20 * time.Millisecond)
|
||||||
defender.AddEvent(testIP3, HostEventNoLoginTried)
|
defender.AddEvent(testIP3, ProtocolSSH, HostEventNoLoginTried)
|
||||||
assert.Equal(t, defender.config.EntriesSoftLimit, defender.countHosts())
|
assert.Equal(t, defender.config.EntriesSoftLimit, defender.countHosts())
|
||||||
// testIP1 and testIP2 should be removed
|
// testIP1 and testIP2 should be removed
|
||||||
assert.Equal(t, defender.config.EntriesSoftLimit, defender.countHosts())
|
assert.Equal(t, defender.config.EntriesSoftLimit, defender.countHosts())
|
||||||
|
@ -187,8 +220,8 @@ func TestBasicDefender(t *testing.T) {
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, 2, score)
|
assert.Equal(t, 2, score)
|
||||||
|
|
||||||
defender.AddEvent(testIP3, HostEventNoLoginTried)
|
defender.AddEvent(testIP3, ProtocolSSH, HostEventNoLoginTried)
|
||||||
defender.AddEvent(testIP3, HostEventNoLoginTried)
|
defender.AddEvent(testIP3, ProtocolSSH, HostEventNoLoginTried)
|
||||||
// IP3 is now banned
|
// IP3 is now banned
|
||||||
banTime, err = defender.GetBanTime(testIP3)
|
banTime, err = defender.GetBanTime(testIP3)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
@ -197,7 +230,7 @@ func TestBasicDefender(t *testing.T) {
|
||||||
|
|
||||||
time.Sleep(20 * time.Millisecond)
|
time.Sleep(20 * time.Millisecond)
|
||||||
for i := 0; i < 3; i++ {
|
for i := 0; i < 3; i++ {
|
||||||
defender.AddEvent(testIP1, HostEventNoLoginTried)
|
defender.AddEvent(testIP1, ProtocolSSH, HostEventNoLoginTried)
|
||||||
}
|
}
|
||||||
assert.Equal(t, 0, defender.countHosts())
|
assert.Equal(t, 0, defender.countHosts())
|
||||||
assert.Equal(t, config.EntriesSoftLimit, defender.countBanned())
|
assert.Equal(t, config.EntriesSoftLimit, defender.countBanned())
|
||||||
|
@ -212,9 +245,9 @@ func TestBasicDefender(t *testing.T) {
|
||||||
assert.NotNil(t, banTime)
|
assert.NotNil(t, banTime)
|
||||||
|
|
||||||
for i := 0; i < 3; i++ {
|
for i := 0; i < 3; i++ {
|
||||||
defender.AddEvent(testIP, HostEventNoLoginTried)
|
defender.AddEvent(testIP, ProtocolSSH, HostEventNoLoginTried)
|
||||||
time.Sleep(10 * time.Millisecond)
|
time.Sleep(10 * time.Millisecond)
|
||||||
defender.AddEvent(testIP3, HostEventNoLoginTried)
|
defender.AddEvent(testIP3, ProtocolSSH, HostEventNoLoginTried)
|
||||||
}
|
}
|
||||||
assert.Equal(t, 0, defender.countHosts())
|
assert.Equal(t, 0, defender.countHosts())
|
||||||
assert.Equal(t, defender.config.EntriesSoftLimit, defender.countBanned())
|
assert.Equal(t, defender.config.EntriesSoftLimit, defender.countBanned())
|
||||||
|
@ -222,7 +255,7 @@ func TestBasicDefender(t *testing.T) {
|
||||||
banTime, err = defender.GetBanTime(testIP3)
|
banTime, err = defender.GetBanTime(testIP3)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
if assert.NotNil(t, banTime) {
|
if assert.NotNil(t, banTime) {
|
||||||
assert.True(t, defender.IsBanned(testIP3))
|
assert.True(t, defender.IsBanned(testIP3, ProtocolFTP))
|
||||||
// ban time should increase
|
// ban time should increase
|
||||||
newBanTime, err := defender.GetBanTime(testIP3)
|
newBanTime, err := defender.GetBanTime(testIP3)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
@ -232,11 +265,11 @@ func TestBasicDefender(t *testing.T) {
|
||||||
assert.True(t, defender.DeleteHost(testIP3))
|
assert.True(t, defender.DeleteHost(testIP3))
|
||||||
assert.False(t, defender.DeleteHost(testIP3))
|
assert.False(t, defender.DeleteHost(testIP3))
|
||||||
|
|
||||||
err = os.Remove(slFile)
|
for _, e := range entries {
|
||||||
assert.NoError(t, err)
|
err := dataprovider.DeleteIPListEntry(e.IPOrNet, e.Type, "", "", "")
|
||||||
err = os.Remove(blFile)
|
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestExpiredHostBans(t *testing.T) {
|
func TestExpiredHostBans(t *testing.T) {
|
||||||
config := &DefenderConfig{
|
config := &DefenderConfig{
|
||||||
|
@ -265,14 +298,14 @@ func TestExpiredHostBans(t *testing.T) {
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Len(t, res, 0)
|
assert.Len(t, res, 0)
|
||||||
|
|
||||||
assert.False(t, defender.IsBanned(testIP))
|
assert.False(t, defender.IsBanned(testIP, ProtocolFTP))
|
||||||
_, err = defender.GetHost(testIP)
|
_, err = defender.GetHost(testIP)
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
_, ok := defender.banned[testIP]
|
_, ok := defender.banned[testIP]
|
||||||
assert.True(t, ok)
|
assert.True(t, ok)
|
||||||
// now add an event for an expired banned ip, it should be removed
|
// now add an event for an expired banned ip, it should be removed
|
||||||
defender.AddEvent(testIP, HostEventLoginFailed)
|
defender.AddEvent(testIP, ProtocolFTP, HostEventLoginFailed)
|
||||||
assert.False(t, defender.IsBanned(testIP))
|
assert.False(t, defender.IsBanned(testIP, ProtocolFTP))
|
||||||
entry, err := defender.GetHost(testIP)
|
entry, err := defender.GetHost(testIP)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, testIP, entry.IP)
|
assert.Equal(t, testIP, entry.IP)
|
||||||
|
@ -314,94 +347,6 @@ func TestExpiredHostBans(t *testing.T) {
|
||||||
assert.True(t, ok)
|
assert.True(t, ok)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestLoadHostListFromFile(t *testing.T) {
|
|
||||||
_, err := loadHostListFromFile(".")
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
hostsFilePath := filepath.Join(os.TempDir(), "hostfile")
|
|
||||||
content := make([]byte, 1048576*6)
|
|
||||||
_, err = rand.Read(content)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
err = os.WriteFile(hostsFilePath, content, os.ModePerm)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
_, err = loadHostListFromFile(hostsFilePath)
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
hl := HostListFile{
|
|
||||||
IPAddresses: []string{},
|
|
||||||
CIDRNetworks: []string{},
|
|
||||||
}
|
|
||||||
|
|
||||||
asJSON, err := json.Marshal(hl)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
err = os.WriteFile(hostsFilePath, asJSON, os.ModePerm)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
hostList, err := loadHostListFromFile(hostsFilePath)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Nil(t, hostList)
|
|
||||||
|
|
||||||
hl.IPAddresses = append(hl.IPAddresses, "invalidip")
|
|
||||||
asJSON, err = json.Marshal(hl)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
err = os.WriteFile(hostsFilePath, asJSON, os.ModePerm)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
hostList, err = loadHostListFromFile(hostsFilePath)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Len(t, hostList.IPAddresses, 0)
|
|
||||||
|
|
||||||
hl.IPAddresses = nil
|
|
||||||
hl.CIDRNetworks = append(hl.CIDRNetworks, "invalid net")
|
|
||||||
|
|
||||||
asJSON, err = json.Marshal(hl)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
err = os.WriteFile(hostsFilePath, asJSON, os.ModePerm)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
hostList, err = loadHostListFromFile(hostsFilePath)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.NotNil(t, hostList)
|
|
||||||
assert.Len(t, hostList.IPAddresses, 0)
|
|
||||||
assert.Equal(t, 0, hostList.Ranges.Len())
|
|
||||||
|
|
||||||
if runtime.GOOS != osWindows {
|
|
||||||
err = os.Chmod(hostsFilePath, 0111)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
_, err = loadHostListFromFile(hostsFilePath)
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
err = os.Chmod(hostsFilePath, 0644)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = os.WriteFile(hostsFilePath, []byte("non json content"), os.ModePerm)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
_, err = loadHostListFromFile(hostsFilePath)
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
err = os.Remove(hostsFilePath)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAddEntriesToHostList(t *testing.T) {
|
|
||||||
name := "testList"
|
|
||||||
hostlist := addEntriesToList([]string{"192.168.6.1", "10.7.0.0/25"}, nil, name)
|
|
||||||
require.NotNil(t, hostlist)
|
|
||||||
assert.True(t, hostlist.isListed("192.168.6.1"))
|
|
||||||
assert.False(t, hostlist.isListed("192.168.6.2"))
|
|
||||||
assert.True(t, hostlist.isListed("10.7.0.28"))
|
|
||||||
assert.False(t, hostlist.isListed("10.7.0.129"))
|
|
||||||
// load invalid values
|
|
||||||
hostlist = addEntriesToList([]string{"invalidip", "invalidnet/24"}, nil, name)
|
|
||||||
require.NotNil(t, hostlist)
|
|
||||||
assert.Len(t, hostlist.IPAddresses, 0)
|
|
||||||
assert.Equal(t, 0, hostlist.Ranges.Len())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDefenderCleanup(t *testing.T) {
|
func TestDefenderCleanup(t *testing.T) {
|
||||||
d := memoryDefender{
|
d := memoryDefender{
|
||||||
baseDefender: baseDefender{
|
baseDefender: baseDefender{
|
||||||
|
@ -577,7 +522,7 @@ func BenchmarkDefenderBannedSearch(b *testing.B) {
|
||||||
b.ResetTimer()
|
b.ResetTimer()
|
||||||
|
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
d.IsBanned("192.168.1.1")
|
d.IsBanned("192.168.1.1", ProtocolSSH)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -593,7 +538,7 @@ func BenchmarkCleanup(b *testing.B) {
|
||||||
|
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) {
|
for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) {
|
||||||
d.AddEvent(ip.String(), HostEventLoginFailed)
|
d.AddEvent(ip.String(), ProtocolSSH, HostEventLoginFailed)
|
||||||
if d.countHosts() > d.config.EntriesHardLimit {
|
if d.countHosts() > d.config.EntriesHardLimit {
|
||||||
panic("too many hosts")
|
panic("too many hosts")
|
||||||
}
|
}
|
||||||
|
@ -604,72 +549,10 @@ func BenchmarkCleanup(b *testing.B) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkDefenderBannedSearchWithBlockList(b *testing.B) {
|
|
||||||
d := getDefenderForBench()
|
|
||||||
|
|
||||||
d.blockList = &HostList{
|
|
||||||
IPAddresses: make(map[string]bool),
|
|
||||||
Ranges: cidranger.NewPCTrieRanger(),
|
|
||||||
}
|
|
||||||
|
|
||||||
ip, ipnet, err := net.ParseCIDR("129.8.0.0/12") // 1048574 ip addresses
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) {
|
|
||||||
d.banned[ip.String()] = time.Now().Add(10 * time.Minute)
|
|
||||||
d.blockList.IPAddresses[ip.String()] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < 255; i++ {
|
|
||||||
cidr := fmt.Sprintf("10.8.%v.1/24", i)
|
|
||||||
_, network, _ := net.ParseCIDR(cidr)
|
|
||||||
if err := d.blockList.Ranges.Insert(cidranger.NewBasicRangerEntry(*network)); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
b.ResetTimer()
|
|
||||||
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
d.IsBanned("192.168.1.1")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkHostListSearch(b *testing.B) {
|
|
||||||
hostlist := &HostList{
|
|
||||||
IPAddresses: make(map[string]bool),
|
|
||||||
Ranges: cidranger.NewPCTrieRanger(),
|
|
||||||
}
|
|
||||||
|
|
||||||
ip, ipnet, _ := net.ParseCIDR("172.16.0.0/16")
|
|
||||||
|
|
||||||
for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) {
|
|
||||||
hostlist.IPAddresses[ip.String()] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < 255; i++ {
|
|
||||||
cidr := fmt.Sprintf("10.8.%v.1/24", i)
|
|
||||||
_, network, _ := net.ParseCIDR(cidr)
|
|
||||||
if err := hostlist.Ranges.Insert(cidranger.NewBasicRangerEntry(*network)); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
b.ResetTimer()
|
|
||||||
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
if hostlist.isListed("192.167.1.2") {
|
|
||||||
panic("should not be listed")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkCIDRanger(b *testing.B) {
|
func BenchmarkCIDRanger(b *testing.B) {
|
||||||
ranger := cidranger.NewPCTrieRanger()
|
ranger := cidranger.NewPCTrieRanger()
|
||||||
for i := 0; i < 255; i++ {
|
for i := 0; i < 255; i++ {
|
||||||
cidr := fmt.Sprintf("192.168.%v.1/24", i)
|
cidr := fmt.Sprintf("192.168.%d.1/24", i)
|
||||||
_, network, _ := net.ParseCIDR(cidr)
|
_, network, _ := net.ParseCIDR(cidr)
|
||||||
if err := ranger.Insert(cidranger.NewBasicRangerEntry(*network)); err != nil {
|
if err := ranger.Insert(cidranger.NewBasicRangerEntry(*network)); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
|
@ -689,7 +572,7 @@ func BenchmarkCIDRanger(b *testing.B) {
|
||||||
func BenchmarkNetContains(b *testing.B) {
|
func BenchmarkNetContains(b *testing.B) {
|
||||||
var nets []*net.IPNet
|
var nets []*net.IPNet
|
||||||
for i := 0; i < 255; i++ {
|
for i := 0; i < 255; i++ {
|
||||||
cidr := fmt.Sprintf("192.168.%v.1/24", i)
|
cidr := fmt.Sprintf("192.168.%d.1/24", i)
|
||||||
_, network, _ := net.ParseCIDR(cidr)
|
_, network, _ := net.ParseCIDR(cidr)
|
||||||
nets = append(nets, network)
|
nets = append(nets, network)
|
||||||
}
|
}
|
||||||
|
|
|
@ -15,6 +15,7 @@
|
||||||
package common
|
package common
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/drakkan/sftpgo/v2/internal/dataprovider"
|
"github.com/drakkan/sftpgo/v2/internal/dataprovider"
|
||||||
|
@ -24,7 +25,7 @@ import (
|
||||||
|
|
||||||
type dbDefender struct {
|
type dbDefender struct {
|
||||||
baseDefender
|
baseDefender
|
||||||
lastCleanup time.Time
|
lastCleanup atomic.Int64
|
||||||
}
|
}
|
||||||
|
|
||||||
func newDBDefender(config *DefenderConfig) (Defender, error) {
|
func newDBDefender(config *DefenderConfig) (Defender, error) {
|
||||||
|
@ -32,16 +33,17 @@ func newDBDefender(config *DefenderConfig) (Defender, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
ipList, err := dataprovider.NewIPList(dataprovider.IPListTypeDefender)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
defender := &dbDefender{
|
defender := &dbDefender{
|
||||||
baseDefender: baseDefender{
|
baseDefender: baseDefender{
|
||||||
config: config,
|
config: config,
|
||||||
|
ipList: ipList,
|
||||||
},
|
},
|
||||||
lastCleanup: time.Time{},
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := defender.Reload(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
|
defender.lastCleanup.Store(0)
|
||||||
|
|
||||||
return defender, nil
|
return defender, nil
|
||||||
}
|
}
|
||||||
|
@ -59,13 +61,10 @@ func (d *dbDefender) GetHost(ip string) (dataprovider.DefenderEntry, error) {
|
||||||
// IsBanned returns true if the specified IP is banned
|
// IsBanned returns true if the specified IP is banned
|
||||||
// and increases ban time if the IP is found.
|
// and increases ban time if the IP is found.
|
||||||
// This method must be called as soon as the client connects
|
// This method must be called as soon as the client connects
|
||||||
func (d *dbDefender) IsBanned(ip string) bool {
|
func (d *dbDefender) IsBanned(ip, protocol string) bool {
|
||||||
d.RLock()
|
if d.baseDefender.isBanned(ip, protocol) {
|
||||||
if d.baseDefender.isBanned(ip) {
|
|
||||||
d.RUnlock()
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
d.RUnlock()
|
|
||||||
|
|
||||||
_, err := dataprovider.IsDefenderHostBanned(ip)
|
_, err := dataprovider.IsDefenderHostBanned(ip)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -90,13 +89,10 @@ func (d *dbDefender) DeleteHost(ip string) bool {
|
||||||
|
|
||||||
// AddEvent adds an event for the given IP.
|
// AddEvent adds an event for the given IP.
|
||||||
// This method must be called for clients not yet banned
|
// This method must be called for clients not yet banned
|
||||||
func (d *dbDefender) AddEvent(ip string, event HostEvent) {
|
func (d *dbDefender) AddEvent(ip, protocol string, event HostEvent) {
|
||||||
d.RLock()
|
if d.IsSafe(ip, protocol) {
|
||||||
if d.safeList != nil && d.safeList.isListed(ip) {
|
|
||||||
d.RUnlock()
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
d.RUnlock()
|
|
||||||
|
|
||||||
score := d.baseDefender.getScore(event)
|
score := d.baseDefender.getScore(event)
|
||||||
|
|
||||||
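
The two hunks above change the Defender call surface: IsBanned now takes the protocol as well as the IP and must be invoked as soon as a client connects, while AddEvent also takes the protocol and is only meant for clients that are not yet banned. Below is a minimal sketch of how a protocol front end could drive this flow, assuming a local stand-in interface rather than sftpgo's real internal/common types.

// --- illustrative sketch, not part of this commit ---
package main

import "fmt"

// HostEvent and Defender are local stand-ins; the real definitions live in
// sftpgo's internal/common package.
type HostEvent int

const HostEventLoginFailed HostEvent = iota

type Defender interface {
	IsBanned(ip, protocol string) bool
	AddEvent(ip, protocol string, event HostEvent)
}

// handleConnection shows the intended call order: check the ban status as
// soon as the client connects, then report events only while it is not banned.
func handleConnection(d Defender, ip, protocol string) error {
	if d.IsBanned(ip, protocol) {
		return fmt.Errorf("connection refused, %s is banned for %s", ip, protocol)
	}
	// ... authentication happens here; on failure tell the defender about it
	d.AddEvent(ip, protocol, HostEventLoginFailed)
	return nil
}

func main() {
	fmt.Println("see handleConnection above") // keeps the sketch runnable on its own
}
// --- end of sketch ---
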
|
@ -165,15 +161,17 @@ func (d *dbDefender) getStartObservationTime() int64 {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *dbDefender) getLastCleanup() time.Time {
|
func (d *dbDefender) getLastCleanup() time.Time {
|
||||||
d.RLock()
|
val := d.lastCleanup.Load()
|
||||||
defer d.RUnlock()
|
if val == 0 {
|
||||||
|
return time.Time{}
|
||||||
return d.lastCleanup
|
}
|
||||||
|
return util.GetTimeFromMsecSinceEpoch(val)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *dbDefender) setLastCleanup(when time.Time) {
|
func (d *dbDefender) setLastCleanup(when time.Time) {
|
||||||
d.Lock()
|
if when.IsZero() {
|
||||||
defer d.Unlock()
|
d.lastCleanup.Store(0)
|
||||||
|
return
|
||||||
d.lastCleanup = when
|
}
|
||||||
|
d.lastCleanup.Store(util.GetTimeAsMsSinceEpoch(when))
|
||||||
}
|
}
|
||||||
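
The hunks above replace the mutex-guarded time.Time with an atomic.Int64 holding milliseconds since the Unix epoch, where zero means no cleanup has run yet; util.GetTimeAsMsSinceEpoch and util.GetTimeFromMsecSinceEpoch are sftpgo helpers for that conversion. A standard-library-only sketch of the same pattern:

// --- illustrative sketch, not part of this commit ---
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// lastCleanup mirrors the pattern from the diff: an atomic.Int64 storing
// milliseconds since the Unix epoch, with zero meaning "never cleaned up".
type lastCleanup struct {
	val atomic.Int64
}

func (l *lastCleanup) Get() time.Time {
	ms := l.val.Load()
	if ms == 0 {
		return time.Time{}
	}
	return time.UnixMilli(ms)
}

func (l *lastCleanup) Set(when time.Time) {
	if when.IsZero() {
		l.val.Store(0)
		return
	}
	l.val.Store(when.UnixMilli())
}

func main() {
	var lc lastCleanup
	fmt.Println(lc.Get().IsZero()) // true, no cleanup recorded yet
	lc.Set(time.Now())
	fmt.Println(lc.Get().IsZero()) // false
}
// --- end of sketch ---
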
|
|
|
@ -16,9 +16,6 @@ package common
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
@ -32,6 +29,45 @@ func TestBasicDbDefender(t *testing.T) {
|
||||||
if !isDbDefenderSupported() {
|
if !isDbDefenderSupported() {
|
||||||
t.Skip("this test is not supported with the current database provider")
|
t.Skip("this test is not supported with the current database provider")
|
||||||
}
|
}
|
||||||
|
entries := []dataprovider.IPListEntry{
|
||||||
|
{
|
||||||
|
IPOrNet: "172.16.1.1/32",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeDeny,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "172.16.1.2/32",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeDeny,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "10.8.0.0/24",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeDeny,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "172.16.1.3/32",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "172.16.1.4/32",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "192.168.8.0/24",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for idx := range entries {
|
||||||
|
e := entries[idx]
|
||||||
|
err := dataprovider.AddIPListEntry(&e, "", "", "")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
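
The entries above replace the old SafeListFile/BlockListFile JSON files: defender list entries with ListModeDeny behave like the previous block list, ListModeAllow like the previous safe list, and both are now persisted through the data provider. The matching itself boils down to CIDR containment checks; here is a small sketch with net/netip (the package this commit starts importing in the bolt provider), using the same networks as the test data and purely illustrative function names.

// --- illustrative sketch, not part of this commit ---
package main

import (
	"fmt"
	"net/netip"
)

// matchesAny reports whether ip falls inside any of the given CIDR entries.
// The real lookup lives in the data provider; this only shows the matching idea.
func matchesAny(ip string, prefixes []string) bool {
	addr, err := netip.ParseAddr(ip)
	if err != nil {
		return false // invalid IPs never match, like "invalid ip" in the tests
	}
	for _, p := range prefixes {
		prefix, err := netip.ParsePrefix(p)
		if err != nil {
			continue
		}
		if prefix.Contains(addr) {
			return true
		}
	}
	return false
}

func main() {
	denied := []string{"172.16.1.1/32", "172.16.1.2/32", "10.8.0.0/24"}
	allowed := []string{"172.16.1.3/32", "172.16.1.4/32", "192.168.8.0/24"}
	fmt.Println(matchesAny("10.8.0.4", denied))     // true: treated as banned
	fmt.Println(matchesAny("192.168.8.4", allowed)) // true: treated as safe
	fmt.Println(matchesAny("invalid ip", denied))   // false
}
// --- end of sketch ---
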
config := &DefenderConfig{
|
config := &DefenderConfig{
|
||||||
Enabled: true,
|
Enabled: true,
|
||||||
BanTime: 10,
|
BanTime: 10,
|
||||||
|
@ -44,61 +80,31 @@ func TestBasicDbDefender(t *testing.T) {
|
||||||
ObservationTime: 15,
|
ObservationTime: 15,
|
||||||
EntriesSoftLimit: 1,
|
EntriesSoftLimit: 1,
|
||||||
EntriesHardLimit: 10,
|
EntriesHardLimit: 10,
|
||||||
SafeListFile: "slFile",
|
|
||||||
BlockListFile: "blFile",
|
|
||||||
}
|
}
|
||||||
_, err := newDBDefender(config)
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
bl := HostListFile{
|
|
||||||
IPAddresses: []string{"172.16.1.1", "172.16.1.2"},
|
|
||||||
CIDRNetworks: []string{"10.8.0.0/24"},
|
|
||||||
}
|
|
||||||
sl := HostListFile{
|
|
||||||
IPAddresses: []string{"172.16.1.3", "172.16.1.4"},
|
|
||||||
CIDRNetworks: []string{"192.168.8.0/24"},
|
|
||||||
}
|
|
||||||
blFile := filepath.Join(os.TempDir(), "bl.json")
|
|
||||||
slFile := filepath.Join(os.TempDir(), "sl.json")
|
|
||||||
|
|
||||||
data, err := json.Marshal(bl)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
err = os.WriteFile(blFile, data, os.ModePerm)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
data, err = json.Marshal(sl)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
err = os.WriteFile(slFile, data, os.ModePerm)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
config.BlockListFile = blFile
|
|
||||||
_, err = newDBDefender(config)
|
|
||||||
assert.Error(t, err)
|
|
||||||
config.SafeListFile = slFile
|
|
||||||
d, err := newDBDefender(config)
|
d, err := newDBDefender(config)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
defender := d.(*dbDefender)
|
defender := d.(*dbDefender)
|
||||||
assert.True(t, defender.IsBanned("172.16.1.1"))
|
assert.True(t, defender.IsBanned("172.16.1.1", ProtocolFTP))
|
||||||
assert.False(t, defender.IsBanned("172.16.1.10"))
|
assert.False(t, defender.IsBanned("172.16.1.10", ProtocolSSH))
|
||||||
assert.False(t, defender.IsBanned("10.8.1.3"))
|
assert.False(t, defender.IsBanned("10.8.1.3", ProtocolHTTP))
|
||||||
assert.True(t, defender.IsBanned("10.8.0.4"))
|
assert.True(t, defender.IsBanned("10.8.0.4", ProtocolWebDAV))
|
||||||
assert.False(t, defender.IsBanned("invalid ip"))
|
assert.False(t, defender.IsBanned("invalid ip", ProtocolSSH))
|
||||||
hosts, err := defender.GetHosts()
|
hosts, err := defender.GetHosts()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Len(t, hosts, 0)
|
assert.Len(t, hosts, 0)
|
||||||
_, err = defender.GetHost("10.8.0.3")
|
_, err = defender.GetHost("10.8.0.3")
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
|
|
||||||
defender.AddEvent("172.16.1.4", HostEventLoginFailed)
|
defender.AddEvent("172.16.1.4", ProtocolSSH, HostEventLoginFailed)
|
||||||
defender.AddEvent("192.168.8.4", HostEventUserNotFound)
|
defender.AddEvent("192.168.8.4", ProtocolSSH, HostEventUserNotFound)
|
||||||
defender.AddEvent("172.16.1.3", HostEventLimitExceeded)
|
defender.AddEvent("172.16.1.3", ProtocolSSH, HostEventLimitExceeded)
|
||||||
hosts, err = defender.GetHosts()
|
hosts, err = defender.GetHosts()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Len(t, hosts, 0)
|
assert.Len(t, hosts, 0)
|
||||||
assert.True(t, defender.getLastCleanup().IsZero())
|
assert.True(t, defender.getLastCleanup().IsZero())
|
||||||
|
|
||||||
testIP := "123.45.67.89"
|
testIP := "123.45.67.89"
|
||||||
defender.AddEvent(testIP, HostEventLoginFailed)
|
defender.AddEvent(testIP, ProtocolSSH, HostEventLoginFailed)
|
||||||
lastCleanup := defender.getLastCleanup()
|
lastCleanup := defender.getLastCleanup()
|
||||||
assert.False(t, lastCleanup.IsZero())
|
assert.False(t, lastCleanup.IsZero())
|
||||||
score, err := defender.GetScore(testIP)
|
score, err := defender.GetScore(testIP)
|
||||||
|
@ -118,7 +124,7 @@ func TestBasicDbDefender(t *testing.T) {
|
||||||
banTime, err := defender.GetBanTime(testIP)
|
banTime, err := defender.GetBanTime(testIP)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Nil(t, banTime)
|
assert.Nil(t, banTime)
|
||||||
defender.AddEvent(testIP, HostEventLimitExceeded)
|
defender.AddEvent(testIP, ProtocolSSH, HostEventLimitExceeded)
|
||||||
score, err = defender.GetScore(testIP)
|
score, err = defender.GetScore(testIP)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, 4, score)
|
assert.Equal(t, 4, score)
|
||||||
|
@ -129,8 +135,8 @@ func TestBasicDbDefender(t *testing.T) {
|
||||||
assert.True(t, hosts[0].BanTime.IsZero())
|
assert.True(t, hosts[0].BanTime.IsZero())
|
||||||
assert.Empty(t, hosts[0].GetBanTime())
|
assert.Empty(t, hosts[0].GetBanTime())
|
||||||
}
|
}
|
||||||
defender.AddEvent(testIP, HostEventNoLoginTried)
|
defender.AddEvent(testIP, ProtocolSSH, HostEventNoLoginTried)
|
||||||
defender.AddEvent(testIP, HostEventNoLoginTried)
|
defender.AddEvent(testIP, ProtocolSSH, HostEventNoLoginTried)
|
||||||
score, err = defender.GetScore(testIP)
|
score, err = defender.GetScore(testIP)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, 0, score)
|
assert.Equal(t, 0, score)
|
||||||
|
@ -150,7 +156,7 @@ func TestBasicDbDefender(t *testing.T) {
|
||||||
assert.Equal(t, 0, host.Score)
|
assert.Equal(t, 0, host.Score)
|
||||||
assert.NotEmpty(t, host.GetBanTime())
|
assert.NotEmpty(t, host.GetBanTime())
|
||||||
// ban time should increase
|
// ban time should increase
|
||||||
assert.True(t, defender.IsBanned(testIP))
|
assert.True(t, defender.IsBanned(testIP, ProtocolSSH))
|
||||||
newBanTime, err := defender.GetBanTime(testIP)
|
newBanTime, err := defender.GetBanTime(testIP)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.True(t, newBanTime.After(*banTime))
|
assert.True(t, newBanTime.After(*banTime))
|
||||||
|
@ -162,9 +168,9 @@ func TestBasicDbDefender(t *testing.T) {
|
||||||
testIP2 := "123.45.67.91"
|
testIP2 := "123.45.67.91"
|
||||||
testIP3 := "123.45.67.92"
|
testIP3 := "123.45.67.92"
|
||||||
for i := 0; i < 3; i++ {
|
for i := 0; i < 3; i++ {
|
||||||
defender.AddEvent(testIP, HostEventUserNotFound)
|
defender.AddEvent(testIP, ProtocolSSH, HostEventUserNotFound)
|
||||||
defender.AddEvent(testIP1, HostEventNoLoginTried)
|
defender.AddEvent(testIP1, ProtocolSSH, HostEventNoLoginTried)
|
||||||
defender.AddEvent(testIP2, HostEventUserNotFound)
|
defender.AddEvent(testIP2, ProtocolSSH, HostEventUserNotFound)
|
||||||
}
|
}
|
||||||
hosts, err = defender.GetHosts()
|
hosts, err = defender.GetHosts()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
@ -174,7 +180,7 @@ func TestBasicDbDefender(t *testing.T) {
|
||||||
assert.False(t, host.BanTime.IsZero())
|
assert.False(t, host.BanTime.IsZero())
|
||||||
assert.NotEmpty(t, host.GetBanTime())
|
assert.NotEmpty(t, host.GetBanTime())
|
||||||
}
|
}
|
||||||
defender.AddEvent(testIP3, HostEventLoginFailed)
|
defender.AddEvent(testIP3, ProtocolSSH, HostEventLoginFailed)
|
||||||
hosts, err = defender.GetHosts()
|
hosts, err = defender.GetHosts()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Len(t, hosts, 4)
|
assert.Len(t, hosts, 4)
|
||||||
|
@ -248,11 +254,11 @@ func TestBasicDbDefender(t *testing.T) {
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Len(t, hosts, 0)
|
assert.Len(t, hosts, 0)
|
||||||
|
|
||||||
err = os.Remove(slFile)
|
for _, e := range entries {
|
||||||
assert.NoError(t, err)
|
err := dataprovider.DeleteIPListEntry(e.IPOrNet, e.Type, "", "", "")
|
||||||
err = os.Remove(blFile)
|
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestDbDefenderCleanup(t *testing.T) {
|
func TestDbDefenderCleanup(t *testing.T) {
|
||||||
if !isDbDefenderSupported() {
|
if !isDbDefenderSupported() {
|
||||||
|
@ -280,6 +286,8 @@ func TestDbDefenderCleanup(t *testing.T) {
|
||||||
assert.False(t, lastCleanup.IsZero())
|
assert.False(t, lastCleanup.IsZero())
|
||||||
defender.cleanup()
|
defender.cleanup()
|
||||||
assert.Equal(t, lastCleanup, defender.getLastCleanup())
|
assert.Equal(t, lastCleanup, defender.getLastCleanup())
|
||||||
|
defender.setLastCleanup(time.Time{})
|
||||||
|
assert.True(t, defender.getLastCleanup().IsZero())
|
||||||
defender.setLastCleanup(time.Now().Add(-time.Duration(config.ObservationTime) * time.Minute * 4))
|
defender.setLastCleanup(time.Now().Add(-time.Duration(config.ObservationTime) * time.Minute * 4))
|
||||||
time.Sleep(20 * time.Millisecond)
|
time.Sleep(20 * time.Millisecond)
|
||||||
defender.cleanup()
|
defender.cleanup()
|
||||||
|
@ -289,7 +297,7 @@ func TestDbDefenderCleanup(t *testing.T) {
|
||||||
err = dataprovider.Close()
|
err = dataprovider.Close()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
lastCleanup = time.Now().Add(-time.Duration(config.ObservationTime) * time.Minute * 4)
|
lastCleanup = util.GetTimeFromMsecSinceEpoch(time.Now().Add(-time.Duration(config.ObservationTime) * time.Minute * 4).UnixMilli())
|
||||||
defender.setLastCleanup(lastCleanup)
|
defender.setLastCleanup(lastCleanup)
|
||||||
defender.cleanup()
|
defender.cleanup()
|
||||||
// cleanup will fail and so last cleanup should be reset to the previous value
|
// cleanup will fail and so last cleanup should be reset to the previous value
|
||||||
|
|
|
@ -16,6 +16,7 @@ package common
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"sort"
|
"sort"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/drakkan/sftpgo/v2/internal/dataprovider"
|
"github.com/drakkan/sftpgo/v2/internal/dataprovider"
|
||||||
|
@ -24,6 +25,7 @@ import (
|
||||||
|
|
||||||
type memoryDefender struct {
|
type memoryDefender struct {
|
||||||
baseDefender
|
baseDefender
|
||||||
|
sync.RWMutex
|
||||||
// IP addresses of the clients trying to connect are stored inside hosts,
|
// IP addresses of the clients trying to connect are stored inside hosts,
|
||||||
// they are added to banned once the threshold is reached.
|
// they are added to banned once the threshold is reached.
|
||||||
// A violation from a banned host will increase the ban time
|
// A violation from a banned host will increase the ban time
|
||||||
|
@ -37,18 +39,19 @@ func newInMemoryDefender(config *DefenderConfig) (Defender, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
ipList, err := dataprovider.NewIPList(dataprovider.IPListTypeDefender)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
defender := &memoryDefender{
|
defender := &memoryDefender{
|
||||||
baseDefender: baseDefender{
|
baseDefender: baseDefender{
|
||||||
config: config,
|
config: config,
|
||||||
|
ipList: ipList,
|
||||||
},
|
},
|
||||||
hosts: make(map[string]hostScore),
|
hosts: make(map[string]hostScore),
|
||||||
banned: make(map[string]time.Time),
|
banned: make(map[string]time.Time),
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := defender.Reload(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return defender, nil
|
return defender, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -119,7 +122,7 @@ func (d *memoryDefender) GetHost(ip string) (dataprovider.DefenderEntry, error)
|
||||||
// IsBanned returns true if the specified IP is banned
|
// IsBanned returns true if the specified IP is banned
|
||||||
// and increases ban time if the IP is found.
|
// and increases ban time if the IP is found.
|
||||||
// This method must be called as soon as the client connects
|
// This method must be called as soon as the client connects
|
||||||
func (d *memoryDefender) IsBanned(ip string) bool {
|
func (d *memoryDefender) IsBanned(ip, protocol string) bool {
|
||||||
d.RLock()
|
d.RLock()
|
||||||
|
|
||||||
if banTime, ok := d.banned[ip]; ok {
|
if banTime, ok := d.banned[ip]; ok {
|
||||||
|
@ -145,7 +148,7 @@ func (d *memoryDefender) IsBanned(ip string) bool {
|
||||||
|
|
||||||
defer d.RUnlock()
|
defer d.RUnlock()
|
||||||
|
|
||||||
return d.baseDefender.isBanned(ip)
|
return d.baseDefender.isBanned(ip, protocol)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteHost removes the specified IP from the defender lists
|
// DeleteHost removes the specified IP from the defender lists
|
||||||
|
@ -168,14 +171,14 @@ func (d *memoryDefender) DeleteHost(ip string) bool {
|
||||||
|
|
||||||
// AddEvent adds an event for the given IP.
|
// AddEvent adds an event for the given IP.
|
||||||
// This method must be called for clients not yet banned
|
// This method must be called for clients not yet banned
|
||||||
func (d *memoryDefender) AddEvent(ip string, event HostEvent) {
|
func (d *memoryDefender) AddEvent(ip, protocol string, event HostEvent) {
|
||||||
d.Lock()
|
if d.IsSafe(ip, protocol) {
|
||||||
defer d.Unlock()
|
|
||||||
|
|
||||||
if d.safeList != nil && d.safeList.isListed(ip) {
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
d.Lock()
|
||||||
|
defer d.Unlock()
|
||||||
|
|
||||||
// ignore events for already banned hosts
|
// ignore events for already banned hosts
|
||||||
if v, ok := d.banned[ip]; ok {
|
if v, ok := d.banned[ip]; ok {
|
||||||
if v.After(time.Now()) {
|
if v.After(time.Now()) {
|
||||||
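
In the memory defender, AddEvent now asks IsSafe (which consults the new IP list with its own synchronization) before taking the write lock, so safe-listed clients never contend for it and the early return needs no unlock bookkeeping. A generic sketch of that fast-path pattern, with a plain read-only map standing in for the safe list lookup:

// --- illustrative sketch, not part of this commit ---
package main

import (
	"fmt"
	"sync"
)

type tracker struct {
	mu     sync.Mutex
	safe   map[string]bool // stands in for the defender's safe list lookup
	events map[string]int
}

// addEvent skips the exclusive lock entirely for safe-listed sources,
// mirroring the "if d.IsSafe(ip, protocol) { return }" fast path in the diff.
func (t *tracker) addEvent(ip string) {
	if t.safe[ip] { // safe map is immutable here, so no lock is needed for this check
		return
	}
	t.mu.Lock()
	defer t.mu.Unlock()
	t.events[ip]++
}

func main() {
	t := &tracker{safe: map[string]bool{"172.16.1.3": true}, events: make(map[string]int)}
	t.addEvent("172.16.1.3") // ignored, safe-listed
	t.addEvent("12.34.56.78")
	fmt.Println(t.events) // map[12.34.56.78:1]
}
// --- end of sketch ---
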
|
|
|
@ -37,6 +37,7 @@ func startEventScheduler() {
|
||||||
stopEventScheduler()
|
stopEventScheduler()
|
||||||
|
|
||||||
eventScheduler = cron.New(cron.WithLocation(time.UTC))
|
eventScheduler = cron.New(cron.WithLocation(time.UTC))
|
||||||
|
eventManager.loadRules()
|
||||||
_, err := eventScheduler.AddFunc("@every 10m", eventManager.loadRules)
|
_, err := eventScheduler.AddFunc("@every 10m", eventManager.loadRules)
|
||||||
util.PanicOnError(err)
|
util.PanicOnError(err)
|
||||||
eventScheduler.Start()
|
eventScheduler.Start()
|
||||||
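
The scheduler change above runs eventManager.loadRules() once before registering the "@every 10m" cron entry, so event rules are usable immediately instead of only after the first tick. A minimal sketch of the same run-now-then-schedule pattern with robfig/cron v3, where loadRules is a placeholder:

// --- illustrative sketch, not part of this commit ---
package main

import (
	"log"
	"time"

	"github.com/robfig/cron/v3"
)

// loadRules is a placeholder for work that must run both at startup
// and on every scheduler tick.
func loadRules() {
	log.Println("rules reloaded")
}

func main() {
	scheduler := cron.New(cron.WithLocation(time.UTC))
	// run once immediately so the rules are usable right away...
	loadRules()
	// ...and then refresh them periodically, as the diff does with "@every 10m"
	if _, err := scheduler.AddFunc("@every 10m", loadRules); err != nil {
		log.Fatal(err)
	}
	scheduler.Start()
	defer scheduler.Stop()
	time.Sleep(time.Second) // keep the sketch alive briefly
}
// --- end of sketch ---
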
|
|
|
@ -109,6 +109,12 @@ func TestMain(m *testing.M) {
|
||||||
providerConf.BackupsPath = backupsPath
|
providerConf.BackupsPath = backupsPath
|
||||||
logger.InfoToConsole("Starting COMMON tests, provider: %v", providerConf.Driver)
|
logger.InfoToConsole("Starting COMMON tests, provider: %v", providerConf.Driver)
|
||||||
|
|
||||||
|
err = dataprovider.Initialize(providerConf, configDir, true)
|
||||||
|
if err != nil {
|
||||||
|
logger.ErrorToConsole("error initializing data provider: %v", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
err = common.Initialize(config.GetCommonConfig(), 0)
|
err = common.Initialize(config.GetCommonConfig(), 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.WarnToConsole("error initializing common: %v", err)
|
logger.WarnToConsole("error initializing common: %v", err)
|
||||||
|
@ -116,12 +122,6 @@ func TestMain(m *testing.M) {
|
||||||
}
|
}
|
||||||
common.SetCertAutoReloadMode(true)
|
common.SetCertAutoReloadMode(true)
|
||||||
|
|
||||||
err = dataprovider.Initialize(providerConf, configDir, true)
|
|
||||||
if err != nil {
|
|
||||||
logger.ErrorToConsole("error initializing data provider: %v", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
httpConfig := config.GetHTTPConfig()
|
httpConfig := config.GetHTTPConfig()
|
||||||
httpConfig.Timeout = 5
|
httpConfig.Timeout = 5
|
||||||
httpConfig.RetryMax = 0
|
httpConfig.RetryMax = 0
|
||||||
|
@ -3189,6 +3189,87 @@ func TestUserPasswordHashing(t *testing.T) {
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAllowList(t *testing.T) {
|
||||||
|
configCopy := common.Config
|
||||||
|
|
||||||
|
entries := []dataprovider.IPListEntry{
|
||||||
|
{
|
||||||
|
IPOrNet: "172.18.1.1/32",
|
||||||
|
Type: dataprovider.IPListTypeAllowList,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
Protocols: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "172.18.1.2/32",
|
||||||
|
Type: dataprovider.IPListTypeAllowList,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
Protocols: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "10.8.7.0/24",
|
||||||
|
Type: dataprovider.IPListTypeAllowList,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
Protocols: 5,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "0.0.0.0/0",
|
||||||
|
Type: dataprovider.IPListTypeAllowList,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
Protocols: 8,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "::/0",
|
||||||
|
Type: dataprovider.IPListTypeAllowList,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
Protocols: 8,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, e := range entries {
|
||||||
|
_, resp, err := httpdtest.AddIPListEntry(e, http.StatusCreated)
|
||||||
|
assert.NoError(t, err, string(resp))
|
||||||
|
}
|
||||||
|
|
||||||
|
common.Config.AllowListStatus = 1
|
||||||
|
err := common.Initialize(common.Config, 0)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, common.Config.IsAllowListEnabled())
|
||||||
|
|
||||||
|
testIP := "172.18.1.1"
|
||||||
|
assert.NoError(t, common.Connections.IsNewConnectionAllowed(testIP, common.ProtocolFTP))
|
||||||
|
entry := entries[0]
|
||||||
|
entry.Protocols = 1
|
||||||
|
_, _, err = httpdtest.UpdateIPListEntry(entry, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Error(t, common.Connections.IsNewConnectionAllowed(testIP, common.ProtocolFTP))
|
||||||
|
assert.NoError(t, common.Connections.IsNewConnectionAllowed(testIP, common.ProtocolSSH))
|
||||||
|
_, err = httpdtest.RemoveIPListEntry(entry, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
entries = entries[1:]
|
||||||
|
assert.Error(t, common.Connections.IsNewConnectionAllowed(testIP, common.ProtocolSSH))
|
||||||
|
assert.Error(t, common.Connections.IsNewConnectionAllowed("172.18.1.3", common.ProtocolSSH))
|
||||||
|
assert.NoError(t, common.Connections.IsNewConnectionAllowed("172.18.1.3", common.ProtocolHTTP))
|
||||||
|
|
||||||
|
assert.NoError(t, common.Connections.IsNewConnectionAllowed("10.8.7.3", common.ProtocolWebDAV))
|
||||||
|
assert.NoError(t, common.Connections.IsNewConnectionAllowed("10.8.7.4", common.ProtocolSSH))
|
||||||
|
assert.Error(t, common.Connections.IsNewConnectionAllowed("10.8.7.4", common.ProtocolFTP))
|
||||||
|
assert.NoError(t, common.Connections.IsNewConnectionAllowed("10.8.7.4", common.ProtocolHTTP))
|
||||||
|
assert.NoError(t, common.Connections.IsNewConnectionAllowed("2001:0db8::1428:57ab", common.ProtocolHTTP))
|
||||||
|
assert.Error(t, common.Connections.IsNewConnectionAllowed("2001:0db8::1428:57ab", common.ProtocolSSH))
|
||||||
|
assert.Error(t, common.Connections.IsNewConnectionAllowed("10.8.8.2", common.ProtocolWebDAV))
|
||||||
|
assert.Error(t, common.Connections.IsNewConnectionAllowed("invalid IP", common.ProtocolHTTP))
|
||||||
|
|
||||||
|
common.Config = configCopy
|
||||||
|
err = common.Initialize(common.Config, 0)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.False(t, common.Config.IsAllowListEnabled())
|
||||||
|
|
||||||
|
for _, e := range entries {
|
||||||
|
_, err := httpdtest.RemoveIPListEntry(e, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
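
TestAllowList above exercises the Protocols field of an allow list entry. From the assertions it behaves as a bitmask, apparently SSH=1, FTP=2, WebDAV=4, HTTP=8, with 0 meaning every protocol; that mapping is inferred from this test only, the authoritative constants live in the dataprovider package. A small sketch of the check under that assumption:

// --- illustrative sketch, not part of this commit ---
package main

import "fmt"

// Protocol bits as inferred from the assertions in TestAllowList above;
// treat these values as an assumption.
const (
	protoSSH    = 1
	protoFTP    = 2
	protoWebDAV = 4
	protoHTTP   = 8
)

// allowedForProtocol interprets an entry's Protocols field: 0 means every
// protocol, otherwise the matching bit must be set.
func allowedForProtocol(entryProtocols, proto int) bool {
	if entryProtocols == 0 {
		return true
	}
	return entryProtocols&proto != 0
}

func main() {
	fmt.Println(allowedForProtocol(0, protoFTP))    // true: 0 matches everything
	fmt.Println(allowedForProtocol(5, protoWebDAV)) // true: 5 = SSH|WebDAV
	fmt.Println(allowedForProtocol(5, protoFTP))    // false
	fmt.Println(allowedForProtocol(8, protoHTTP))   // true
	fmt.Println(allowedForProtocol(1, protoFTP))    // false, as asserted after the entry update above
}
// --- end of sketch ---
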
func TestDbDefenderErrors(t *testing.T) {
|
func TestDbDefenderErrors(t *testing.T) {
|
||||||
if !isDbDefenderSupported() {
|
if !isDbDefenderSupported() {
|
||||||
t.Skip("this test is not supported with the current database provider")
|
t.Skip("this test is not supported with the current database provider")
|
||||||
|
@ -3203,7 +3284,7 @@ func TestDbDefenderErrors(t *testing.T) {
|
||||||
hosts, err := common.GetDefenderHosts()
|
hosts, err := common.GetDefenderHosts()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Len(t, hosts, 0)
|
assert.Len(t, hosts, 0)
|
||||||
common.AddDefenderEvent(testIP, common.HostEventLimitExceeded)
|
common.AddDefenderEvent(testIP, common.ProtocolSSH, common.HostEventLimitExceeded)
|
||||||
hosts, err = common.GetDefenderHosts()
|
hosts, err = common.GetDefenderHosts()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Len(t, hosts, 1)
|
assert.Len(t, hosts, 1)
|
||||||
|
@ -3217,7 +3298,7 @@ func TestDbDefenderErrors(t *testing.T) {
|
||||||
err = dataprovider.Close()
|
err = dataprovider.Close()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
common.AddDefenderEvent(testIP, common.HostEventLimitExceeded)
|
common.AddDefenderEvent(testIP, common.ProtocolFTP, common.HostEventLimitExceeded)
|
||||||
_, err = common.GetDefenderHosts()
|
_, err = common.GetDefenderHosts()
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
_, err = common.GetDefenderHost(testIP)
|
_, err = common.GetDefenderHost(testIP)
|
||||||
|
|
|
@ -17,7 +17,6 @@ package common
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
|
||||||
"sort"
|
"sort"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
@ -62,8 +61,6 @@ type RateLimiterConfig struct {
|
||||||
// Available protocols are: "SFTP", "FTP", "DAV".
|
// Available protocols are: "SFTP", "FTP", "DAV".
|
||||||
// A rate limiter with no protocols defined is disabled
|
// A rate limiter with no protocols defined is disabled
|
||||||
Protocols []string `json:"protocols" mapstructure:"protocols"`
|
Protocols []string `json:"protocols" mapstructure:"protocols"`
|
||||||
// AllowList defines a list of IP addresses and IP ranges excluded from rate limiting
|
|
||||||
AllowList []string `json:"allow_list" mapstructure:"mapstructure"`
|
|
||||||
// If the rate limit is exceeded, the defender is enabled, and this is a per-source limiter,
|
// If the rate limit is exceeded, the defender is enabled, and this is a per-source limiter,
|
||||||
// a new defender event will be generated
|
// a new defender event will be generated
|
||||||
GenerateDefenderEvents bool `json:"generate_defender_events" mapstructure:"generate_defender_events"`
|
GenerateDefenderEvents bool `json:"generate_defender_events" mapstructure:"generate_defender_events"`
|
||||||
|
@ -142,23 +139,12 @@ type rateLimiter struct {
|
||||||
globalBucket *rate.Limiter
|
globalBucket *rate.Limiter
|
||||||
buckets sourceBuckets
|
buckets sourceBuckets
|
||||||
generateDefenderEvents bool
|
generateDefenderEvents bool
|
||||||
allowList []func(net.IP) bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait blocks until the limit allows one event to happen
|
// Wait blocks until the limit allows one event to happen
|
||||||
// or returns an error if the time to wait exceeds the max
|
// or returns an error if the time to wait exceeds the max
|
||||||
// allowed delay
|
// allowed delay
|
||||||
func (rl *rateLimiter) Wait(source string) (time.Duration, error) {
|
func (rl *rateLimiter) Wait(source, protocol string) (time.Duration, error) {
|
||||||
if len(rl.allowList) > 0 {
|
|
||||||
ip := net.ParseIP(source)
|
|
||||||
if ip != nil {
|
|
||||||
for idx := range rl.allowList {
|
|
||||||
if rl.allowList[idx](ip) {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
var res *rate.Reservation
|
var res *rate.Reservation
|
||||||
if rl.globalBucket != nil {
|
if rl.globalBucket != nil {
|
||||||
res = rl.globalBucket.Reserve()
|
res = rl.globalBucket.Reserve()
|
||||||
|
@ -177,7 +163,7 @@ func (rl *rateLimiter) Wait(source string) (time.Duration, error) {
|
||||||
if delay > rl.maxDelay {
|
if delay > rl.maxDelay {
|
||||||
res.Cancel()
|
res.Cancel()
|
||||||
if rl.generateDefenderEvents && rl.globalBucket == nil {
|
if rl.generateDefenderEvents && rl.globalBucket == nil {
|
||||||
AddDefenderEvent(source, HostEventLimitExceeded)
|
AddDefenderEvent(source, protocol, HostEventLimitExceeded)
|
||||||
}
|
}
|
||||||
return delay, fmt.Errorf("rate limit exceed, wait time to respect rate %v, max wait time allowed %v", delay, rl.maxDelay)
|
return delay, fmt.Errorf("rate limit exceed, wait time to respect rate %v, max wait time allowed %v", delay, rl.maxDelay)
|
||||||
}
|
}
|
||||||
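
The Wait changes above drop the per-limiter allow list (exemptions now come from the data provider lists) and thread the protocol through so a generated defender event can name it. The throttling itself still reserves a token from a golang.org/x/time/rate bucket and cancels the reservation when honouring it would exceed the configured max delay; a standalone sketch of that reserve-or-reject pattern:

// --- illustrative sketch, not part of this commit ---
package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// waitOrReject mirrors the reserve/cancel logic of rateLimiter.Wait: take a
// reservation, and if honouring it would exceed maxDelay, give the token back
// and report the rejection instead of sleeping.
func waitOrReject(l *rate.Limiter, maxDelay time.Duration) error {
	res := l.Reserve()
	if !res.OK() {
		return fmt.Errorf("burst is zero, no reservation possible")
	}
	delay := res.Delay()
	if delay > maxDelay {
		res.Cancel()
		return fmt.Errorf("rate limit exceeded, required wait %v, max allowed %v", delay, maxDelay)
	}
	time.Sleep(delay)
	return nil
}

func main() {
	limiter := rate.NewLimiter(rate.Every(time.Second), 1)   // 1 event/sec, burst 1
	fmt.Println(waitOrReject(limiter, 100*time.Millisecond)) // <nil>: token available
	fmt.Println(waitOrReject(limiter, 100*time.Millisecond)) // error: would need ~1s
}
// --- end of sketch ---
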
|
|
|
@ -20,8 +20,6 @@ import (
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/drakkan/sftpgo/v2/internal/util"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestRateLimiterConfig(t *testing.T) {
|
func TestRateLimiterConfig(t *testing.T) {
|
||||||
|
@ -79,9 +77,9 @@ func TestRateLimiter(t *testing.T) {
|
||||||
Protocols: rateLimiterProtocolValues,
|
Protocols: rateLimiterProtocolValues,
|
||||||
}
|
}
|
||||||
limiter := config.getLimiter()
|
limiter := config.getLimiter()
|
||||||
_, err := limiter.Wait("")
|
_, err := limiter.Wait("", ProtocolFTP)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
_, err = limiter.Wait("")
|
_, err = limiter.Wait("", ProtocolSSH)
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
|
|
||||||
config.Type = int(rateLimiterTypeSource)
|
config.Type = int(rateLimiterTypeSource)
|
||||||
|
@ -91,28 +89,17 @@ func TestRateLimiter(t *testing.T) {
|
||||||
limiter = config.getLimiter()
|
limiter = config.getLimiter()
|
||||||
|
|
||||||
source := "192.168.1.2"
|
source := "192.168.1.2"
|
||||||
_, err = limiter.Wait(source)
|
_, err = limiter.Wait(source, ProtocolSSH)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
_, err = limiter.Wait(source)
|
_, err = limiter.Wait(source, ProtocolSSH)
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
// a different source should work
|
// a different source should work
|
||||||
_, err = limiter.Wait(source + "1")
|
_, err = limiter.Wait(source+"1", ProtocolSSH)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
allowList := []string{"192.168.1.0/24"}
|
|
||||||
allowFuncs, err := util.ParseAllowedIPAndRanges(allowList)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
limiter.allowList = allowFuncs
|
|
||||||
for i := 0; i < 5; i++ {
|
|
||||||
_, err = limiter.Wait(source)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
_, err = limiter.Wait("not an ip")
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
config.Burst = 0
|
config.Burst = 0
|
||||||
limiter = config.getLimiter()
|
limiter = config.getLimiter()
|
||||||
_, err = limiter.Wait(source)
|
_, err = limiter.Wait(source, ProtocolSSH)
|
||||||
require.ErrorIs(t, err, errReserve)
|
require.ErrorIs(t, err, errReserve)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -131,10 +118,10 @@ func TestLimiterCleanup(t *testing.T) {
|
||||||
source2 := "10.8.0.2"
|
source2 := "10.8.0.2"
|
||||||
source3 := "10.8.0.3"
|
source3 := "10.8.0.3"
|
||||||
source4 := "10.8.0.4"
|
source4 := "10.8.0.4"
|
||||||
_, err := limiter.Wait(source1)
|
_, err := limiter.Wait(source1, ProtocolSSH)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
time.Sleep(20 * time.Millisecond)
|
time.Sleep(20 * time.Millisecond)
|
||||||
_, err = limiter.Wait(source2)
|
_, err = limiter.Wait(source2, ProtocolSSH)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
time.Sleep(20 * time.Millisecond)
|
time.Sleep(20 * time.Millisecond)
|
||||||
assert.Len(t, limiter.buckets.buckets, 2)
|
assert.Len(t, limiter.buckets.buckets, 2)
|
||||||
|
@ -142,7 +129,7 @@ func TestLimiterCleanup(t *testing.T) {
|
||||||
assert.True(t, ok)
|
assert.True(t, ok)
|
||||||
_, ok = limiter.buckets.buckets[source2]
|
_, ok = limiter.buckets.buckets[source2]
|
||||||
assert.True(t, ok)
|
assert.True(t, ok)
|
||||||
_, err = limiter.Wait(source3)
|
_, err = limiter.Wait(source3, ProtocolSSH)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Len(t, limiter.buckets.buckets, 3)
|
assert.Len(t, limiter.buckets.buckets, 3)
|
||||||
_, ok = limiter.buckets.buckets[source1]
|
_, ok = limiter.buckets.buckets[source1]
|
||||||
|
@ -152,7 +139,7 @@ func TestLimiterCleanup(t *testing.T) {
|
||||||
_, ok = limiter.buckets.buckets[source3]
|
_, ok = limiter.buckets.buckets[source3]
|
||||||
assert.True(t, ok)
|
assert.True(t, ok)
|
||||||
time.Sleep(20 * time.Millisecond)
|
time.Sleep(20 * time.Millisecond)
|
||||||
_, err = limiter.Wait(source4)
|
_, err = limiter.Wait(source4, ProtocolSSH)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Len(t, limiter.buckets.buckets, 2)
|
assert.Len(t, limiter.buckets.buckets, 2)
|
||||||
_, ok = limiter.buckets.buckets[source3]
|
_, ok = limiter.buckets.buckets[source3]
|
||||||
|
|
|
@ -139,7 +139,7 @@ func (m *CertManager) IsRevoked(crt *x509.Certificate, caCrt *x509.Certificate)
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, crl := range m.crls {
|
for _, crl := range m.crls {
|
||||||
if !crl.HasExpired(time.Now()) && caCrt.CheckCRLSignature(crl) == nil {
|
if !crl.HasExpired(time.Now()) && caCrt.CheckCRLSignature(crl) == nil { //nolint:staticcheck
|
||||||
for _, rc := range crl.TBSCertList.RevokedCertificates {
|
for _, rc := range crl.TBSCertList.RevokedCertificates {
|
||||||
if rc.SerialNumber.Cmp(crt.SerialNumber) == 0 {
|
if rc.SerialNumber.Cmp(crt.SerialNumber) == 0 {
|
||||||
return true
|
return true
|
||||||
|
@ -171,7 +171,7 @@ func (m *CertManager) LoadCRLs() error {
|
||||||
logger.Warn(m.logSender, "", "unable to read revocation list %q", revocationList)
|
logger.Warn(m.logSender, "", "unable to read revocation list %q", revocationList)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
crl, err := x509.ParseCRL(crlBytes)
|
crl, err := x509.ParseCRL(crlBytes) //nolint:staticcheck
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Warn(m.logSender, "", "unable to parse revocation list %q", revocationList)
|
logger.Warn(m.logSender, "", "unable to parse revocation list %q", revocationList)
|
||||||
return err
|
return err
|
||||||
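
The //nolint:staticcheck annotations above silence deprecation warnings: x509.ParseCRL, CheckCRLSignature and TBSCertList are deprecated in current Go releases. The non-deprecated path is x509.ParseRevocationList together with RevocationList.CheckSignatureFrom, sketched below as a possible future direction rather than what this commit does.

// --- illustrative sketch, not part of this commit ---
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

// isRevoked shows the non-deprecated CRL path: x509.ParseRevocationList plus
// RevocationList.CheckSignatureFrom replace x509.ParseCRL, CheckCRLSignature
// and the TBSCertList field that the //nolint comments above keep using.
func isRevoked(crlPath string, caCert, cert *x509.Certificate) (bool, error) {
	raw, err := os.ReadFile(crlPath)
	if err != nil {
		return false, err
	}
	// accept both PEM ("X509 CRL" block) and raw DER input
	if block, _ := pem.Decode(raw); block != nil {
		raw = block.Bytes
	}
	crl, err := x509.ParseRevocationList(raw)
	if err != nil {
		return false, err
	}
	if !crl.NextUpdate.IsZero() && crl.NextUpdate.Before(time.Now()) {
		return false, fmt.Errorf("revocation list has expired")
	}
	if err := crl.CheckSignatureFrom(caCert); err != nil {
		return false, err
	}
	for _, rc := range crl.RevokedCertificates {
		if rc.SerialNumber.Cmp(cert.SerialNumber) == 0 {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	_ = isRevoked // illustration only; wire it into real certificates as needed
}
// --- end of sketch ---
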
|
|
|
@ -153,7 +153,6 @@ var (
|
||||||
Burst: 1,
|
Burst: 1,
|
||||||
Type: 2,
|
Type: 2,
|
||||||
Protocols: []string{common.ProtocolSSH, common.ProtocolFTP, common.ProtocolWebDAV, common.ProtocolHTTP},
|
Protocols: []string{common.ProtocolSSH, common.ProtocolFTP, common.ProtocolWebDAV, common.ProtocolHTTP},
|
||||||
AllowList: []string{},
|
|
||||||
GenerateDefenderEvents: false,
|
GenerateDefenderEvents: false,
|
||||||
EntriesSoftLimit: 100,
|
EntriesSoftLimit: 100,
|
||||||
EntriesHardLimit: 150,
|
EntriesHardLimit: 150,
|
||||||
|
@ -210,7 +209,7 @@ func Init() {
|
||||||
DataRetentionHook: "",
|
DataRetentionHook: "",
|
||||||
MaxTotalConnections: 0,
|
MaxTotalConnections: 0,
|
||||||
MaxPerHostConnections: 20,
|
MaxPerHostConnections: 20,
|
||||||
WhiteListFile: "",
|
AllowListStatus: 0,
|
||||||
AllowSelfConnections: 0,
|
AllowSelfConnections: 0,
|
||||||
DefenderConfig: common.DefenderConfig{
|
DefenderConfig: common.DefenderConfig{
|
||||||
Enabled: false,
|
Enabled: false,
|
||||||
|
@ -225,10 +224,6 @@ func Init() {
|
||||||
ObservationTime: 30,
|
ObservationTime: 30,
|
||||||
EntriesSoftLimit: 100,
|
EntriesSoftLimit: 100,
|
||||||
EntriesHardLimit: 150,
|
EntriesHardLimit: 150,
|
||||||
SafeListFile: "",
|
|
||||||
BlockListFile: "",
|
|
||||||
SafeList: []string{},
|
|
||||||
BlockList: []string{},
|
|
||||||
},
|
},
|
||||||
RateLimitersConfig: []common.RateLimiterConfig{defaultRateLimiter},
|
RateLimitersConfig: []common.RateLimiterConfig{defaultRateLimiter},
|
||||||
},
|
},
|
||||||
|
@ -889,12 +884,6 @@ func getRateLimitersFromEnv(idx int) {
|
||||||
isSet = true
|
isSet = true
|
||||||
}
|
}
|
||||||
|
|
||||||
allowList, ok := lookupStringListFromEnv(fmt.Sprintf("SFTPGO_COMMON__RATE_LIMITERS__%v__ALLOW_LIST", idx))
|
|
||||||
if ok {
|
|
||||||
rtlConfig.AllowList = allowList
|
|
||||||
isSet = true
|
|
||||||
}
|
|
||||||
|
|
||||||
generateEvents, ok := lookupBoolFromEnv(fmt.Sprintf("SFTPGO_COMMON__RATE_LIMITERS__%v__GENERATE_DEFENDER_EVENTS", idx))
|
generateEvents, ok := lookupBoolFromEnv(fmt.Sprintf("SFTPGO_COMMON__RATE_LIMITERS__%v__GENERATE_DEFENDER_EVENTS", idx))
|
||||||
if ok {
|
if ok {
|
||||||
rtlConfig.GenerateDefenderEvents = generateEvents
|
rtlConfig.GenerateDefenderEvents = generateEvents
|
||||||
|
@ -1959,7 +1948,7 @@ func setViperDefaults() {
|
||||||
viper.SetDefault("common.data_retention_hook", globalConf.Common.DataRetentionHook)
|
viper.SetDefault("common.data_retention_hook", globalConf.Common.DataRetentionHook)
|
||||||
viper.SetDefault("common.max_total_connections", globalConf.Common.MaxTotalConnections)
|
viper.SetDefault("common.max_total_connections", globalConf.Common.MaxTotalConnections)
|
||||||
viper.SetDefault("common.max_per_host_connections", globalConf.Common.MaxPerHostConnections)
|
viper.SetDefault("common.max_per_host_connections", globalConf.Common.MaxPerHostConnections)
|
||||||
viper.SetDefault("common.whitelist_file", globalConf.Common.WhiteListFile)
|
viper.SetDefault("common.allowlist_status", globalConf.Common.AllowListStatus)
|
||||||
viper.SetDefault("common.allow_self_connections", globalConf.Common.AllowSelfConnections)
|
viper.SetDefault("common.allow_self_connections", globalConf.Common.AllowSelfConnections)
|
||||||
viper.SetDefault("common.defender.enabled", globalConf.Common.DefenderConfig.Enabled)
|
viper.SetDefault("common.defender.enabled", globalConf.Common.DefenderConfig.Enabled)
|
||||||
viper.SetDefault("common.defender.driver", globalConf.Common.DefenderConfig.Driver)
|
viper.SetDefault("common.defender.driver", globalConf.Common.DefenderConfig.Driver)
|
||||||
|
@ -1973,10 +1962,6 @@ func setViperDefaults() {
|
||||||
viper.SetDefault("common.defender.observation_time", globalConf.Common.DefenderConfig.ObservationTime)
|
viper.SetDefault("common.defender.observation_time", globalConf.Common.DefenderConfig.ObservationTime)
|
||||||
viper.SetDefault("common.defender.entries_soft_limit", globalConf.Common.DefenderConfig.EntriesSoftLimit)
|
viper.SetDefault("common.defender.entries_soft_limit", globalConf.Common.DefenderConfig.EntriesSoftLimit)
|
||||||
viper.SetDefault("common.defender.entries_hard_limit", globalConf.Common.DefenderConfig.EntriesHardLimit)
|
viper.SetDefault("common.defender.entries_hard_limit", globalConf.Common.DefenderConfig.EntriesHardLimit)
|
||||||
viper.SetDefault("common.defender.safelist_file", globalConf.Common.DefenderConfig.SafeListFile)
|
|
||||||
viper.SetDefault("common.defender.blocklist_file", globalConf.Common.DefenderConfig.BlockListFile)
|
|
||||||
viper.SetDefault("common.defender.safelist", globalConf.Common.DefenderConfig.SafeList)
|
|
||||||
viper.SetDefault("common.defender.blocklist", globalConf.Common.DefenderConfig.BlockList)
|
|
||||||
viper.SetDefault("acme.email", globalConf.ACME.Email)
|
viper.SetDefault("acme.email", globalConf.ACME.Email)
|
||||||
viper.SetDefault("acme.key_type", globalConf.ACME.KeyType)
|
viper.SetDefault("acme.key_type", globalConf.ACME.KeyType)
|
||||||
viper.SetDefault("acme.certs_path", globalConf.ACME.CertsPath)
|
viper.SetDefault("acme.certs_path", globalConf.ACME.CertsPath)
|
||||||
|
|
|
@ -804,9 +804,7 @@ func TestRateLimitersFromEnv(t *testing.T) {
|
||||||
os.Setenv("SFTPGO_COMMON__RATE_LIMITERS__0__GENERATE_DEFENDER_EVENTS", "1")
|
os.Setenv("SFTPGO_COMMON__RATE_LIMITERS__0__GENERATE_DEFENDER_EVENTS", "1")
|
||||||
os.Setenv("SFTPGO_COMMON__RATE_LIMITERS__0__ENTRIES_SOFT_LIMIT", "50")
|
os.Setenv("SFTPGO_COMMON__RATE_LIMITERS__0__ENTRIES_SOFT_LIMIT", "50")
|
||||||
os.Setenv("SFTPGO_COMMON__RATE_LIMITERS__0__ENTRIES_HARD_LIMIT", "100")
|
os.Setenv("SFTPGO_COMMON__RATE_LIMITERS__0__ENTRIES_HARD_LIMIT", "100")
|
||||||
os.Setenv("SFTPGO_COMMON__RATE_LIMITERS__0__ALLOW_LIST", ", 172.16.2.4, ")
|
|
||||||
os.Setenv("SFTPGO_COMMON__RATE_LIMITERS__8__AVERAGE", "50")
|
os.Setenv("SFTPGO_COMMON__RATE_LIMITERS__8__AVERAGE", "50")
|
||||||
os.Setenv("SFTPGO_COMMON__RATE_LIMITERS__8__ALLOW_LIST", "192.168.1.1, 192.168.2.0/24")
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__0__AVERAGE")
|
os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__0__AVERAGE")
|
||||||
os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__0__PERIOD")
|
os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__0__PERIOD")
|
||||||
|
@ -816,9 +814,7 @@ func TestRateLimitersFromEnv(t *testing.T) {
|
||||||
os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__0__GENERATE_DEFENDER_EVENTS")
|
os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__0__GENERATE_DEFENDER_EVENTS")
|
||||||
os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__0__ENTRIES_SOFT_LIMIT")
|
os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__0__ENTRIES_SOFT_LIMIT")
|
||||||
os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__0__ENTRIES_HARD_LIMIT")
|
os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__0__ENTRIES_HARD_LIMIT")
|
||||||
os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__0__ALLOW_LIST")
|
|
||||||
os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__8__AVERAGE")
|
os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__8__AVERAGE")
|
||||||
os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__8__ALLOW_LIST")
|
|
||||||
})
|
})
|
||||||
|
|
||||||
err := config.LoadConfig(configDir, "")
|
err := config.LoadConfig(configDir, "")
|
||||||
|
@@ -836,12 +832,7 @@ func TestRateLimitersFromEnv(t *testing.T) {
 	require.True(t, limiters[0].GenerateDefenderEvents)
 	require.Equal(t, 50, limiters[0].EntriesSoftLimit)
 	require.Equal(t, 100, limiters[0].EntriesHardLimit)
-	require.Len(t, limiters[0].AllowList, 1)
-	require.Equal(t, "172.16.2.4", limiters[0].AllowList[0])
 	require.Equal(t, int64(50), limiters[1].Average)
-	require.Len(t, limiters[1].AllowList, 2)
-	require.Equal(t, "192.168.1.1", limiters[1].AllowList[0])
-	require.Equal(t, "192.168.2.0/24", limiters[1].AllowList[1])
 	// we check the default values here
 	require.Equal(t, int64(1000), limiters[1].Period)
 	require.Equal(t, 1, limiters[1].Burst)
@@ -51,6 +51,7 @@ const (
 	actionObjectEventAction = "event_action"
 	actionObjectEventRule = "event_rule"
 	actionObjectRole = "role"
+	actionObjectIPListEntry = "ip_list_entry"
 )

 var (
@@ -57,6 +57,7 @@ const (
 	PermAdminViewEvents = "view_events"
 	PermAdminManageEventRules = "manage_event_rules"
 	PermAdminManageRoles = "manage_roles"
+	PermAdminManageIPLists = "manage_ip_lists"
 )

 const (
@@ -73,9 +74,10 @@ var (
 		PermAdminViewUsers, PermAdminManageGroups, PermAdminViewConnections, PermAdminCloseConnections,
 		PermAdminViewServerStatus, PermAdminManageAdmins, PermAdminManageRoles, PermAdminManageEventRules,
 		PermAdminManageAPIKeys, PermAdminQuotaScans, PermAdminManageSystem, PermAdminManageDefender,
-		PermAdminViewDefender, PermAdminRetentionChecks, PermAdminMetadataChecks, PermAdminViewEvents}
+		PermAdminViewDefender, PermAdminManageIPLists, PermAdminRetentionChecks, PermAdminMetadataChecks,
+		PermAdminViewEvents}
 	forbiddenPermsForRoleAdmins = []string{PermAdminAny, PermAdminManageAdmins, PermAdminManageSystem,
-		PermAdminManageEventRules, PermAdminManageRoles}
+		PermAdminManageEventRules, PermAdminManageIPLists, PermAdminManageRoles}
 )

 // AdminTOTPConfig defines the time-based one time password configuration
@@ -18,10 +18,12 @@
 package dataprovider

 import (
+	"bytes"
 	"crypto/x509"
 	"encoding/json"
 	"errors"
 	"fmt"
+	"net/netip"
 	"path/filepath"
 	"sort"
 	"time"
@@ -35,7 +37,7 @@ import (
 )

 const (
-	boltDatabaseVersion = 26
+	boltDatabaseVersion = 27
 )

 var (
@@ -48,10 +50,11 @@ var (
 	actionsBucket = []byte("events_actions")
 	rulesBucket = []byte("events_rules")
 	rolesBucket = []byte("roles")
+	ipListsBucket = []byte("ip_lists")
 	dbVersionBucket = []byte("db_version")
 	dbVersionKey = []byte("version")
 	boltBuckets = [][]byte{usersBucket, groupsBucket, foldersBucket, adminsBucket, apiKeysBucket,
-		sharesBucket, actionsBucket, rulesBucket, rolesBucket, dbVersionBucket}
+		sharesBucket, actionsBucket, rulesBucket, rolesBucket, ipListsBucket, dbVersionBucket}
 )

 // BoltProvider defines the auth provider for bolt key/value store
@@ -85,7 +88,7 @@ func initializeBoltProvider(basePath string) error {
 			_, e := tx.CreateBucketIfNotExists(bucket)
 			return e
 		}); err != nil {
-			providerLog(logger.LevelError, "error creating bucket %#v: %v", string(bucket), err)
+			providerLog(logger.LevelError, "error creating bucket %q: %v", string(bucket), err)
 		}
 	}

@ -2749,6 +2752,231 @@ func (p *BoltProvider) dumpRoles() ([]Role, error) {
|
||||||
return roles, err
|
return roles, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *BoltProvider) ipListEntryExists(ipOrNet string, listType IPListType) (IPListEntry, error) {
|
||||||
|
entry := IPListEntry{
|
||||||
|
IPOrNet: ipOrNet,
|
||||||
|
Type: listType,
|
||||||
|
}
|
||||||
|
err := p.dbHandle.View(func(tx *bolt.Tx) error {
|
||||||
|
bucket, err := p.getIPListsBucket(tx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
e := bucket.Get([]byte(entry.getKey()))
|
||||||
|
if e == nil {
|
||||||
|
return util.NewRecordNotFoundError(fmt.Sprintf("entry %q does not exist", entry.IPOrNet))
|
||||||
|
}
|
||||||
|
err = json.Unmarshal(e, &entry)
|
||||||
|
if err == nil {
|
||||||
|
entry.PrepareForRendering()
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return entry, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *BoltProvider) addIPListEntry(entry *IPListEntry) error {
|
||||||
|
if err := entry.validate(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return p.dbHandle.Update(func(tx *bolt.Tx) error {
|
||||||
|
bucket, err := p.getIPListsBucket(tx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if e := bucket.Get([]byte(entry.getKey())); e != nil {
|
||||||
|
return fmt.Errorf("entry %q already exists", entry.IPOrNet)
|
||||||
|
}
|
||||||
|
entry.CreatedAt = util.GetTimeAsMsSinceEpoch(time.Now())
|
||||||
|
entry.UpdatedAt = util.GetTimeAsMsSinceEpoch(time.Now())
|
||||||
|
buf, err := json.Marshal(entry)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return bucket.Put([]byte(entry.getKey()), buf)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *BoltProvider) updateIPListEntry(entry *IPListEntry) error {
|
||||||
|
if err := entry.validate(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return p.dbHandle.Update(func(tx *bolt.Tx) error {
|
||||||
|
bucket, err := p.getIPListsBucket(tx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var e []byte
|
||||||
|
if e = bucket.Get([]byte(entry.getKey())); e == nil {
|
||||||
|
return fmt.Errorf("entry %q does not exist", entry.IPOrNet)
|
||||||
|
}
|
||||||
|
var oldEntry IPListEntry
|
||||||
|
err = json.Unmarshal(e, &oldEntry)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
entry.CreatedAt = oldEntry.CreatedAt
|
||||||
|
entry.UpdatedAt = util.GetTimeAsMsSinceEpoch(time.Now())
|
||||||
|
buf, err := json.Marshal(entry)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return bucket.Put([]byte(entry.getKey()), buf)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *BoltProvider) deleteIPListEntry(entry IPListEntry, softDelete bool) error {
|
||||||
|
return p.dbHandle.Update(func(tx *bolt.Tx) error {
|
||||||
|
bucket, err := p.getIPListsBucket(tx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if e := bucket.Get([]byte(entry.getKey())); e == nil {
|
||||||
|
return fmt.Errorf("entry %q does not exist", entry.IPOrNet)
|
||||||
|
}
|
||||||
|
return bucket.Delete([]byte(entry.getKey()))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *BoltProvider) getIPListEntries(listType IPListType, filter, from, order string, limit int) ([]IPListEntry, error) {
|
||||||
|
entries := make([]IPListEntry, 0, 15)
|
||||||
|
err := p.dbHandle.View(func(tx *bolt.Tx) error {
|
||||||
|
bucket, err := p.getIPListsBucket(tx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
prefix := []byte(fmt.Sprintf("%d_", listType))
|
||||||
|
acceptKey := func(k []byte) bool {
|
||||||
|
return k != nil && bytes.HasPrefix(k, prefix)
|
||||||
|
}
|
||||||
|
cursor := bucket.Cursor()
|
||||||
|
if order == OrderASC {
|
||||||
|
for k, v := cursor.Seek(prefix); acceptKey(k); k, v = cursor.Next() {
|
||||||
|
var entry IPListEntry
|
||||||
|
err = json.Unmarshal(v, &entry)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if entry.satisfySearchConstraints(filter, from, order) {
|
||||||
|
entry.PrepareForRendering()
|
||||||
|
entries = append(entries, entry)
|
||||||
|
if limit > 0 && len(entries) >= limit {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for k, v := cursor.Last(); acceptKey(k); k, v = cursor.Prev() {
|
||||||
|
var entry IPListEntry
|
||||||
|
err = json.Unmarshal(v, &entry)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if entry.satisfySearchConstraints(filter, from, order) {
|
||||||
|
entry.PrepareForRendering()
|
||||||
|
entries = append(entries, entry)
|
||||||
|
if limit > 0 && len(entries) >= limit {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
return entries, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *BoltProvider) getRecentlyUpdatedIPListEntries(after int64) ([]IPListEntry, error) {
|
||||||
|
return nil, ErrNotImplemented
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *BoltProvider) dumpIPListEntries() ([]IPListEntry, error) {
|
||||||
|
entries := make([]IPListEntry, 0, 10)
|
||||||
|
err := p.dbHandle.View(func(tx *bolt.Tx) error {
|
||||||
|
bucket, err := p.getIPListsBucket(tx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if count := bucket.Stats().KeyN; count > ipListMemoryLimit {
|
||||||
|
providerLog(logger.LevelInfo, "IP lists excluded from dump, too many entries: %d", count)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
cursor := bucket.Cursor()
|
||||||
|
for k, v := cursor.First(); k != nil; k, v = cursor.Next() {
|
||||||
|
var entry IPListEntry
|
||||||
|
err = json.Unmarshal(v, &entry)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
entry.PrepareForRendering()
|
||||||
|
entries = append(entries, entry)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
return entries, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *BoltProvider) countIPListEntries(listType IPListType) (int64, error) {
|
||||||
|
var count int64
|
||||||
|
err := p.dbHandle.View(func(tx *bolt.Tx) error {
|
||||||
|
bucket, err := p.getIPListsBucket(tx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if listType == 0 {
|
||||||
|
count = int64(bucket.Stats().KeyN)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
prefix := []byte(fmt.Sprintf("%d_", listType))
|
||||||
|
cursor := bucket.Cursor()
|
||||||
|
for k, _ := cursor.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, _ = cursor.Next() {
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
return count, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *BoltProvider) getListEntriesForIP(ip string, listType IPListType) ([]IPListEntry, error) {
|
||||||
|
entries := make([]IPListEntry, 0, 3)
|
||||||
|
ipAddr, err := netip.ParseAddr(ip)
|
||||||
|
if err != nil {
|
||||||
|
return entries, fmt.Errorf("invalid ip address %s", ip)
|
||||||
|
}
|
||||||
|
var netType int
|
||||||
|
var ipBytes []byte
|
||||||
|
if ipAddr.Is4() || ipAddr.Is4In6() {
|
||||||
|
netType = ipTypeV4
|
||||||
|
as4 := ipAddr.As4()
|
||||||
|
ipBytes = as4[:]
|
||||||
|
} else {
|
||||||
|
netType = ipTypeV6
|
||||||
|
as16 := ipAddr.As16()
|
||||||
|
ipBytes = as16[:]
|
||||||
|
}
|
||||||
|
err = p.dbHandle.View(func(tx *bolt.Tx) error {
|
||||||
|
bucket, err := p.getIPListsBucket(tx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
prefix := []byte(fmt.Sprintf("%d_", listType))
|
||||||
|
cursor := bucket.Cursor()
|
||||||
|
for k, v := cursor.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = cursor.Next() {
|
||||||
|
var entry IPListEntry
|
||||||
|
err = json.Unmarshal(v, &entry)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if entry.IPType == netType && bytes.Compare(ipBytes, entry.First) >= 0 && bytes.Compare(ipBytes, entry.Last) <= 0 {
|
||||||
|
entry.PrepareForRendering()
|
||||||
|
entries = append(entries, entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
return entries, err
|
||||||
|
}
|
||||||
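A note on the lookup in getListEntriesForIP above: each entry is stored under a key of the form "<type>_<ip-or-net>" and keeps the first and last address of its network as raw bytes, so the provider scans the keys with the matching type prefix and checks containment with a lexicographic byte comparison against First/Last. A minimal standalone sketch of that containment test, with an example address and prefix that are not taken from this commit (in the provider the last address comes from util.GetLastIPForPrefix; here it is hard-coded for brevity):

package main

import (
	"bytes"
	"fmt"
	"net/netip"
)

func main() {
	// Bounds of 192.168.1.0/24, stored as 4-byte slices like the entry's First/Last fields.
	prefix := netip.MustParsePrefix("192.168.1.0/24")
	first := prefix.Masked().Addr().As4()
	last := [4]byte{192, 168, 1, 255} // last address of the /24
	ip := netip.MustParseAddr("192.168.1.42").As4()

	contained := bytes.Compare(ip[:], first[:]) >= 0 && bytes.Compare(ip[:], last[:]) <= 0
	fmt.Println(contained) // true
}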
|
|
||||||
func (p *BoltProvider) setFirstDownloadTimestamp(username string) error {
|
func (p *BoltProvider) setFirstDownloadTimestamp(username string) error {
|
||||||
return p.dbHandle.Update(func(tx *bolt.Tx) error {
|
return p.dbHandle.Update(func(tx *bolt.Tx) error {
|
||||||
bucket, err := p.getUsersBucket(tx)
|
bucket, err := p.getUsersBucket(tx)
|
||||||
|
@@ -2833,9 +3061,9 @@ func (p *BoltProvider) migrateDatabase() error {
 		providerLog(logger.LevelError, "%v", err)
 		logger.ErrorToConsole("%v", err)
 		return err
-	case version == 23, version == 24, version == 25:
-		logger.InfoToConsole("updating database schema version: %d -> 26", version)
-		providerLog(logger.LevelInfo, "updating database schema version: %d -> 26", version)
+	case version == 23, version == 24, version == 25, version == 26:
+		logger.InfoToConsole("updating database schema version: %d -> 27", version)
+		providerLog(logger.LevelInfo, "updating database schema version: %d -> 27", version)
 		err := p.dbHandle.Update(func(tx *bolt.Tx) error {
 			rules, err := p.dumpEventRules()
 			if err != nil {
@@ -2845,8 +3073,8 @@ func (p *BoltProvider) migrateDatabase() error {
 			if err != nil {
 				return err
 			}
-			for _, rule := range rules {
-				rule := rule // pin
+			for idx := range rules {
+				rule := rules[idx]
 				if rule.Status == 1 {
 					continue
 				}
@@ -2867,7 +3095,7 @@ func (p *BoltProvider) migrateDatabase() error {
 		if err != nil {
 			return err
 		}
-		return updateBoltDatabaseVersion(p.dbHandle, 26)
+		return updateBoltDatabaseVersion(p.dbHandle, 27)
 	default:
 		if version > boltDatabaseVersion {
 			providerLog(logger.LevelError, "database schema version %d is newer than the supported one: %d", version,
|
||||||
return bucket, err
|
return bucket, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *BoltProvider) getIPListsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
||||||
|
var err error
|
||||||
|
bucket := tx.Bucket(rolesBucket)
|
||||||
|
if bucket == nil {
|
||||||
|
err = fmt.Errorf("unable to find IP lists bucket, bolt database structure not correcly defined")
|
||||||
|
}
|
||||||
|
return bucket, err
|
||||||
|
}
|
||||||
|
|
||||||
func (p *BoltProvider) getFoldersBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
func (p *BoltProvider) getFoldersBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
||||||
var err error
|
var err error
|
||||||
bucket := tx.Bucket(foldersBucket)
|
bucket := tx.Bucket(foldersBucket)
|
||||||
|
|
|
@@ -13,7 +13,7 @@
 // along with this program. If not, see <https://www.gnu.org/licenses/>.

 // Package dataprovider provides data access.
-// It abstracts different data providers and exposes a common API.
+// It abstracts different data providers using a common API.
 package dataprovider

 import (
@@ -87,7 +87,7 @@ const (
 	CockroachDataProviderName = "cockroachdb"
 	// DumpVersion defines the version for the dump.
 	// For restore/load we support the current version and the previous one
-	DumpVersion = 15
+	DumpVersion = 16

 	argonPwdPrefix = "$argon2id$"
 	bcryptPwdPrefix = "$2a$"
@@ -191,6 +191,7 @@ var (
 	sqlTableTasks string
 	sqlTableNodes string
 	sqlTableRoles string
+	sqlTableIPLists string
 	sqlTableSchemaVersion string
 	argon2Params *argon2id.Params
 	lastLoginMinDelay = 10 * time.Minute
@@ -223,6 +224,7 @@ func initSQLTables() {
 	sqlTableTasks = "tasks"
 	sqlTableNodes = "nodes"
 	sqlTableRoles = "roles"
+	sqlTableIPLists = "ip_lists"
 	sqlTableSchemaVersion = "schema_version"
 }

@@ -663,6 +665,7 @@ type BackupData struct {
 	EventActions []BaseEventAction `json:"event_actions"`
 	EventRules []EventRule `json:"event_rules"`
 	Roles []Role `json:"roles"`
+	IPLists []IPListEntry `json:"ip_lists"`
 	Version int `json:"version"`
 }

@@ -805,6 +808,15 @@ type Provider interface {
 	deleteRole(role Role) error
 	getRoles(limit int, offset int, order string, minimal bool) ([]Role, error)
 	dumpRoles() ([]Role, error)
+	ipListEntryExists(ipOrNet string, listType IPListType) (IPListEntry, error)
+	addIPListEntry(entry *IPListEntry) error
+	updateIPListEntry(entry *IPListEntry) error
+	deleteIPListEntry(entry IPListEntry, softDelete bool) error
+	getIPListEntries(listType IPListType, filter, from, order string, limit int) ([]IPListEntry, error)
+	getRecentlyUpdatedIPListEntries(after int64) ([]IPListEntry, error)
+	dumpIPListEntries() ([]IPListEntry, error)
+	countIPListEntries(listType IPListType) (int64, error)
+	getListEntriesForIP(ip string, listType IPListType) ([]IPListEntry, error)
 	checkAvailability() error
 	close() error
 	reloadConfig() error
@@ -984,16 +996,18 @@ func validateSQLTablesPrefix() error {
 		sqlTableTasks = config.SQLTablesPrefix + sqlTableTasks
 		sqlTableNodes = config.SQLTablesPrefix + sqlTableNodes
 		sqlTableRoles = config.SQLTablesPrefix + sqlTableRoles
+		sqlTableIPLists = config.SQLTablesPrefix + sqlTableIPLists
 		sqlTableSchemaVersion = config.SQLTablesPrefix + sqlTableSchemaVersion
 		providerLog(logger.LevelDebug, "sql table for users %q, folders %q users folders mapping %q admins %q "+
 			"api keys %q shares %q defender hosts %q defender events %q transfers %q groups %q "+
 			"users groups mapping %q admins groups mapping %q groups folders mapping %q shared sessions %q "+
-			"schema version %q events actions %q events rules %q rules actions mapping %q tasks %q nodes %q roles %q",
+			"schema version %q events actions %q events rules %q rules actions mapping %q tasks %q nodes %q roles %q"+
+			"ip lists %q",
 			sqlTableUsers, sqlTableFolders, sqlTableUsersFoldersMapping, sqlTableAdmins, sqlTableAPIKeys,
 			sqlTableShares, sqlTableDefenderHosts, sqlTableDefenderEvents, sqlTableActiveTransfers, sqlTableGroups,
 			sqlTableUsersGroupsMapping, sqlTableAdminsGroupsMapping, sqlTableGroupsFoldersMapping, sqlTableSharedSessions,
 			sqlTableSchemaVersion, sqlTableEventsActions, sqlTableEventsRules, sqlTableRulesActionsMapping,
-			sqlTableTasks, sqlTableNodes, sqlTableRoles)
+			sqlTableTasks, sqlTableNodes, sqlTableRoles, sqlTableIPLists)
 	}
 	return nil
 }
@@ -1543,6 +1557,59 @@ func ShareExists(shareID, username string) (Share, error) {
 	return provider.shareExists(shareID, username)
 }

+// AddIPListEntry adds a new IP list entry
+func AddIPListEntry(entry *IPListEntry, executor, ipAddress, executorRole string) error {
+	err := provider.addIPListEntry(entry)
+	if err == nil {
+		executeAction(operationAdd, executor, ipAddress, actionObjectIPListEntry, entry.getName(), executorRole, entry)
+		for _, l := range inMemoryLists {
+			l.addEntry(entry)
+		}
+	}
+	return err
+}
+
+// UpdateIPListEntry updates an existing IP list entry
+func UpdateIPListEntry(entry *IPListEntry, executor, ipAddress, executorRole string) error {
+	err := provider.updateIPListEntry(entry)
+	if err == nil {
+		executeAction(operationUpdate, executor, ipAddress, actionObjectIPListEntry, entry.getName(), executorRole, entry)
+		for _, l := range inMemoryLists {
+			l.updateEntry(entry)
+		}
+	}
+	return err
+}
+
+// DeleteIPListEntry deletes an existing IP list entry
+func DeleteIPListEntry(ipOrNet string, listType IPListType, executor, ipAddress, executorRole string) error {
+	entry, err := provider.ipListEntryExists(ipOrNet, listType)
+	if err != nil {
+		return err
+	}
+	err = provider.deleteIPListEntry(entry, config.IsShared == 1)
+	if err == nil {
+		executeAction(operationDelete, executor, ipAddress, actionObjectIPListEntry, entry.getName(), executorRole, &entry)
+		for _, l := range inMemoryLists {
+			l.removeEntry(&entry)
+		}
+	}
+	return err
+}
+
+// IPListEntryExists returns the IP list entry with the given IP/net and type if it exists
+func IPListEntryExists(ipOrNet string, listType IPListType) (IPListEntry, error) {
+	return provider.ipListEntryExists(ipOrNet, listType)
+}
+
+// GetIPListEntries returns the IP list entries applying the specified criteria and search limit
+func GetIPListEntries(listType IPListType, filter, from, order string, limit int) ([]IPListEntry, error) {
+	if !util.Contains(supportedIPListType, listType) {
+		return nil, util.NewValidationError(fmt.Sprintf("invalid list type %d", listType))
+	}
+	return provider.getIPListEntries(listType, filter, from, order, limit)
+}
+
 // AddRole adds a new role
 func AddRole(role *Role, executor, ipAddress, executorRole string) error {
 	role.Name = config.convertName(role.Name)
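For orientation, a hypothetical caller of the public helpers added above could look like the sketch below. The network, executor name and IP address are invented, the snippet assumes the data provider has already been initialized elsewhere, and "fmt" plus the dataprovider package are the only imports it relies on:

// Sketch only: adding a defender deny entry and listing the defender IP list.
func addDenyRule() error {
	entry := &dataprovider.IPListEntry{
		IPOrNet:   "203.0.113.0/24", // example network
		Type:      dataprovider.IPListTypeDefender,
		Mode:      dataprovider.ListModeDeny,
		Protocols: 0, // 0 = all supported protocols
	}
	if err := dataprovider.AddIPListEntry(entry, "admin", "127.0.0.1", ""); err != nil {
		return err
	}
	entries, err := dataprovider.GetIPListEntries(dataprovider.IPListTypeDefender, "", "", dataprovider.OrderASC, 100)
	if err != nil {
		return err
	}
	fmt.Printf("defender list now has %d entries\n", len(entries))
	return nil
}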
@@ -2235,6 +2302,10 @@ func DumpData() (BackupData, error) {
 	if err != nil {
 		return data, err
 	}
+	ipLists, err := provider.dumpIPListEntries()
+	if err != nil {
+		return data, err
+	}
 	data.Users = users
 	data.Groups = groups
 	data.Folders = folders
@@ -2244,6 +2315,7 @@ func DumpData() (BackupData, error) {
 	data.EventActions = actions
 	data.EventRules = rules
 	data.Roles = roles
+	data.IPLists = ipLists
 	data.Version = DumpVersion
 	return data, err
 }
@@ -500,7 +500,7 @@ func (c *EventActionEmailConfig) validate() error {

 // FolderRetention defines a folder retention configuration
 type FolderRetention struct {
-	// Path is the exposed virtual directory path, if no other specific retention is defined,
+	// Path is the virtual directory path, if no other specific retention is defined,
 	// the retention applies for sub directories too. For example if retention is defined
 	// for the paths "/" and "/sub" then the retention for "/" is applied for any file outside
 	// the "/sub" directory

493 internal/dataprovider/iplist.go Normal file
@@ -0,0 +1,493 @@
|
||||||
|
// Copyright (C) 2019-2023 Nicola Murino
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published
|
||||||
|
// by the Free Software Foundation, version 3.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package dataprovider
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/netip"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
"github.com/yl2chen/cidranger"
|
||||||
|
|
||||||
|
"github.com/drakkan/sftpgo/v2/internal/logger"
|
||||||
|
"github.com/drakkan/sftpgo/v2/internal/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// maximum number of entries to match in memory
|
||||||
|
// if the list contains more elements than this limit a
|
||||||
|
// database query will be executed
|
||||||
|
ipListMemoryLimit = 15000
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
inMemoryLists map[IPListType]*IPList
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
inMemoryLists = map[IPListType]*IPList{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPListType is the enumerable for the supported IP list types
|
||||||
|
type IPListType int
|
||||||
|
|
||||||
|
// AsString returns the string representation for the list type
|
||||||
|
func (t IPListType) AsString() string {
|
||||||
|
switch t {
|
||||||
|
case IPListTypeAllowList:
|
||||||
|
return "Allow list"
|
||||||
|
case IPListTypeDefender:
|
||||||
|
return "Defender"
|
||||||
|
case IPListTypeRateLimiterSafeList:
|
||||||
|
return "Rate limiters safe list"
|
||||||
|
default:
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Supported IP list types
|
||||||
|
const (
|
||||||
|
IPListTypeAllowList IPListType = iota + 1
|
||||||
|
IPListTypeDefender
|
||||||
|
IPListTypeRateLimiterSafeList
|
||||||
|
)
|
||||||
|
|
||||||
|
// Supported IP list modes
|
||||||
|
const (
|
||||||
|
ListModeAllow = iota + 1
|
||||||
|
ListModeDeny
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
ipTypeV4 = iota + 1
|
||||||
|
ipTypeV6
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
supportedIPListType = []IPListType{IPListTypeAllowList, IPListTypeDefender, IPListTypeRateLimiterSafeList}
|
||||||
|
)
|
||||||
|
|
||||||
|
// CheckIPListType returns an error if the provided IP list type is not valid
|
||||||
|
func CheckIPListType(t IPListType) error {
|
||||||
|
if !util.Contains(supportedIPListType, t) {
|
||||||
|
return util.NewValidationError(fmt.Sprintf("invalid list type %d", t))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPListEntry defines an entry for the IP addresses list
|
||||||
|
type IPListEntry struct {
|
||||||
|
IPOrNet string `json:"ipornet"`
|
||||||
|
Description string `json:"description,omitempty"`
|
||||||
|
Type IPListType `json:"type"`
|
||||||
|
Mode int `json:"mode"`
|
||||||
|
// Defines the protocols the entry applies to
|
||||||
|
// - 0 all the supported protocols
|
||||||
|
// - 1 SSH
|
||||||
|
// - 2 FTP
|
||||||
|
// - 4 WebDAV
|
||||||
|
// - 8 HTTP
|
||||||
|
// Protocols can be combined
|
||||||
|
Protocols int `json:"protocols"`
|
||||||
|
First []byte `json:"first,omitempty"`
|
||||||
|
Last []byte `json:"last,omitempty"`
|
||||||
|
IPType int `json:"ip_type,omitempty"`
|
||||||
|
// Creation time as unix timestamp in milliseconds
|
||||||
|
CreatedAt int64 `json:"created_at"`
|
||||||
|
// last update time as unix timestamp in milliseconds
|
||||||
|
UpdatedAt int64 `json:"updated_at"`
|
||||||
|
// in multi node setups we mark the rule as deleted to be able to update the cache
|
||||||
|
DeletedAt int64 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrepareForRendering prepares an IP list entry for rendering.
|
||||||
|
// It hides internal fields
|
||||||
|
func (e *IPListEntry) PrepareForRendering() {
|
||||||
|
e.First = nil
|
||||||
|
e.Last = nil
|
||||||
|
e.IPType = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasProtocol returns true if the specified protocol is defined
|
||||||
|
func (e *IPListEntry) HasProtocol(proto string) bool {
|
||||||
|
switch proto {
|
||||||
|
case protocolSSH:
|
||||||
|
return e.Protocols&1 != 0
|
||||||
|
case protocolFTP:
|
||||||
|
return e.Protocols&2 != 0
|
||||||
|
case protocolWebDAV:
|
||||||
|
return e.Protocols&4 != 0
|
||||||
|
case protocolHTTP:
|
||||||
|
return e.Protocols&8 != 0
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
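The Protocols bitmask documented above (1 SSH, 2 FTP, 4 WebDAV, 8 HTTP, 0 for all) combines with bitwise OR, which is what HasProtocol tests. A tiny illustrative sketch, with local constants that are not part of this commit:

package main

import "fmt"

const (
	protoSSH    = 1
	protoFTP    = 2
	protoWebDAV = 4
	protoHTTP   = 8
)

func main() {
	protocols := protoSSH | protoHTTP       // entry applies to SSH and HTTP only
	fmt.Println(protocols&protoSSH != 0)    // true
	fmt.Println(protocols&protoFTP != 0)    // false
	fmt.Println(protocols&protoWebDAV != 0) // false
}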
|
|
||||||
|
// RenderAsJSON implements the renderer interface used within plugins
|
||||||
|
func (e *IPListEntry) RenderAsJSON(reload bool) ([]byte, error) {
|
||||||
|
if reload {
|
||||||
|
entry, err := provider.ipListEntryExists(e.IPOrNet, e.Type)
|
||||||
|
if err != nil {
|
||||||
|
providerLog(logger.LevelError, "unable to reload IP list entry before rendering as json: %v", err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
entry.PrepareForRendering()
|
||||||
|
return json.Marshal(entry)
|
||||||
|
}
|
||||||
|
e.PrepareForRendering()
|
||||||
|
return json.Marshal(e)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *IPListEntry) getKey() string {
|
||||||
|
return fmt.Sprintf("%d_%s", e.Type, e.IPOrNet)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *IPListEntry) getName() string {
|
||||||
|
return e.Type.AsString() + "-" + e.IPOrNet
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *IPListEntry) getFirst() netip.Addr {
|
||||||
|
if e.IPType == ipTypeV4 {
|
||||||
|
var a4 [4]byte
|
||||||
|
copy(a4[:], e.First)
|
||||||
|
return netip.AddrFrom4(a4)
|
||||||
|
}
|
||||||
|
var a16 [16]byte
|
||||||
|
copy(a16[:], e.First)
|
||||||
|
return netip.AddrFrom16(a16)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *IPListEntry) getLast() netip.Addr {
|
||||||
|
if e.IPType == ipTypeV4 {
|
||||||
|
var a4 [4]byte
|
||||||
|
copy(a4[:], e.Last)
|
||||||
|
return netip.AddrFrom4(a4)
|
||||||
|
}
|
||||||
|
var a16 [16]byte
|
||||||
|
copy(a16[:], e.Last)
|
||||||
|
return netip.AddrFrom16(a16)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *IPListEntry) checkProtocols() {
|
||||||
|
for _, proto := range ValidProtocols {
|
||||||
|
if !e.HasProtocol(proto) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
e.Protocols = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *IPListEntry) validate() error {
|
||||||
|
if err := CheckIPListType(e.Type); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
e.checkProtocols()
|
||||||
|
switch e.Type {
|
||||||
|
case IPListTypeDefender:
|
||||||
|
if e.Mode < ListModeAllow || e.Mode > ListModeDeny {
|
||||||
|
return util.NewValidationError(fmt.Sprintf("invalid list mode: %d", e.Mode))
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
if e.Mode != ListModeAllow {
|
||||||
|
return util.NewValidationError("invalid list mode")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
e.PrepareForRendering()
|
||||||
|
if !strings.Contains(e.IPOrNet, "/") {
|
||||||
|
// parse as IP
|
||||||
|
parsed, err := netip.ParseAddr(e.IPOrNet)
|
||||||
|
if err != nil {
|
||||||
|
return util.NewValidationError(fmt.Sprintf("invalid IP %q", e.IPOrNet))
|
||||||
|
}
|
||||||
|
if parsed.Is4() {
|
||||||
|
e.IPOrNet += "/32"
|
||||||
|
} else if parsed.Is4In6() {
|
||||||
|
e.IPOrNet = netip.AddrFrom4(parsed.As4()).String() + "/32"
|
||||||
|
} else {
|
||||||
|
e.IPOrNet += "/128"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
prefix, err := netip.ParsePrefix(e.IPOrNet)
|
||||||
|
if err != nil {
|
||||||
|
return util.NewValidationError(fmt.Sprintf("invalid network %q: %v", e.IPOrNet, err))
|
||||||
|
}
|
||||||
|
prefix = prefix.Masked()
|
||||||
|
if prefix.Addr().Is4In6() {
|
||||||
|
e.IPOrNet = fmt.Sprintf("%s/%d", netip.AddrFrom4(prefix.Addr().As4()).String(), prefix.Bits()-96)
|
||||||
|
}
|
||||||
|
// TODO: to remove when the in memory ranger switch to netip
|
||||||
|
_, _, err = net.ParseCIDR(e.IPOrNet)
|
||||||
|
if err != nil {
|
||||||
|
return util.NewValidationError(fmt.Sprintf("invalid network: %v", err))
|
||||||
|
}
|
||||||
|
if prefix.Addr().Is4() || prefix.Addr().Is4In6() {
|
||||||
|
e.IPType = ipTypeV4
|
||||||
|
first := prefix.Addr().As4()
|
||||||
|
last := util.GetLastIPForPrefix(prefix).As4()
|
||||||
|
e.First = first[:]
|
||||||
|
e.Last = last[:]
|
||||||
|
} else {
|
||||||
|
e.IPType = ipTypeV6
|
||||||
|
first := prefix.Addr().As16()
|
||||||
|
last := util.GetLastIPForPrefix(prefix).As16()
|
||||||
|
e.First = first[:]
|
||||||
|
e.Last = last[:]
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
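The validate method above normalizes bare addresses to CIDR form and unmaps IPv4-in-IPv6 addresses before masking the prefix and computing the First/Last bounds. A minimal sketch of that normalization step using only net/netip; the helper and the sample inputs are illustrative, not part of the commit:

package main

import (
	"fmt"
	"net/netip"
)

func normalize(ipOrNet string) string {
	a, err := netip.ParseAddr(ipOrNet)
	if err != nil {
		return ipOrNet // already a network, handled by ParsePrefix
	}
	switch {
	case a.Is4():
		return ipOrNet + "/32"
	case a.Is4In6():
		return netip.AddrFrom4(a.As4()).String() + "/32"
	default:
		return ipOrNet + "/128"
	}
}

func main() {
	fmt.Println(normalize("192.168.1.1"))     // 192.168.1.1/32
	fmt.Println(normalize("::ffff:10.0.0.1")) // 10.0.0.1/32
	fmt.Println(normalize("2001:db8::1"))     // 2001:db8::1/128
}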
||||||
|
func (e *IPListEntry) getACopy() IPListEntry {
|
||||||
|
first := make([]byte, len(e.First))
|
||||||
|
copy(first, e.First)
|
||||||
|
last := make([]byte, len(e.Last))
|
||||||
|
copy(last, e.Last)
|
||||||
|
|
||||||
|
return IPListEntry{
|
||||||
|
IPOrNet: e.IPOrNet,
|
||||||
|
Description: e.Description,
|
||||||
|
Type: e.Type,
|
||||||
|
Mode: e.Mode,
|
||||||
|
First: first,
|
||||||
|
Last: last,
|
||||||
|
IPType: e.IPType,
|
||||||
|
Protocols: e.Protocols,
|
||||||
|
CreatedAt: e.CreatedAt,
|
||||||
|
UpdatedAt: e.UpdatedAt,
|
||||||
|
DeletedAt: e.DeletedAt,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getAsRangerEntry returns the entry as cidranger.RangerEntry
|
||||||
|
func (e *IPListEntry) getAsRangerEntry() (cidranger.RangerEntry, error) {
|
||||||
|
_, network, err := net.ParseCIDR(e.IPOrNet)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
entry := e.getACopy()
|
||||||
|
return &rangerEntry{
|
||||||
|
entry: &entry,
|
||||||
|
network: *network,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e IPListEntry) satisfySearchConstraints(filter, from, order string) bool {
|
||||||
|
if filter != "" && !strings.HasPrefix(e.IPOrNet, filter) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if from != "" {
|
||||||
|
if order == OrderASC {
|
||||||
|
return e.IPOrNet > from
|
||||||
|
}
|
||||||
|
return e.IPOrNet < from
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
type rangerEntry struct {
|
||||||
|
entry *IPListEntry
|
||||||
|
network net.IPNet
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *rangerEntry) Network() net.IPNet {
|
||||||
|
return e.network
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPList defines an IP list
|
||||||
|
type IPList struct {
|
||||||
|
isInMemory atomic.Bool
|
||||||
|
listType IPListType
|
||||||
|
mu sync.RWMutex
|
||||||
|
Ranges cidranger.Ranger
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *IPList) addEntry(e *IPListEntry) {
|
||||||
|
if l.listType != e.Type {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !l.isInMemory.Load() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
entry, err := e.getAsRangerEntry()
|
||||||
|
if err != nil {
|
||||||
|
providerLog(logger.LevelError, "unable to get entry to add %q for list type %d, disabling memory mode, err: %v",
|
||||||
|
e.IPOrNet, l.listType, err)
|
||||||
|
l.isInMemory.Store(false)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.mu.Lock()
|
||||||
|
defer l.mu.Unlock()
|
||||||
|
|
||||||
|
if err := l.Ranges.Insert(entry); err != nil {
|
||||||
|
providerLog(logger.LevelError, "unable to add entry %q for list type %d, disabling memory mode, err: %v",
|
||||||
|
e.IPOrNet, l.listType, err)
|
||||||
|
l.isInMemory.Store(false)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if l.Ranges.Len() >= ipListMemoryLimit {
|
||||||
|
providerLog(logger.LevelError, "memory limit exceeded for list type %d, disabling memory mode", l.listType)
|
||||||
|
l.isInMemory.Store(false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *IPList) removeEntry(e *IPListEntry) {
|
||||||
|
if l.listType != e.Type {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !l.isInMemory.Load() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
entry, err := e.getAsRangerEntry()
|
||||||
|
if err != nil {
|
||||||
|
providerLog(logger.LevelError, "unable to get entry to remove %q for list type %d, disabling memory mode, err: %v",
|
||||||
|
e.IPOrNet, l.listType, err)
|
||||||
|
l.isInMemory.Store(false)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.mu.Lock()
|
||||||
|
defer l.mu.Unlock()
|
||||||
|
|
||||||
|
if _, err := l.Ranges.Remove(entry.Network()); err != nil {
|
||||||
|
providerLog(logger.LevelError, "unable to remove entry %q for list type %d, disabling memory mode, err: %v",
|
||||||
|
e.IPOrNet, l.listType, err)
|
||||||
|
l.isInMemory.Store(false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *IPList) updateEntry(e *IPListEntry) {
|
||||||
|
if l.listType != e.Type {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !l.isInMemory.Load() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
entry, err := e.getAsRangerEntry()
|
||||||
|
if err != nil {
|
||||||
|
providerLog(logger.LevelError, "unable to get entry to update %q for list type %d, disabling memory mode, err: %v",
|
||||||
|
e.IPOrNet, l.listType, err)
|
||||||
|
l.isInMemory.Store(false)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.mu.Lock()
|
||||||
|
defer l.mu.Unlock()
|
||||||
|
|
||||||
|
if _, err := l.Ranges.Remove(entry.Network()); err != nil {
|
||||||
|
providerLog(logger.LevelError, "unable to remove entry to update %q for list type %d, disabling memory mode, err: %v",
|
||||||
|
e.IPOrNet, l.listType, err)
|
||||||
|
l.isInMemory.Store(false)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := l.Ranges.Insert(entry); err != nil {
|
||||||
|
providerLog(logger.LevelError, "unable to add entry to update %q for list type %d, disabling memory mode, err: %v",
|
||||||
|
e.IPOrNet, l.listType, err)
|
||||||
|
l.isInMemory.Store(false)
|
||||||
|
}
|
||||||
|
if l.Ranges.Len() >= ipListMemoryLimit {
|
||||||
|
providerLog(logger.LevelError, "memory limit exceeded for list type %d, disabling memory mode", l.listType)
|
||||||
|
l.isInMemory.Store(false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DisableMemoryMode disables memory mode forcing database queries
|
||||||
|
func (l *IPList) DisableMemoryMode() {
|
||||||
|
l.isInMemory.Store(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsListed checks if there is a match for the specified IP and protocol.
|
||||||
|
// If there are multiple matches, the first one is returned, in no particular order,
|
||||||
|
// so the behavior is undefined
|
||||||
|
func (l *IPList) IsListed(ip, protocol string) (bool, int, error) {
|
||||||
|
if l.isInMemory.Load() {
|
||||||
|
l.mu.RLock()
|
||||||
|
defer l.mu.RUnlock()
|
||||||
|
|
||||||
|
parsedIP := net.ParseIP(ip)
|
||||||
|
if parsedIP == nil {
|
||||||
|
return false, 0, fmt.Errorf("invalid IP %s", ip)
|
||||||
|
}
|
||||||
|
|
||||||
|
entries, err := l.Ranges.ContainingNetworks(parsedIP)
|
||||||
|
if err != nil {
|
||||||
|
return false, 0, fmt.Errorf("unable to find containing networks for ip %q: %w", ip, err)
|
||||||
|
}
|
||||||
|
for _, e := range entries {
|
||||||
|
entry, ok := e.(*rangerEntry)
|
||||||
|
if ok {
|
||||||
|
if entry.entry.Protocols == 0 || entry.entry.HasProtocol(protocol) {
|
||||||
|
return true, entry.entry.Mode, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
entries, err := provider.getListEntriesForIP(ip, l.listType)
|
||||||
|
if err != nil {
|
||||||
|
return false, 0, err
|
||||||
|
}
|
||||||
|
for _, e := range entries {
|
||||||
|
if e.Protocols == 0 || e.HasProtocol(protocol) {
|
||||||
|
return true, e.Mode, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIPList returns a new IP list for the specified type
|
||||||
|
func NewIPList(listType IPListType) (*IPList, error) {
|
||||||
|
delete(inMemoryLists, listType)
|
||||||
|
count, err := provider.countIPListEntries(listType)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if count < ipListMemoryLimit {
|
||||||
|
providerLog(logger.LevelInfo, "using in-memory matching for list type %d, num entries: %d", listType, count)
|
||||||
|
entries, err := provider.getIPListEntries(listType, "", "", OrderASC, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ipList := &IPList{
|
||||||
|
listType: listType,
|
||||||
|
Ranges: cidranger.NewPCTrieRanger(),
|
||||||
|
}
|
||||||
|
for idx := range entries {
|
||||||
|
e := entries[idx]
|
||||||
|
entry, err := e.getAsRangerEntry()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to get ranger for entry %q: %w", e.IPOrNet, err)
|
||||||
|
}
|
||||||
|
if err := ipList.Ranges.Insert(entry); err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to add ranger for entry %q: %w", e.IPOrNet, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ipList.isInMemory.Store(true)
|
||||||
|
inMemoryLists[listType] = ipList
|
||||||
|
|
||||||
|
return ipList, nil
|
||||||
|
}
|
||||||
|
providerLog(logger.LevelInfo, "list type %d has %d entries, in-memory matching disabled", listType, count)
|
||||||
|
ipList := &IPList{
|
||||||
|
listType: listType,
|
||||||
|
Ranges: nil,
|
||||||
|
}
|
||||||
|
ipList.isInMemory.Store(false)
|
||||||
|
return ipList, nil
|
||||||
|
}
|
|
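To show how the pieces of the new file fit together, here is a hypothetical caller that builds a list and consults it for an incoming client. It is a sketch under assumptions: the provider is assumed to be initialized, the protocol string "SSH" is assumed to match the package's internal protocol constant, and in real use NewIPList would be called once at startup rather than per check:

// Sketch only: creating an IPList and checking whether a client IP is denied.
func clientIsDenied(ip string) (bool, error) {
	list, err := dataprovider.NewIPList(dataprovider.IPListTypeDefender)
	if err != nil {
		return false, err
	}
	listed, mode, err := list.IsListed(ip, "SSH")
	if err != nil {
		return false, err
	}
	// For the defender list, a match in deny mode means the client should be rejected.
	return listed && mode == dataprovider.ListModeDeny, nil
}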
@ -15,9 +15,11 @@
|
||||||
package dataprovider
|
package dataprovider
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"crypto/x509"
|
"crypto/x509"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net/netip"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sort"
|
"sort"
|
||||||
|
@ -74,6 +76,10 @@ type memoryProviderHandle struct {
|
||||||
roles map[string]Role
|
roles map[string]Role
|
||||||
// slice with ordered roles
|
// slice with ordered roles
|
||||||
roleNames []string
|
roleNames []string
|
||||||
|
// map for IP List entry
|
||||||
|
ipListEntries map[string]IPListEntry
|
||||||
|
// slice with ordered IP list entries
|
||||||
|
ipListEntriesKeys []string
|
||||||
}
|
}
|
||||||
|
|
||||||
// MemoryProvider defines the auth provider for a memory store
|
// MemoryProvider defines the auth provider for a memory store
|
||||||
|
@ -110,6 +116,8 @@ func initializeMemoryProvider(basePath string) {
|
||||||
rulesNames: []string{},
|
rulesNames: []string{},
|
||||||
roles: map[string]Role{},
|
roles: map[string]Role{},
|
||||||
roleNames: []string{},
|
roleNames: []string{},
|
||||||
|
ipListEntries: map[string]IPListEntry{},
|
||||||
|
ipListEntriesKeys: []string{},
|
||||||
configFile: configFile,
|
configFile: configFile,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -670,6 +678,13 @@ func (p *MemoryProvider) roleExistsInternal(name string) (Role, error) {
|
||||||
return Role{}, util.NewRecordNotFoundError(fmt.Sprintf("role %q does not exist", name))
|
return Role{}, util.NewRecordNotFoundError(fmt.Sprintf("role %q does not exist", name))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *MemoryProvider) ipListEntryExistsInternal(entry *IPListEntry) (IPListEntry, error) {
|
||||||
|
if val, ok := p.dbHandle.ipListEntries[entry.getKey()]; ok {
|
||||||
|
return val.getACopy(), nil
|
||||||
|
}
|
||||||
|
return IPListEntry{}, util.NewRecordNotFoundError(fmt.Sprintf("IP list entry %q does not exist", entry.getName()))
|
||||||
|
}
|
||||||
|
|
||||||
func (p *MemoryProvider) addAdmin(admin *Admin) error {
|
func (p *MemoryProvider) addAdmin(admin *Admin) error {
|
||||||
p.dbHandle.Lock()
|
p.dbHandle.Lock()
|
||||||
defer p.dbHandle.Unlock()
|
defer p.dbHandle.Unlock()
|
||||||
|
@ -2590,6 +2605,198 @@ func (p *MemoryProvider) dumpRoles() ([]Role, error) {
|
||||||
return roles, nil
|
return roles, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *MemoryProvider) ipListEntryExists(ipOrNet string, listType IPListType) (IPListEntry, error) {
|
||||||
|
p.dbHandle.Lock()
|
||||||
|
defer p.dbHandle.Unlock()
|
||||||
|
if p.dbHandle.isClosed {
|
||||||
|
return IPListEntry{}, errMemoryProviderClosed
|
||||||
|
}
|
||||||
|
entry, err := p.ipListEntryExistsInternal(&IPListEntry{IPOrNet: ipOrNet, Type: listType})
|
||||||
|
if err != nil {
|
||||||
|
return entry, err
|
||||||
|
}
|
||||||
|
entry.PrepareForRendering()
|
||||||
|
return entry, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *MemoryProvider) addIPListEntry(entry *IPListEntry) error {
|
||||||
|
if err := entry.validate(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
p.dbHandle.Lock()
|
||||||
|
defer p.dbHandle.Unlock()
|
||||||
|
if p.dbHandle.isClosed {
|
||||||
|
return errMemoryProviderClosed
|
||||||
|
}
|
||||||
|
_, err := p.ipListEntryExistsInternal(entry)
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("entry %q already exists", entry.IPOrNet)
|
||||||
|
}
|
||||||
|
entry.CreatedAt = util.GetTimeAsMsSinceEpoch(time.Now())
|
||||||
|
entry.UpdatedAt = util.GetTimeAsMsSinceEpoch(time.Now())
|
||||||
|
p.dbHandle.ipListEntries[entry.getKey()] = entry.getACopy()
|
||||||
|
p.dbHandle.ipListEntriesKeys = append(p.dbHandle.ipListEntriesKeys, entry.getKey())
|
||||||
|
sort.Strings(p.dbHandle.ipListEntriesKeys)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *MemoryProvider) updateIPListEntry(entry *IPListEntry) error {
|
||||||
|
if err := entry.validate(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
p.dbHandle.Lock()
|
||||||
|
defer p.dbHandle.Unlock()
|
||||||
|
if p.dbHandle.isClosed {
|
||||||
|
return errMemoryProviderClosed
|
||||||
|
}
|
||||||
|
oldEntry, err := p.ipListEntryExistsInternal(entry)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
entry.CreatedAt = oldEntry.CreatedAt
|
||||||
|
entry.UpdatedAt = util.GetTimeAsMsSinceEpoch(time.Now())
|
||||||
|
p.dbHandle.ipListEntries[entry.getKey()] = entry.getACopy()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *MemoryProvider) deleteIPListEntry(entry IPListEntry, softDelete bool) error {
|
||||||
|
if err := entry.validate(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
p.dbHandle.Lock()
|
||||||
|
defer p.dbHandle.Unlock()
|
||||||
|
if p.dbHandle.isClosed {
|
||||||
|
return errMemoryProviderClosed
|
||||||
|
}
|
||||||
|
_, err := p.ipListEntryExistsInternal(&entry)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
delete(p.dbHandle.ipListEntries, entry.getKey())
|
||||||
|
p.dbHandle.ipListEntriesKeys = make([]string, 0, len(p.dbHandle.ipListEntries))
|
||||||
|
for k := range p.dbHandle.ipListEntries {
|
||||||
|
p.dbHandle.ipListEntriesKeys = append(p.dbHandle.ipListEntriesKeys, k)
|
||||||
|
}
|
||||||
|
sort.Strings(p.dbHandle.ipListEntriesKeys)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *MemoryProvider) getIPListEntries(listType IPListType, filter, from, order string, limit int) ([]IPListEntry, error) {
|
||||||
|
p.dbHandle.Lock()
|
||||||
|
defer p.dbHandle.Unlock()
|
||||||
|
|
||||||
|
if p.dbHandle.isClosed {
|
||||||
|
return nil, errMemoryProviderClosed
|
||||||
|
}
|
||||||
|
entries := make([]IPListEntry, 0, 15)
|
||||||
|
if order == OrderASC {
|
||||||
|
for _, k := range p.dbHandle.ipListEntriesKeys {
|
||||||
|
e := p.dbHandle.ipListEntries[k]
|
||||||
|
if e.Type == listType && e.satisfySearchConstraints(filter, from, order) {
|
||||||
|
entry := e.getACopy()
|
||||||
|
entry.PrepareForRendering()
|
||||||
|
entries = append(entries, entry)
|
||||||
|
if limit > 0 && len(entries) >= limit {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for i := len(p.dbHandle.ipListEntriesKeys) - 1; i >= 0; i-- {
|
||||||
|
e := p.dbHandle.ipListEntries[p.dbHandle.ipListEntriesKeys[i]]
|
||||||
|
if e.Type == listType && e.satisfySearchConstraints(filter, from, order) {
|
||||||
|
entry := e.getACopy()
|
||||||
|
entry.PrepareForRendering()
|
||||||
|
entries = append(entries, entry)
|
||||||
|
if limit > 0 && len(entries) >= limit {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *MemoryProvider) getRecentlyUpdatedIPListEntries(after int64) ([]IPListEntry, error) {
|
||||||
|
return nil, ErrNotImplemented
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *MemoryProvider) dumpIPListEntries() ([]IPListEntry, error) {
|
||||||
|
p.dbHandle.Lock()
|
||||||
|
defer p.dbHandle.Unlock()
|
||||||
|
|
||||||
|
if p.dbHandle.isClosed {
|
||||||
|
return nil, errMemoryProviderClosed
|
||||||
|
}
|
||||||
|
if count := len(p.dbHandle.ipListEntriesKeys); count > ipListMemoryLimit {
|
||||||
|
providerLog(logger.LevelInfo, "IP lists excluded from dump, too many entries: %d", count)
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
entries := make([]IPListEntry, 0, len(p.dbHandle.ipListEntries))
|
||||||
|
for _, k := range p.dbHandle.ipListEntriesKeys {
|
||||||
|
e := p.dbHandle.ipListEntries[k]
|
||||||
|
entry := e.getACopy()
|
||||||
|
entry.PrepareForRendering()
|
||||||
|
entries = append(entries, entry)
|
||||||
|
}
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *MemoryProvider) countIPListEntries(listType IPListType) (int64, error) {
|
||||||
|
p.dbHandle.Lock()
|
||||||
|
defer p.dbHandle.Unlock()
|
||||||
|
|
||||||
|
if p.dbHandle.isClosed {
|
||||||
|
return 0, errMemoryProviderClosed
|
||||||
|
}
|
||||||
|
if listType == 0 {
|
||||||
|
return int64(len(p.dbHandle.ipListEntriesKeys)), nil
|
||||||
|
}
|
||||||
|
var count int64
|
||||||
|
for _, k := range p.dbHandle.ipListEntriesKeys {
|
||||||
|
e := p.dbHandle.ipListEntries[k]
|
||||||
|
if e.Type == listType {
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return count, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *MemoryProvider) getListEntriesForIP(ip string, listType IPListType) ([]IPListEntry, error) {
|
||||||
|
p.dbHandle.Lock()
|
||||||
|
defer p.dbHandle.Unlock()
|
||||||
|
|
||||||
|
if p.dbHandle.isClosed {
|
||||||
|
return nil, errMemoryProviderClosed
|
||||||
|
}
|
||||||
|
entries := make([]IPListEntry, 0, 3)
|
||||||
|
ipAddr, err := netip.ParseAddr(ip)
|
||||||
|
if err != nil {
|
||||||
|
return entries, fmt.Errorf("invalid ip address %s", ip)
|
||||||
|
}
|
||||||
|
var netType int
|
||||||
|
var ipBytes []byte
|
||||||
|
if ipAddr.Is4() || ipAddr.Is4In6() {
|
||||||
|
netType = ipTypeV4
|
||||||
|
as4 := ipAddr.As4()
|
||||||
|
ipBytes = as4[:]
|
||||||
|
} else {
|
||||||
|
netType = ipTypeV6
|
||||||
|
as16 := ipAddr.As16()
|
||||||
|
ipBytes = as16[:]
|
||||||
|
}
|
||||||
|
for _, k := range p.dbHandle.ipListEntriesKeys {
|
||||||
|
e := p.dbHandle.ipListEntries[k]
|
||||||
|
if e.Type == listType && e.IPType == netType && bytes.Compare(ipBytes, e.First) >= 0 && bytes.Compare(ipBytes, e.Last) <= 0 {
|
||||||
|
entry := e.getACopy()
|
||||||
|
entry.PrepareForRendering()
|
||||||
|
entries = append(entries, entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (p *MemoryProvider) setFirstDownloadTimestamp(username string) error {
func (p *MemoryProvider) setFirstDownloadTimestamp(username string) error {
p.dbHandle.Lock()
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
defer p.dbHandle.Unlock()

@ -2720,6 +2927,8 @@ func (p *MemoryProvider) clear() {
p.dbHandle.rulesNames = []string{}
p.dbHandle.rulesNames = []string{}
p.dbHandle.roles = map[string]Role{}
p.dbHandle.roles = map[string]Role{}
p.dbHandle.roleNames = []string{}
p.dbHandle.roleNames = []string{}
p.dbHandle.ipListEntries = map[string]IPListEntry{}
p.dbHandle.ipListEntriesKeys = []string{}
}
}

func (p *MemoryProvider) reloadConfig() error {
func (p *MemoryProvider) reloadConfig() error {

@ -2738,8 +2947,8 @@ func (p *MemoryProvider) reloadConfig() error {
providerLog(logger.LevelError, "error loading dump: %v", err)
providerLog(logger.LevelError, "error loading dump: %v", err)
return err
return err
}
}
if fi.Size() > 10485760 {
if fi.Size() > 20971520 {
err = errors.New("dump configuration file is invalid, its size must be <= 10485760 bytes")
err = errors.New("dump configuration file is invalid, its size must be <= 20971520 bytes")
providerLog(logger.LevelError, "error loading dump: %v", err)
providerLog(logger.LevelError, "error loading dump: %v", err)
return err
return err
}
}

@ -2753,12 +2962,16 @@ func (p *MemoryProvider) reloadConfig() error {
providerLog(logger.LevelError, "error loading dump: %v", err)
providerLog(logger.LevelError, "error loading dump: %v", err)
return err
return err
}
}
return p.restoreDump(dump)
return p.restoreDump(&dump)
}
}

func (p *MemoryProvider) restoreDump(dump BackupData) error {
func (p *MemoryProvider) restoreDump(dump *BackupData) error {
p.clear()
p.clear()

if err := p.restoreIPListEntries(*dump); err != nil {
return err
}

if err := p.restoreRoles(dump); err != nil {
if err := p.restoreRoles(dump); err != nil {
return err
return err
}
}
|
@ -2799,10 +3012,10 @@ func (p *MemoryProvider) restoreDump(dump BackupData) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *MemoryProvider) restoreEventActions(dump BackupData) error {
|
func (p *MemoryProvider) restoreEventActions(dump *BackupData) error {
|
||||||
for _, action := range dump.EventActions {
|
for idx := range dump.EventActions {
|
||||||
|
action := dump.EventActions[idx]
|
||||||
a, err := p.eventActionExists(action.Name)
|
a, err := p.eventActionExists(action.Name)
|
||||||
action := action // pin
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
action.ID = a.ID
|
action.ID = a.ID
|
||||||
err = UpdateEventAction(&action, ActionExecutorSystem, "", "")
|
err = UpdateEventAction(&action, ActionExecutorSystem, "", "")
|
||||||
|
@ -2821,10 +3034,10 @@ func (p *MemoryProvider) restoreEventActions(dump BackupData) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *MemoryProvider) restoreEventRules(dump BackupData) error {
|
func (p *MemoryProvider) restoreEventRules(dump *BackupData) error {
|
||||||
for _, rule := range dump.EventRules {
|
for idx := range dump.EventRules {
|
||||||
|
rule := dump.EventRules[idx]
|
||||||
r, err := p.eventRuleExists(rule.Name)
|
r, err := p.eventRuleExists(rule.Name)
|
||||||
rule := rule // pin
|
|
||||||
if dump.Version < 15 {
|
if dump.Version < 15 {
|
||||||
rule.Status = 1
|
rule.Status = 1
|
||||||
}
|
}
|
||||||
|
@ -2846,10 +3059,10 @@ func (p *MemoryProvider) restoreEventRules(dump BackupData) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *MemoryProvider) restoreShares(dump BackupData) error {
|
func (p *MemoryProvider) restoreShares(dump *BackupData) error {
|
||||||
for _, share := range dump.Shares {
|
for idx := range dump.Shares {
|
||||||
|
share := dump.Shares[idx]
|
||||||
s, err := p.shareExists(share.ShareID, "")
|
s, err := p.shareExists(share.ShareID, "")
|
||||||
share := share // pin
|
|
||||||
share.IsRestore = true
|
share.IsRestore = true
|
||||||
if err == nil {
|
if err == nil {
|
||||||
share.ID = s.ID
|
share.ID = s.ID
|
||||||
|
@ -2869,13 +3082,13 @@ func (p *MemoryProvider) restoreShares(dump BackupData) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *MemoryProvider) restoreAPIKeys(dump BackupData) error {
|
func (p *MemoryProvider) restoreAPIKeys(dump *BackupData) error {
|
||||||
for _, apiKey := range dump.APIKeys {
|
for idx := range dump.APIKeys {
|
||||||
|
apiKey := dump.APIKeys[idx]
|
||||||
if apiKey.Key == "" {
|
if apiKey.Key == "" {
|
||||||
return fmt.Errorf("cannot restore an empty API key: %+v", apiKey)
|
return fmt.Errorf("cannot restore an empty API key: %+v", apiKey)
|
||||||
}
|
}
|
||||||
k, err := p.apiKeyExists(apiKey.KeyID)
|
k, err := p.apiKeyExists(apiKey.KeyID)
|
||||||
apiKey := apiKey // pin
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
apiKey.ID = k.ID
|
apiKey.ID = k.ID
|
||||||
err = UpdateAPIKey(&apiKey, ActionExecutorSystem, "", "")
|
err = UpdateAPIKey(&apiKey, ActionExecutorSystem, "", "")
|
||||||
|
@ -2894,9 +3107,9 @@ func (p *MemoryProvider) restoreAPIKeys(dump BackupData) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *MemoryProvider) restoreAdmins(dump BackupData) error {
|
func (p *MemoryProvider) restoreAdmins(dump *BackupData) error {
|
||||||
for _, admin := range dump.Admins {
|
for idx := range dump.Admins {
|
||||||
admin := admin // pin
|
admin := dump.Admins[idx]
|
||||||
admin.Username = config.convertName(admin.Username)
|
admin.Username = config.convertName(admin.Username)
|
||||||
a, err := p.adminExists(admin.Username)
|
a, err := p.adminExists(admin.Username)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
|
@ -2917,9 +3130,30 @@ func (p *MemoryProvider) restoreAdmins(dump BackupData) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
func (p *MemoryProvider) restoreRoles(dump BackupData) error {
func (p *MemoryProvider) restoreIPListEntries(dump BackupData) error {
for _, role := range dump.Roles {
for idx := range dump.IPLists {
role := role // pin
entry := dump.IPLists[idx]
_, err := p.ipListEntryExists(entry.IPOrNet, entry.Type)
if err == nil {
err = UpdateIPListEntry(&entry, ActionExecutorSystem, "", "")
if err != nil {
providerLog(logger.LevelError, "error updating IP list entry %q: %v", entry.getName(), err)
return err
}
} else {
err = AddIPListEntry(&entry, ActionExecutorSystem, "", "")
if err != nil {
providerLog(logger.LevelError, "error adding IP list entry %q: %v", entry.getName(), err)
return err
}
}
}
return nil
}
func (p *MemoryProvider) restoreRoles(dump *BackupData) error {
for idx := range dump.Roles {
role := dump.Roles[idx]
role.Name = config.convertName(role.Name)
role.Name = config.convertName(role.Name)
r, err := p.roleExists(role.Name)
r, err := p.roleExists(role.Name)
if err == nil {
if err == nil {
||||||
|
@ -2942,9 +3176,9 @@ func (p *MemoryProvider) restoreRoles(dump BackupData) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *MemoryProvider) restoreGroups(dump BackupData) error {
|
func (p *MemoryProvider) restoreGroups(dump *BackupData) error {
|
||||||
for _, group := range dump.Groups {
|
for idx := range dump.Groups {
|
||||||
group := group // pin
|
group := dump.Groups[idx]
|
||||||
group.Name = config.convertName(group.Name)
|
group.Name = config.convertName(group.Name)
|
||||||
g, err := p.groupExists(group.Name)
|
g, err := p.groupExists(group.Name)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
|
@ -2966,9 +3200,9 @@ func (p *MemoryProvider) restoreGroups(dump BackupData) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *MemoryProvider) restoreFolders(dump BackupData) error {
|
func (p *MemoryProvider) restoreFolders(dump *BackupData) error {
|
||||||
for _, folder := range dump.Folders {
|
for idx := range dump.Folders {
|
||||||
folder := folder // pin
|
folder := dump.Folders[idx]
|
||||||
folder.Name = config.convertName(folder.Name)
|
folder.Name = config.convertName(folder.Name)
|
||||||
f, err := p.getFolderByName(folder.Name)
|
f, err := p.getFolderByName(folder.Name)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
|
@ -2990,9 +3224,9 @@ func (p *MemoryProvider) restoreFolders(dump BackupData) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *MemoryProvider) restoreUsers(dump BackupData) error {
|
func (p *MemoryProvider) restoreUsers(dump *BackupData) error {
|
||||||
for _, user := range dump.Users {
|
for idx := range dump.Users {
|
||||||
user := user // pin
|
user := dump.Users[idx]
|
||||||
user.Username = config.convertName(user.Username)
|
user.Username = config.convertName(user.Username)
|
||||||
u, err := p.userExists(user.Username, "")
|
u, err := p.userExists(user.Username, "")
|
||||||
if err == nil {
|
if err == nil {
|
||||||
|
|
|
@ -58,6 +58,7 @@ const (
|
||||||
"DROP TABLE IF EXISTS `{{tasks}}` CASCADE;" +
|
"DROP TABLE IF EXISTS `{{tasks}}` CASCADE;" +
|
||||||
"DROP TABLE IF EXISTS `{{nodes}}` CASCADE;" +
|
"DROP TABLE IF EXISTS `{{nodes}}` CASCADE;" +
|
||||||
"DROP TABLE IF EXISTS `{{roles}}` CASCADE;" +
|
"DROP TABLE IF EXISTS `{{roles}}` CASCADE;" +
|
||||||
|
"DROP TABLE IF EXISTS `{{ip_lists}}` CASCADE;" +
|
||||||
"DROP TABLE IF EXISTS `{{schema_version}}` CASCADE;"
|
"DROP TABLE IF EXISTS `{{schema_version}}` CASCADE;"
|
||||||
mysqlInitialSQL = "CREATE TABLE `{{schema_version}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `version` integer NOT NULL);" +
|
mysqlInitialSQL = "CREATE TABLE `{{schema_version}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `version` integer NOT NULL);" +
|
||||||
"CREATE TABLE `{{admins}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `username` varchar(255) NOT NULL UNIQUE, " +
|
"CREATE TABLE `{{admins}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `username` varchar(255) NOT NULL UNIQUE, " +
|
||||||
|
@ -189,6 +190,18 @@ const (
|
||||||
mysqlV26SQL = "ALTER TABLE `{{events_rules}}` ADD COLUMN `status` integer DEFAULT 1 NOT NULL; " +
|
mysqlV26SQL = "ALTER TABLE `{{events_rules}}` ADD COLUMN `status` integer DEFAULT 1 NOT NULL; " +
|
||||||
"ALTER TABLE `{{events_rules}}` ALTER COLUMN `status` DROP DEFAULT; "
|
"ALTER TABLE `{{events_rules}}` ALTER COLUMN `status` DROP DEFAULT; "
|
||||||
mysqlV26DownSQL = "ALTER TABLE `{{events_rules}}` DROP COLUMN `status`; "
mysqlV26DownSQL = "ALTER TABLE `{{events_rules}}` DROP COLUMN `status`; "
mysqlV27SQL = "CREATE TABLE `{{ip_lists}}` (`id` bigint AUTO_INCREMENT NOT NULL PRIMARY KEY, `type` integer NOT NULL, " +
"`ipornet` varchar(50) NOT NULL, `mode` integer NOT NULL, `description` varchar(512) NULL, " +
"`first` VARBINARY(16) NOT NULL, `last` VARBINARY(16) NOT NULL, `ip_type` integer NOT NULL, `protocols` integer NOT NULL, " +
"`created_at` bigint NOT NULL, `updated_at` bigint NOT NULL, `deleted_at` bigint NOT NULL);" +
"ALTER TABLE `{{ip_lists}}` ADD CONSTRAINT `{{prefix}}unique_ipornet_type_mapping` UNIQUE (`type`, `ipornet`);" +
"CREATE INDEX `{{prefix}}ip_lists_type_idx` ON `{{ip_lists}}` (`type`);" +
"CREATE INDEX `{{prefix}}ip_lists_ipornet_idx` ON `{{ip_lists}}` (`ipornet`);" +
"CREATE INDEX `{{prefix}}ip_lists_ip_type_idx` ON `{{ip_lists}}` (`ip_type`);" +
"CREATE INDEX `{{prefix}}ip_lists_updated_at_idx` ON `{{ip_lists}}` (`updated_at`);" +
"CREATE INDEX `{{prefix}}ip_lists_deleted_at_idx` ON `{{ip_lists}}` (`deleted_at`);" +
"CREATE INDEX `{{prefix}}ip_lists_first_last_idx` ON `{{ip_lists}}` (`first`, `last`);"
mysqlV27DownSQL = "DROP TABLE `{{ip_lists}}` CASCADE;"
)
)
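The {{ip_lists}} and {{prefix}} markers in these statements are templates; a minimal sketch of the substitution the migration helpers apply before executing them (the concrete table name and prefix below are made-up examples):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// one statement from mysqlV27SQL, with its placeholders still in place
	const tpl = "CREATE INDEX `{{prefix}}ip_lists_type_idx` ON `{{ip_lists}}` (`type`);"
	sql := strings.ReplaceAll(tpl, "{{ip_lists}}", "sftpgo_ip_lists")
	sql = strings.ReplaceAll(sql, "{{prefix}}", "sftpgo_")
	fmt.Println(sql)
	// CREATE INDEX `sftpgo_ip_lists_type_idx` ON `sftpgo_ip_lists` (`type`);
}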
||||||
|
|
||||||
// MySQLProvider defines the auth provider for MySQL/MariaDB database
|
// MySQLProvider defines the auth provider for MySQL/MariaDB database
|
||||||
|
@ -696,6 +709,42 @@ func (p *MySQLProvider) dumpRoles() ([]Role, error) {
|
||||||
return sqlCommonDumpRoles(p.dbHandle)
|
return sqlCommonDumpRoles(p.dbHandle)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *MySQLProvider) ipListEntryExists(ipOrNet string, listType IPListType) (IPListEntry, error) {
|
||||||
|
return sqlCommonGetIPListEntry(ipOrNet, listType, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *MySQLProvider) addIPListEntry(entry *IPListEntry) error {
|
||||||
|
return sqlCommonAddIPListEntry(entry, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *MySQLProvider) updateIPListEntry(entry *IPListEntry) error {
|
||||||
|
return sqlCommonUpdateIPListEntry(entry, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *MySQLProvider) deleteIPListEntry(entry IPListEntry, softDelete bool) error {
|
||||||
|
return sqlCommonDeleteIPListEntry(entry, softDelete, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *MySQLProvider) getIPListEntries(listType IPListType, filter, from, order string, limit int) ([]IPListEntry, error) {
|
||||||
|
return sqlCommonGetIPListEntries(listType, filter, from, order, limit, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *MySQLProvider) getRecentlyUpdatedIPListEntries(after int64) ([]IPListEntry, error) {
|
||||||
|
return sqlCommonGetRecentlyUpdatedIPListEntries(after, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *MySQLProvider) dumpIPListEntries() ([]IPListEntry, error) {
|
||||||
|
return sqlCommonDumpIPListEntries(p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *MySQLProvider) countIPListEntries(listType IPListType) (int64, error) {
|
||||||
|
return sqlCommonCountIPListEntries(listType, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *MySQLProvider) getListEntriesForIP(ip string, listType IPListType) ([]IPListEntry, error) {
|
||||||
|
return sqlCommonGetListEntriesForIP(ip, listType, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
func (p *MySQLProvider) setFirstDownloadTimestamp(username string) error {
|
func (p *MySQLProvider) setFirstDownloadTimestamp(username string) error {
|
||||||
return sqlCommonSetFirstDownloadTimestamp(username, p.dbHandle)
|
return sqlCommonSetFirstDownloadTimestamp(username, p.dbHandle)
|
||||||
}
|
}
|
||||||
|
@ -749,6 +798,8 @@ func (p *MySQLProvider) migrateDatabase() error { //nolint:dupl
|
||||||
return updateMySQLDatabaseFromV24(p.dbHandle)
|
return updateMySQLDatabaseFromV24(p.dbHandle)
|
||||||
case version == 25:
|
case version == 25:
|
||||||
return updateMySQLDatabaseFromV25(p.dbHandle)
|
return updateMySQLDatabaseFromV25(p.dbHandle)
|
||||||
|
case version == 26:
|
||||||
|
return updateMySQLDatabaseFromV26(p.dbHandle)
|
||||||
default:
|
default:
|
||||||
if version > sqlDatabaseVersion {
|
if version > sqlDatabaseVersion {
|
||||||
providerLog(logger.LevelError, "database schema version %d is newer than the supported one: %d", version,
|
providerLog(logger.LevelError, "database schema version %d is newer than the supported one: %d", version,
|
||||||
|
@ -777,6 +828,8 @@ func (p *MySQLProvider) revertDatabase(targetVersion int) error {
|
||||||
return downgradeMySQLDatabaseFromV25(p.dbHandle)
|
return downgradeMySQLDatabaseFromV25(p.dbHandle)
|
||||||
case 26:
|
case 26:
|
||||||
return downgradeMySQLDatabaseFromV26(p.dbHandle)
|
return downgradeMySQLDatabaseFromV26(p.dbHandle)
|
||||||
|
case 27:
|
||||||
|
return downgradeMySQLDatabaseFromV27(p.dbHandle)
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("database schema version not handled: %d", dbVersion.Version)
|
return fmt.Errorf("database schema version not handled: %d", dbVersion.Version)
|
||||||
}
|
}
|
||||||
|
@ -802,7 +855,14 @@ func updateMySQLDatabaseFromV24(dbHandle *sql.DB) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func updateMySQLDatabaseFromV25(dbHandle *sql.DB) error {
|
func updateMySQLDatabaseFromV25(dbHandle *sql.DB) error {
|
||||||
return updateMySQLDatabaseFrom25To26(dbHandle)
|
if err := updateMySQLDatabaseFrom25To26(dbHandle); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return updateMySQLDatabaseFromV26(dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func updateMySQLDatabaseFromV26(dbHandle *sql.DB) error {
|
||||||
|
return updateMySQLDatabaseFrom26To27(dbHandle)
|
||||||
}
|
}
|
||||||
|
|
||||||
func downgradeMySQLDatabaseFromV24(dbHandle *sql.DB) error {
|
func downgradeMySQLDatabaseFromV24(dbHandle *sql.DB) error {
|
||||||
|
@ -823,6 +883,13 @@ func downgradeMySQLDatabaseFromV26(dbHandle *sql.DB) error {
|
||||||
return downgradeMySQLDatabaseFromV25(dbHandle)
|
return downgradeMySQLDatabaseFromV25(dbHandle)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func downgradeMySQLDatabaseFromV27(dbHandle *sql.DB) error {
|
||||||
|
if err := downgradeMySQLDatabaseFrom27To26(dbHandle); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return downgradeMySQLDatabaseFromV26(dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
func updateMySQLDatabaseFrom23To24(dbHandle *sql.DB) error {
|
func updateMySQLDatabaseFrom23To24(dbHandle *sql.DB) error {
|
||||||
logger.InfoToConsole("updating database schema version: 23 -> 24")
|
logger.InfoToConsole("updating database schema version: 23 -> 24")
|
||||||
providerLog(logger.LevelInfo, "updating database schema version: 23 -> 24")
|
providerLog(logger.LevelInfo, "updating database schema version: 23 -> 24")
|
||||||
|
@ -847,6 +914,14 @@ func updateMySQLDatabaseFrom25To26(dbHandle *sql.DB) error {
|
||||||
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, strings.Split(sql, ";"), 26, true)
|
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, strings.Split(sql, ";"), 26, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func updateMySQLDatabaseFrom26To27(dbHandle *sql.DB) error {
|
||||||
|
logger.InfoToConsole("updating database schema version: 26 -> 27")
|
||||||
|
providerLog(logger.LevelInfo, "updating database schema version: 26 -> 27")
|
||||||
|
sql := strings.ReplaceAll(mysqlV27SQL, "{{ip_lists}}", sqlTableIPLists)
|
||||||
|
sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
|
||||||
|
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, strings.Split(sql, ";"), 27, true)
|
||||||
|
}
|
||||||
|
|
||||||
func downgradeMySQLDatabaseFrom24To23(dbHandle *sql.DB) error {
|
func downgradeMySQLDatabaseFrom24To23(dbHandle *sql.DB) error {
|
||||||
logger.InfoToConsole("downgrading database schema version: 24 -> 23")
|
logger.InfoToConsole("downgrading database schema version: 24 -> 23")
|
||||||
providerLog(logger.LevelInfo, "downgrading database schema version: 24 -> 23")
|
providerLog(logger.LevelInfo, "downgrading database schema version: 24 -> 23")
|
||||||
|
@ -870,3 +945,10 @@ func downgradeMySQLDatabaseFrom26To25(dbHandle *sql.DB) error {
|
||||||
sql := strings.ReplaceAll(mysqlV26DownSQL, "{{events_rules}}", sqlTableEventsRules)
|
sql := strings.ReplaceAll(mysqlV26DownSQL, "{{events_rules}}", sqlTableEventsRules)
|
||||||
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, strings.Split(sql, ";"), 25, false)
|
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, strings.Split(sql, ";"), 25, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func downgradeMySQLDatabaseFrom27To26(dbHandle *sql.DB) error {
|
||||||
|
logger.InfoToConsole("downgrading database schema version: 27 -> 26")
|
||||||
|
providerLog(logger.LevelInfo, "downgrading database schema version: 27 -> 26")
|
||||||
|
sql := strings.ReplaceAll(mysqlV27DownSQL, "{{ip_lists}}", sqlTableIPLists)
|
||||||
|
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, strings.Split(sql, ";"), 26, false)
|
||||||
|
}
|
||||||
|
|
|
@ -56,6 +56,7 @@ DROP TABLE IF EXISTS "{{events_rules}}" CASCADE;
|
||||||
DROP TABLE IF EXISTS "{{tasks}}" CASCADE;
|
DROP TABLE IF EXISTS "{{tasks}}" CASCADE;
|
||||||
DROP TABLE IF EXISTS "{{nodes}}" CASCADE;
|
DROP TABLE IF EXISTS "{{nodes}}" CASCADE;
|
||||||
DROP TABLE IF EXISTS "{{roles}}" CASCADE;
|
DROP TABLE IF EXISTS "{{roles}}" CASCADE;
|
||||||
|
DROP TABLE IF EXISTS "{{ip_lists}}" CASCADE;
|
||||||
DROP TABLE IF EXISTS "{{schema_version}}" CASCADE;
|
DROP TABLE IF EXISTS "{{schema_version}}" CASCADE;
|
||||||
`
|
`
|
||||||
pgsqlInitial = `CREATE TABLE "{{schema_version}}" ("id" serial NOT NULL PRIMARY KEY, "version" integer NOT NULL);
|
pgsqlInitial = `CREATE TABLE "{{schema_version}}" ("id" serial NOT NULL PRIMARY KEY, "version" integer NOT NULL);
|
||||||
|
@ -202,6 +203,19 @@ ALTER TABLE "{{users}}" ALTER COLUMN "last_password_change" DROP DEFAULT;
|
||||||
ALTER TABLE "{{events_rules}}" ALTER COLUMN "status" DROP DEFAULT;
|
ALTER TABLE "{{events_rules}}" ALTER COLUMN "status" DROP DEFAULT;
|
||||||
`
|
`
|
||||||
pgsqlV26DownSQL = `ALTER TABLE "{{events_rules}}" DROP COLUMN "status" CASCADE;`
pgsqlV26DownSQL = `ALTER TABLE "{{events_rules}}" DROP COLUMN "status" CASCADE;`
pgsqlV27SQL = `CREATE TABLE "{{ip_lists}}" ("id" bigserial NOT NULL PRIMARY KEY, "type" integer NOT NULL,
"ipornet" varchar(50) NOT NULL, "mode" integer NOT NULL, "description" varchar(512) NULL, "first" inet NOT NULL,
"last" inet NOT NULL, "ip_type" integer NOT NULL, "protocols" integer NOT NULL, "created_at" bigint NOT NULL,
"updated_at" bigint NOT NULL, "deleted_at" bigint NOT NULL);
ALTER TABLE "{{ip_lists}}" ADD CONSTRAINT "{{prefix}}unique_ipornet_type_mapping" UNIQUE ("type", "ipornet");
CREATE INDEX "{{prefix}}ip_lists_type_idx" ON "{{ip_lists}}" ("type");
CREATE INDEX "{{prefix}}ip_lists_ipornet_idx" ON "{{ip_lists}}" ("ipornet");
CREATE INDEX "{{prefix}}ip_lists_ipornet_like_idx" ON "{{ip_lists}}" ("ipornet" varchar_pattern_ops);
CREATE INDEX "{{prefix}}ip_lists_updated_at_idx" ON "{{ip_lists}}" ("updated_at");
CREATE INDEX "{{prefix}}ip_lists_deleted_at_idx" ON "{{ip_lists}}" ("deleted_at");
CREATE INDEX "{{prefix}}ip_lists_first_last_idx" ON "{{ip_lists}}" ("first", "last");
`
pgsqlV27DownSQL = `DROP TABLE "{{ip_lists}}" CASCADE;`
)
)
|
||||||
|
|
||||||
// PGSQLProvider defines the auth provider for PostgreSQL database
|
// PGSQLProvider defines the auth provider for PostgreSQL database
|
||||||
|
@ -668,6 +682,42 @@ func (p *PGSQLProvider) dumpRoles() ([]Role, error) {
|
||||||
return sqlCommonDumpRoles(p.dbHandle)
|
return sqlCommonDumpRoles(p.dbHandle)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *PGSQLProvider) ipListEntryExists(ipOrNet string, listType IPListType) (IPListEntry, error) {
|
||||||
|
return sqlCommonGetIPListEntry(ipOrNet, listType, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PGSQLProvider) addIPListEntry(entry *IPListEntry) error {
|
||||||
|
return sqlCommonAddIPListEntry(entry, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PGSQLProvider) updateIPListEntry(entry *IPListEntry) error {
|
||||||
|
return sqlCommonUpdateIPListEntry(entry, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PGSQLProvider) deleteIPListEntry(entry IPListEntry, softDelete bool) error {
|
||||||
|
return sqlCommonDeleteIPListEntry(entry, softDelete, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PGSQLProvider) getIPListEntries(listType IPListType, filter, from, order string, limit int) ([]IPListEntry, error) {
|
||||||
|
return sqlCommonGetIPListEntries(listType, filter, from, order, limit, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PGSQLProvider) getRecentlyUpdatedIPListEntries(after int64) ([]IPListEntry, error) {
|
||||||
|
return sqlCommonGetRecentlyUpdatedIPListEntries(after, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PGSQLProvider) dumpIPListEntries() ([]IPListEntry, error) {
|
||||||
|
return sqlCommonDumpIPListEntries(p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PGSQLProvider) countIPListEntries(listType IPListType) (int64, error) {
|
||||||
|
return sqlCommonCountIPListEntries(listType, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PGSQLProvider) getListEntriesForIP(ip string, listType IPListType) ([]IPListEntry, error) {
|
||||||
|
return sqlCommonGetListEntriesForIP(ip, listType, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
func (p *PGSQLProvider) setFirstDownloadTimestamp(username string) error {
|
func (p *PGSQLProvider) setFirstDownloadTimestamp(username string) error {
|
||||||
return sqlCommonSetFirstDownloadTimestamp(username, p.dbHandle)
|
return sqlCommonSetFirstDownloadTimestamp(username, p.dbHandle)
|
||||||
}
|
}
|
||||||
|
@ -721,6 +771,8 @@ func (p *PGSQLProvider) migrateDatabase() error { //nolint:dupl
|
||||||
return updatePgSQLDatabaseFromV24(p.dbHandle)
|
return updatePgSQLDatabaseFromV24(p.dbHandle)
|
||||||
case version == 25:
|
case version == 25:
|
||||||
return updatePgSQLDatabaseFromV25(p.dbHandle)
|
return updatePgSQLDatabaseFromV25(p.dbHandle)
|
||||||
|
case version == 26:
|
||||||
|
return updatePgSQLDatabaseFromV26(p.dbHandle)
|
||||||
default:
|
default:
|
||||||
if version > sqlDatabaseVersion {
|
if version > sqlDatabaseVersion {
|
||||||
providerLog(logger.LevelError, "database schema version %d is newer than the supported one: %d", version,
|
providerLog(logger.LevelError, "database schema version %d is newer than the supported one: %d", version,
|
||||||
|
@ -749,6 +801,8 @@ func (p *PGSQLProvider) revertDatabase(targetVersion int) error {
|
||||||
return downgradePgSQLDatabaseFromV25(p.dbHandle)
|
return downgradePgSQLDatabaseFromV25(p.dbHandle)
|
||||||
case 26:
|
case 26:
|
||||||
return downgradePgSQLDatabaseFromV26(p.dbHandle)
|
return downgradePgSQLDatabaseFromV26(p.dbHandle)
|
||||||
|
case 27:
|
||||||
|
return downgradePgSQLDatabaseFromV27(p.dbHandle)
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("database schema version not handled: %d", dbVersion.Version)
|
return fmt.Errorf("database schema version not handled: %d", dbVersion.Version)
|
||||||
}
|
}
|
||||||
|
@ -774,7 +828,14 @@ func updatePgSQLDatabaseFromV24(dbHandle *sql.DB) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func updatePgSQLDatabaseFromV25(dbHandle *sql.DB) error {
|
func updatePgSQLDatabaseFromV25(dbHandle *sql.DB) error {
|
||||||
return updatePgSQLDatabaseFrom25To26(dbHandle)
|
if err := updatePgSQLDatabaseFrom25To26(dbHandle); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return updatePgSQLDatabaseFromV26(dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func updatePgSQLDatabaseFromV26(dbHandle *sql.DB) error {
|
||||||
|
return updatePgSQLDatabaseFrom26To27(dbHandle)
|
||||||
}
|
}
|
||||||
|
|
||||||
func downgradePgSQLDatabaseFromV24(dbHandle *sql.DB) error {
|
func downgradePgSQLDatabaseFromV24(dbHandle *sql.DB) error {
|
||||||
|
@ -795,6 +856,13 @@ func downgradePgSQLDatabaseFromV26(dbHandle *sql.DB) error {
|
||||||
return downgradePgSQLDatabaseFromV25(dbHandle)
|
return downgradePgSQLDatabaseFromV25(dbHandle)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func downgradePgSQLDatabaseFromV27(dbHandle *sql.DB) error {
|
||||||
|
if err := downgradePgSQLDatabaseFrom27To26(dbHandle); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return downgradePgSQLDatabaseFromV26(dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
func updatePgSQLDatabaseFrom23To24(dbHandle *sql.DB) error {
|
func updatePgSQLDatabaseFrom23To24(dbHandle *sql.DB) error {
|
||||||
logger.InfoToConsole("updating database schema version: 23 -> 24")
|
logger.InfoToConsole("updating database schema version: 23 -> 24")
|
||||||
providerLog(logger.LevelInfo, "updating database schema version: 23 -> 24")
|
providerLog(logger.LevelInfo, "updating database schema version: 23 -> 24")
|
||||||
|
@ -827,6 +895,18 @@ func updatePgSQLDatabaseFrom25To26(dbHandle *sql.DB) error {
|
||||||
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 26, true)
|
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 26, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func updatePgSQLDatabaseFrom26To27(dbHandle *sql.DB) error {
|
||||||
|
logger.InfoToConsole("updating database schema version: 26 -> 27")
|
||||||
|
providerLog(logger.LevelInfo, "updating database schema version: 26 -> 27")
|
||||||
|
sql := pgsqlV27SQL
|
||||||
|
if config.Driver == CockroachDataProviderName {
|
||||||
|
sql = strings.ReplaceAll(sql, `CREATE INDEX "{{prefix}}ip_lists_ipornet_like_idx" ON "{{ip_lists}}" ("ipornet" varchar_pattern_ops);`, "")
|
||||||
|
}
|
||||||
|
sql = strings.ReplaceAll(sql, "{{ip_lists}}", sqlTableIPLists)
|
||||||
|
sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
|
||||||
|
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 27, true)
|
||||||
|
}
|
||||||
|
|
||||||
func downgradePgSQLDatabaseFrom24To23(dbHandle *sql.DB) error {
|
func downgradePgSQLDatabaseFrom24To23(dbHandle *sql.DB) error {
|
||||||
logger.InfoToConsole("downgrading database schema version: 24 -> 23")
|
logger.InfoToConsole("downgrading database schema version: 24 -> 23")
|
||||||
providerLog(logger.LevelInfo, "downgrading database schema version: 24 -> 23")
|
providerLog(logger.LevelInfo, "downgrading database schema version: 24 -> 23")
|
||||||
|
@ -850,3 +930,10 @@ func downgradePgSQLDatabaseFrom26To25(dbHandle *sql.DB) error {
|
||||||
sql := strings.ReplaceAll(pgsqlV26DownSQL, "{{events_rules}}", sqlTableEventsRules)
|
sql := strings.ReplaceAll(pgsqlV26DownSQL, "{{events_rules}}", sqlTableEventsRules)
|
||||||
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 25, false)
|
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 25, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func downgradePgSQLDatabaseFrom27To26(dbHandle *sql.DB) error {
|
||||||
|
logger.InfoToConsole("downgrading database schema version: 27 -> 26")
|
||||||
|
providerLog(logger.LevelInfo, "downgrading database schema version: 27 -> 26")
|
||||||
|
sql := strings.ReplaceAll(pgsqlV27DownSQL, "{{ip_lists}}", sqlTableIPLists)
|
||||||
|
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 26, false)
|
||||||
|
}
|
||||||
|
|
|
@ -29,6 +29,7 @@ import (
|
||||||
var (
|
var (
|
||||||
scheduler *cron.Cron
|
scheduler *cron.Cron
|
||||||
lastUserCacheUpdate atomic.Int64
|
lastUserCacheUpdate atomic.Int64
|
||||||
|
lastIPListsCacheUpdate atomic.Int64
|
||||||
// used for bolt and memory providers, so we avoid iterating all users/rules
|
// used for bolt and memory providers, so we avoid iterating all users/rules
|
||||||
// to find recently modified ones
|
// to find recently modified ones
|
||||||
lastUserUpdate atomic.Int64
|
lastUserUpdate atomic.Int64
|
||||||
|
@ -54,9 +55,6 @@ func startScheduler() error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if fnReloadRules != nil {
|
|
||||||
fnReloadRules()
|
|
||||||
}
|
|
||||||
if currentNode != nil {
|
if currentNode != nil {
|
||||||
_, err = scheduler.AddFunc("@every 30m", func() {
|
_, err = scheduler.AddFunc("@every 30m", func() {
|
||||||
err := provider.cleanupNodes()
|
err := provider.cleanupNodes()
|
||||||
|
@ -76,6 +74,7 @@ func startScheduler() error {
|
||||||
|
|
||||||
func addScheduledCacheUpdates() error {
|
func addScheduledCacheUpdates() error {
|
||||||
lastUserCacheUpdate.Store(util.GetTimeAsMsSinceEpoch(time.Now()))
|
lastUserCacheUpdate.Store(util.GetTimeAsMsSinceEpoch(time.Now()))
|
||||||
|
lastIPListsCacheUpdate.Store(util.GetTimeAsMsSinceEpoch(time.Now()))
|
||||||
_, err := scheduler.AddFunc("@every 10m", checkCacheUpdates)
|
_, err := scheduler.AddFunc("@every 10m", checkCacheUpdates)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("unable to schedule cache updates: %w", err)
|
return fmt.Errorf("unable to schedule cache updates: %w", err)
|
||||||
|
@ -99,14 +98,24 @@ func checkDataprovider() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkCacheUpdates() {
|
func checkCacheUpdates() {
|
||||||
providerLog(logger.LevelDebug, "start user cache check, update time %v", util.GetTimeFromMsecSinceEpoch(lastUserCacheUpdate.Load()))
|
checkUserCache()
|
||||||
|
checkIPListEntryCache()
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkUserCache() {
|
||||||
|
lastCheck := lastUserCacheUpdate.Load()
|
||||||
|
providerLog(logger.LevelDebug, "start user cache check, update time %v", util.GetTimeFromMsecSinceEpoch(lastCheck))
|
||||||
checkTime := util.GetTimeAsMsSinceEpoch(time.Now())
|
checkTime := util.GetTimeAsMsSinceEpoch(time.Now())
|
||||||
users, err := provider.getRecentlyUpdatedUsers(lastUserCacheUpdate.Load())
|
if config.IsShared == 1 {
|
||||||
|
lastCheck -= 5000
|
||||||
|
}
|
||||||
|
users, err := provider.getRecentlyUpdatedUsers(lastCheck)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
providerLog(logger.LevelError, "unable to get recently updated users: %v", err)
|
providerLog(logger.LevelError, "unable to get recently updated users: %v", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
for _, user := range users {
|
for idx := range users {
|
||||||
|
user := users[idx]
|
||||||
providerLog(logger.LevelDebug, "invalidate caches for user %q", user.Username)
|
providerLog(logger.LevelDebug, "invalidate caches for user %q", user.Username)
|
||||||
if user.DeletedAt > 0 {
|
if user.DeletedAt > 0 {
|
||||||
deletedAt := util.GetTimeFromMsecSinceEpoch(user.DeletedAt)
|
deletedAt := util.GetTimeFromMsecSinceEpoch(user.DeletedAt)
|
||||||
|
@ -121,11 +130,53 @@ func checkCacheUpdates() {
|
||||||
}
|
}
|
||||||
cachedPasswords.Remove(user.Username)
|
cachedPasswords.Remove(user.Username)
|
||||||
}
|
}
|
||||||
|
|
||||||
lastUserCacheUpdate.Store(checkTime)
|
lastUserCacheUpdate.Store(checkTime)
|
||||||
providerLog(logger.LevelDebug, "end user cache check, new update time %v", util.GetTimeFromMsecSinceEpoch(lastUserCacheUpdate.Load()))
|
providerLog(logger.LevelDebug, "end user cache check, new update time %v", util.GetTimeFromMsecSinceEpoch(lastUserCacheUpdate.Load()))
|
||||||
}
|
}
|
||||||
func checkIPListEntryCache() {
	if config.IsShared != 1 {
		return
	}
	hasMemoryLists := false
	for _, l := range inMemoryLists {
		if l.isInMemory.Load() {
			hasMemoryLists = true
			break
		}
	}
	if !hasMemoryLists {
		return
	}
	providerLog(logger.LevelDebug, "start IP list cache check, update time %v", util.GetTimeFromMsecSinceEpoch(lastIPListsCacheUpdate.Load()))
	checkTime := util.GetTimeAsMsSinceEpoch(time.Now())
	entries, err := provider.getRecentlyUpdatedIPListEntries(lastIPListsCacheUpdate.Load() - 5000)
	if err != nil {
		providerLog(logger.LevelError, "unable to get recently updated IP list entries: %v", err)
		return
	}
	for idx := range entries {
		e := entries[idx]
		providerLog(logger.LevelDebug, "update cache for IP list entry %q", e.getName())
		if e.DeletedAt > 0 {
			deletedAt := util.GetTimeFromMsecSinceEpoch(e.DeletedAt)
			if deletedAt.Add(30 * time.Minute).Before(time.Now()) {
				providerLog(logger.LevelDebug, "removing IP list entry %q deleted at %s", e.getName(), deletedAt)
				go provider.deleteIPListEntry(e, false) //nolint:errcheck
			}
			for _, l := range inMemoryLists {
				l.removeEntry(&e)
			}
		} else {
			for _, l := range inMemoryLists {
				l.updateEntry(&e)
			}
		}
	}
	lastIPListsCacheUpdate.Store(checkTime)
	providerLog(logger.LevelDebug, "end IP list entries cache check, new update time %v", util.GetTimeFromMsecSinceEpoch(lastIPListsCacheUpdate.Load()))
}
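A rough sketch (the real inMemoryLists and entry types are defined elsewhere in this commit; the names below are illustrative) of the kind of in-memory cache the refresh loop above drives through updateEntry and removeEntry:

package main

import (
	"fmt"
	"sync"
)

// cachedEntry is an illustrative stand-in for the provider's IP list entry.
type cachedEntry struct {
	Type    int
	IPOrNet string
	Mode    int
}

// memoryList is a toy in-memory list keyed by list type and ip/net.
type memoryList struct {
	mu      sync.RWMutex
	entries map[string]cachedEntry
}

func listKey(e *cachedEntry) string { return fmt.Sprintf("%d_%s", e.Type, e.IPOrNet) }

// updateEntry inserts or refreshes the cached copy of an entry.
func (l *memoryList) updateEntry(e *cachedEntry) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.entries[listKey(e)] = *e
}

// removeEntry evicts a (soft) deleted entry from the cache.
func (l *memoryList) removeEntry(e *cachedEntry) {
	l.mu.Lock()
	defer l.mu.Unlock()
	delete(l.entries, listKey(e))
}

func main() {
	l := &memoryList{entries: make(map[string]cachedEntry)}
	e := cachedEntry{Type: 1, IPOrNet: "10.0.0.0/8", Mode: 1}
	l.updateEntry(&e)           // a recently updated row refreshes the cached copy
	l.removeEntry(&e)           // a row soft deleted on another node is evicted
	fmt.Println(len(l.entries)) // 0
}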
||||||
func setLastUserUpdate() {
|
func setLastUserUpdate() {
|
||||||
lastUserUpdate.Store(util.GetTimeAsMsSinceEpoch(time.Now()))
|
lastUserUpdate.Store(util.GetTimeAsMsSinceEpoch(time.Now()))
|
||||||
}
|
}
|
||||||
|
|
|
@ -21,6 +21,7 @@ import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net/netip"
|
||||||
"runtime/debug"
|
"runtime/debug"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
@ -34,7 +35,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
sqlDatabaseVersion = 26
|
sqlDatabaseVersion = 27
|
||||||
defaultSQLQueryTimeout = 10 * time.Second
|
defaultSQLQueryTimeout = 10 * time.Second
|
||||||
longSQLQueryTimeout = 60 * time.Second
|
longSQLQueryTimeout = 60 * time.Second
|
||||||
)
|
)
|
||||||
|
@ -79,6 +80,7 @@ func sqlReplaceAll(sql string) string {
|
||||||
sql = strings.ReplaceAll(sql, "{{tasks}}", sqlTableTasks)
|
sql = strings.ReplaceAll(sql, "{{tasks}}", sqlTableTasks)
|
||||||
sql = strings.ReplaceAll(sql, "{{nodes}}", sqlTableNodes)
|
sql = strings.ReplaceAll(sql, "{{nodes}}", sqlTableNodes)
|
||||||
sql = strings.ReplaceAll(sql, "{{roles}}", sqlTableRoles)
|
sql = strings.ReplaceAll(sql, "{{roles}}", sqlTableRoles)
|
||||||
|
sql = strings.ReplaceAll(sql, "{{ip_lists}}", sqlTableIPLists)
|
||||||
sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
|
sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
|
||||||
return sql
|
return sql
|
||||||
}
|
}
|
||||||
|
@ -538,6 +540,241 @@ func sqlCommonDumpAdmins(dbHandle sqlQuerier) ([]Admin, error) {
|
||||||
return getAdminsWithGroups(ctx, admins, dbHandle)
|
return getAdminsWithGroups(ctx, admins, dbHandle)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func sqlCommonGetIPListEntry(ipOrNet string, listType IPListType, dbHandle sqlQuerier) (IPListEntry, error) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
q := getIPListEntryQuery()
|
||||||
|
row := dbHandle.QueryRowContext(ctx, q, listType, ipOrNet)
|
||||||
|
return getIPListEntryFromDbRow(row)
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqlCommonDumpIPListEntries(dbHandle *sql.DB) ([]IPListEntry, error) {
|
||||||
|
count, err := sqlCommonCountIPListEntries(0, dbHandle)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if count > ipListMemoryLimit {
|
||||||
|
providerLog(logger.LevelInfo, "IP lists excluded from dump, too many entries: %d", count)
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
entries := make([]IPListEntry, 0, 100)
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), longSQLQueryTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
q := getDumpListEntriesQuery()
|
||||||
|
|
||||||
|
rows, err := dbHandle.QueryContext(ctx, q)
|
||||||
|
if err != nil {
|
||||||
|
return entries, err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
for rows.Next() {
|
||||||
|
entry, err := getIPListEntryFromDbRow(rows)
|
||||||
|
if err != nil {
|
||||||
|
return entries, err
|
||||||
|
}
|
||||||
|
entries = append(entries, entry)
|
||||||
|
}
|
||||||
|
return entries, rows.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqlCommonCountIPListEntries(listType IPListType, dbHandle *sql.DB) (int64, error) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
var q string
|
||||||
|
var args []any
|
||||||
|
if listType == 0 {
|
||||||
|
q = getCountAllIPListEntriesQuery()
|
||||||
|
} else {
|
||||||
|
q = getCountIPListEntriesQuery()
|
||||||
|
args = append(args, listType)
|
||||||
|
}
|
||||||
|
var count int64
|
||||||
|
err := dbHandle.QueryRowContext(ctx, q, args...).Scan(&count)
|
||||||
|
return count, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqlCommonGetIPListEntries(listType IPListType, filter, from, order string, limit int, dbHandle sqlQuerier) ([]IPListEntry, error) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
q := getIPListEntriesQuery(filter, from, order, limit)
|
||||||
|
args := []any{listType}
|
||||||
|
if from != "" {
|
||||||
|
args = append(args, from)
|
||||||
|
}
|
||||||
|
if filter != "" {
|
||||||
|
args = append(args, filter+"%")
|
||||||
|
}
|
||||||
|
if limit > 0 {
|
||||||
|
args = append(args, limit)
|
||||||
|
}
|
||||||
|
entries := make([]IPListEntry, 0, limit)
|
||||||
|
rows, err := dbHandle.QueryContext(ctx, q, args...)
|
||||||
|
if err != nil {
|
||||||
|
return entries, err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
for rows.Next() {
|
||||||
|
entry, err := getIPListEntryFromDbRow(rows)
|
||||||
|
if err != nil {
|
||||||
|
return entries, err
|
||||||
|
}
|
||||||
|
entries = append(entries, entry)
|
||||||
|
}
|
||||||
|
return entries, rows.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqlCommonGetRecentlyUpdatedIPListEntries(after int64, dbHandle sqlQuerier) ([]IPListEntry, error) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), longSQLQueryTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
q := getRecentlyUpdatedIPListQuery()
|
||||||
|
entries := make([]IPListEntry, 0, 5)
|
||||||
|
rows, err := dbHandle.QueryContext(ctx, q, after)
|
||||||
|
if err != nil {
|
||||||
|
return entries, err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
for rows.Next() {
|
||||||
|
entry, err := getIPListEntryFromDbRow(rows)
|
||||||
|
if err != nil {
|
||||||
|
return entries, err
|
||||||
|
}
|
||||||
|
entries = append(entries, entry)
|
||||||
|
}
|
||||||
|
return entries, rows.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqlCommonGetListEntriesForIP(ip string, listType IPListType, dbHandle sqlQuerier) ([]IPListEntry, error) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
var rows *sql.Rows
|
||||||
|
var err error
|
||||||
|
|
||||||
|
entries := make([]IPListEntry, 0, 2)
|
||||||
|
if config.Driver == PGSQLDataProviderName || config.Driver == CockroachDataProviderName {
|
||||||
|
rows, err = dbHandle.QueryContext(ctx, getIPListEntriesForIPQueryPg(), listType, ip)
|
||||||
|
if err != nil {
|
||||||
|
return entries, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ipAddr, err := netip.ParseAddr(ip)
|
||||||
|
if err != nil {
|
||||||
|
return entries, fmt.Errorf("invalid ip address %s", ip)
|
||||||
|
}
|
||||||
|
var netType int
|
||||||
|
var ipBytes []byte
|
||||||
|
if ipAddr.Is4() || ipAddr.Is4In6() {
|
||||||
|
netType = ipTypeV4
|
||||||
|
as4 := ipAddr.As4()
|
||||||
|
ipBytes = as4[:]
|
||||||
|
} else {
|
||||||
|
netType = ipTypeV6
|
||||||
|
as16 := ipAddr.As16()
|
||||||
|
ipBytes = as16[:]
|
||||||
|
}
|
||||||
|
rows, err = dbHandle.QueryContext(ctx, getIPListEntriesForIPQueryNoPg(), listType, netType, ipBytes)
|
||||||
|
if err != nil {
|
||||||
|
return entries, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
for rows.Next() {
|
||||||
|
entry, err := getIPListEntryFromDbRow(rows)
|
||||||
|
if err != nil {
|
||||||
|
return entries, err
|
||||||
|
}
|
||||||
|
entries = append(entries, entry)
|
||||||
|
}
|
||||||
|
return entries, rows.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqlCommonAddIPListEntry(entry *IPListEntry, dbHandle *sql.DB) error {
|
||||||
|
if err := entry.validate(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
var err error
|
||||||
|
q := getAddIPListEntryQuery()
|
||||||
|
first := entry.getFirst()
|
||||||
|
last := entry.getLast()
|
||||||
|
var netType int
|
||||||
|
if first.Is4() {
|
||||||
|
netType = ipTypeV4
|
||||||
|
} else {
|
||||||
|
netType = ipTypeV6
|
||||||
|
}
|
||||||
|
if config.IsShared == 1 {
|
||||||
|
return sqlCommonExecuteTx(ctx, dbHandle, func(tx *sql.Tx) error {
|
||||||
|
_, err := tx.ExecContext(ctx, getRemoveSoftDeletedIPListEntryQuery(), entry.Type, entry.IPOrNet)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if config.Driver == PGSQLDataProviderName || config.Driver == CockroachDataProviderName {
|
||||||
|
_, err = tx.ExecContext(ctx, q, entry.Type, entry.IPOrNet, first.String(), last.String(),
|
||||||
|
netType, entry.Protocols, entry.Description, entry.Mode, util.GetTimeAsMsSinceEpoch(time.Now()),
|
||||||
|
util.GetTimeAsMsSinceEpoch(time.Now()))
|
||||||
|
} else {
|
||||||
|
_, err = tx.ExecContext(ctx, q, entry.Type, entry.IPOrNet, entry.First, entry.Last,
|
||||||
|
netType, entry.Protocols, entry.Description, entry.Mode, util.GetTimeAsMsSinceEpoch(time.Now()),
|
||||||
|
util.GetTimeAsMsSinceEpoch(time.Now()))
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if config.Driver == PGSQLDataProviderName || config.Driver == CockroachDataProviderName {
|
||||||
|
_, err = dbHandle.ExecContext(ctx, q, entry.Type, entry.IPOrNet, first.String(), last.String(),
|
||||||
|
netType, entry.Protocols, entry.Description, entry.Mode, util.GetTimeAsMsSinceEpoch(time.Now()),
|
||||||
|
util.GetTimeAsMsSinceEpoch(time.Now()))
|
||||||
|
} else {
|
||||||
|
_, err = dbHandle.ExecContext(ctx, q, entry.Type, entry.IPOrNet, entry.First, entry.Last,
|
||||||
|
netType, entry.Protocols, entry.Description, entry.Mode, util.GetTimeAsMsSinceEpoch(time.Now()),
|
||||||
|
util.GetTimeAsMsSinceEpoch(time.Now()))
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqlCommonUpdateIPListEntry(entry *IPListEntry, dbHandle *sql.DB) error {
|
||||||
|
if err := entry.validate(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
q := getUpdateIPListEntryQuery()
|
||||||
|
_, err := dbHandle.ExecContext(ctx, q, entry.Mode, entry.Protocols, entry.Description,
|
||||||
|
util.GetTimeAsMsSinceEpoch(time.Now()), entry.Type, entry.IPOrNet)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqlCommonDeleteIPListEntry(entry IPListEntry, softDelete bool, dbHandle *sql.DB) error {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
q := getDeleteIPListEntryQuery(softDelete)
|
||||||
|
var args []any
|
||||||
|
if softDelete {
|
||||||
|
ts := util.GetTimeAsMsSinceEpoch(time.Now())
|
||||||
|
args = append(args, ts, ts)
|
||||||
|
}
|
||||||
|
args = append(args, entry.Type, entry.IPOrNet)
|
||||||
|
res, err := dbHandle.ExecContext(ctx, q, args...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return sqlCommonRequireRowAffected(res)
|
||||||
|
}
|
||||||
|
|
||||||
func sqlCommonGetRoleByName(name string, dbHandle sqlQuerier) (Role, error) {
|
func sqlCommonGetRoleByName(name string, dbHandle sqlQuerier) (Role, error) {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
|
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
@ -1872,6 +2109,24 @@ func getEventRuleFromDbRow(row sqlScanner) (EventRule, error) {
|
||||||
return rule, nil
|
return rule, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getIPListEntryFromDbRow(row sqlScanner) (IPListEntry, error) {
|
||||||
|
var entry IPListEntry
|
||||||
|
var description sql.NullString
|
||||||
|
|
||||||
|
err := row.Scan(&entry.Type, &entry.IPOrNet, &entry.Mode, &entry.Protocols, &description,
|
||||||
|
&entry.CreatedAt, &entry.UpdatedAt, &entry.DeletedAt)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, sql.ErrNoRows) {
|
||||||
|
return entry, util.NewRecordNotFoundError(err.Error())
|
||||||
|
}
|
||||||
|
return entry, err
|
||||||
|
}
|
||||||
|
if description.Valid {
|
||||||
|
entry.Description = description.String
|
||||||
|
}
|
||||||
|
return entry, err
|
||||||
|
}
|
||||||
|
|
||||||
func getRoleFromDbRow(row sqlScanner) (Role, error) {
|
func getRoleFromDbRow(row sqlScanner) (Role, error) {
|
||||||
var role Role
|
var role Role
|
||||||
var description sql.NullString
|
var description sql.NullString
|
||||||
|
|
|
@ -57,6 +57,7 @@ DROP TABLE IF EXISTS "{{events_rules}}";
|
||||||
DROP TABLE IF EXISTS "{{events_actions}}";
|
DROP TABLE IF EXISTS "{{events_actions}}";
|
||||||
DROP TABLE IF EXISTS "{{tasks}}";
|
DROP TABLE IF EXISTS "{{tasks}}";
|
||||||
DROP TABLE IF EXISTS "{{roles}}";
|
DROP TABLE IF EXISTS "{{roles}}";
|
||||||
|
DROP TABLE IF EXISTS "{{ip_lists}}";
|
||||||
DROP TABLE IF EXISTS "{{schema_version}}";
|
DROP TABLE IF EXISTS "{{schema_version}}";
|
||||||
`
|
`
|
||||||
sqliteInitialSQL = `CREATE TABLE "{{schema_version}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "version" integer NOT NULL);
|
sqliteInitialSQL = `CREATE TABLE "{{schema_version}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "version" integer NOT NULL);
|
||||||
|
@ -179,6 +180,19 @@ DROP TABLE "{{roles}}";
|
||||||
sqliteV25DownSQL = `ALTER TABLE "{{users}}" DROP COLUMN "last_password_change";`
|
sqliteV25DownSQL = `ALTER TABLE "{{users}}" DROP COLUMN "last_password_change";`
|
||||||
sqliteV26SQL = `ALTER TABLE "{{events_rules}}" ADD COLUMN "status" integer DEFAULT 1 NOT NULL;`
|
sqliteV26SQL = `ALTER TABLE "{{events_rules}}" ADD COLUMN "status" integer DEFAULT 1 NOT NULL;`
|
||||||
sqliteV26DownSQL = `ALTER TABLE "{{events_rules}}" DROP COLUMN "status";`
|
sqliteV26DownSQL = `ALTER TABLE "{{events_rules}}" DROP COLUMN "status";`
|
||||||
|
sqliteV27SQL = `CREATE TABLE "{{ip_lists}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
"type" integer NOT NULL, "ipornet" varchar(50) NOT NULL, "mode" integer NOT NULL, "description" varchar(512) NULL,
"first" BLOB NOT NULL, "last" BLOB NOT NULL, "ip_type" integer NOT NULL, "protocols" integer NOT NULL,
"created_at" bigint NOT NULL, "updated_at" bigint NOT NULL, "deleted_at" bigint NOT NULL,
CONSTRAINT "{{prefix}}unique_ipornet_type_mapping" UNIQUE ("type", "ipornet"));
CREATE INDEX "{{prefix}}ip_lists_type_idx" ON "{{ip_lists}}" ("type");
CREATE INDEX "{{prefix}}ip_lists_ipornet_idx" ON "{{ip_lists}}" ("ipornet");
CREATE INDEX "{{prefix}}ip_lists_ip_type_idx" ON "{{ip_lists}}" ("ip_type");
CREATE INDEX "{{prefix}}ip_lists_ip_updated_at_idx" ON "{{ip_lists}}" ("updated_at");
CREATE INDEX "{{prefix}}ip_lists_ip_deleted_at_idx" ON "{{ip_lists}}" ("deleted_at");
CREATE INDEX "{{prefix}}ip_lists_first_last_idx" ON "{{ip_lists}}" ("first", "last");
`
sqliteV27DownSQL = `DROP TABLE "{{ip_lists}}";`
)
)
|
||||||
|
|
||||||
// SQLiteProvider defines the auth provider for SQLite database
|
// SQLiteProvider defines the auth provider for SQLite database
|
||||||
|
@ -624,6 +638,42 @@ func (p *SQLiteProvider) dumpRoles() ([]Role, error) {
|
||||||
return sqlCommonDumpRoles(p.dbHandle)
|
return sqlCommonDumpRoles(p.dbHandle)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *SQLiteProvider) ipListEntryExists(ipOrNet string, listType IPListType) (IPListEntry, error) {
|
||||||
|
return sqlCommonGetIPListEntry(ipOrNet, listType, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SQLiteProvider) addIPListEntry(entry *IPListEntry) error {
|
||||||
|
return sqlCommonAddIPListEntry(entry, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SQLiteProvider) updateIPListEntry(entry *IPListEntry) error {
|
||||||
|
return sqlCommonUpdateIPListEntry(entry, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SQLiteProvider) deleteIPListEntry(entry IPListEntry, softDelete bool) error {
|
||||||
|
return sqlCommonDeleteIPListEntry(entry, softDelete, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SQLiteProvider) getIPListEntries(listType IPListType, filter, from, order string, limit int) ([]IPListEntry, error) {
|
||||||
|
return sqlCommonGetIPListEntries(listType, filter, from, order, limit, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SQLiteProvider) getRecentlyUpdatedIPListEntries(after int64) ([]IPListEntry, error) {
|
||||||
|
return sqlCommonGetRecentlyUpdatedIPListEntries(after, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SQLiteProvider) dumpIPListEntries() ([]IPListEntry, error) {
|
||||||
|
return sqlCommonDumpIPListEntries(p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SQLiteProvider) countIPListEntries(listType IPListType) (int64, error) {
|
||||||
|
return sqlCommonCountIPListEntries(listType, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SQLiteProvider) getListEntriesForIP(ip string, listType IPListType) ([]IPListEntry, error) {
|
||||||
|
return sqlCommonGetListEntriesForIP(ip, listType, p.dbHandle)
|
||||||
|
}
|
||||||
|
|
||||||
func (p *SQLiteProvider) setFirstDownloadTimestamp(username string) error {
|
func (p *SQLiteProvider) setFirstDownloadTimestamp(username string) error {
|
||||||
return sqlCommonSetFirstDownloadTimestamp(username, p.dbHandle)
|
return sqlCommonSetFirstDownloadTimestamp(username, p.dbHandle)
|
||||||
}
|
}
|
||||||
|
@ -652,7 +702,6 @@ func (p *SQLiteProvider) initializeDatabase() error {
|
||||||
logger.InfoToConsole("creating initial database schema, version 23")
|
logger.InfoToConsole("creating initial database schema, version 23")
|
||||||
providerLog(logger.LevelInfo, "creating initial database schema, version 23")
|
providerLog(logger.LevelInfo, "creating initial database schema, version 23")
|
||||||
sql := sqlReplaceAll(sqliteInitialSQL)
|
sql := sqlReplaceAll(sqliteInitialSQL)
|
||||||
|
|
||||||
return sqlCommonExecSQLAndUpdateDBVersion(p.dbHandle, []string{sql}, 23, true)
|
return sqlCommonExecSQLAndUpdateDBVersion(p.dbHandle, []string{sql}, 23, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -677,6 +726,8 @@ func (p *SQLiteProvider) migrateDatabase() error { //nolint:dupl
|
||||||
return updateSQLiteDatabaseFromV24(p.dbHandle)
|
return updateSQLiteDatabaseFromV24(p.dbHandle)
|
||||||
case version == 25:
|
case version == 25:
|
||||||
return updateSQLiteDatabaseFromV25(p.dbHandle)
|
return updateSQLiteDatabaseFromV25(p.dbHandle)
|
||||||
|
case version == 26:
|
||||||
|
return updateSQLiteDatabaseFromV26(p.dbHandle)
|
||||||
default:
|
default:
|
||||||
if version > sqlDatabaseVersion {
|
if version > sqlDatabaseVersion {
|
||||||
providerLog(logger.LevelError, "database schema version %d is newer than the supported one: %d", version,
|
providerLog(logger.LevelError, "database schema version %d is newer than the supported one: %d", version,
|
||||||
|
@ -705,6 +756,8 @@ func (p *SQLiteProvider) revertDatabase(targetVersion int) error {
|
||||||
return downgradeSQLiteDatabaseFromV25(p.dbHandle)
|
return downgradeSQLiteDatabaseFromV25(p.dbHandle)
|
||||||
case 26:
|
case 26:
|
		return downgradeSQLiteDatabaseFromV26(p.dbHandle)
+	case 27:
+		return downgradeSQLiteDatabaseFromV27(p.dbHandle)
	default:
		return fmt.Errorf("database schema version not handled: %d", dbVersion.Version)
	}

@@ -730,7 +783,14 @@ func updateSQLiteDatabaseFromV24(dbHandle *sql.DB) error {
}

func updateSQLiteDatabaseFromV25(dbHandle *sql.DB) error {
-	return updateSQLiteDatabaseFrom25To26(dbHandle)
+	if err := updateSQLiteDatabaseFrom25To26(dbHandle); err != nil {
+		return err
+	}
+	return updateSQLiteDatabaseFromV26(dbHandle)
+}
+
+func updateSQLiteDatabaseFromV26(dbHandle *sql.DB) error {
+	return updateSQLiteDatabaseFrom26To27(dbHandle)
}

func downgradeSQLiteDatabaseFromV24(dbHandle *sql.DB) error {

@@ -751,6 +811,13 @@ func downgradeSQLiteDatabaseFromV26(dbHandle *sql.DB) error {
	return downgradeSQLiteDatabaseFromV25(dbHandle)
}

+func downgradeSQLiteDatabaseFromV27(dbHandle *sql.DB) error {
+	if err := downgradeSQLiteDatabaseFrom27To26(dbHandle); err != nil {
+		return err
+	}
+	return downgradeSQLiteDatabaseFromV26(dbHandle)
+}
+
func updateSQLiteDatabaseFrom23To24(dbHandle *sql.DB) error {
	logger.InfoToConsole("updating database schema version: 23 -> 24")
	providerLog(logger.LevelInfo, "updating database schema version: 23 -> 24")

@@ -775,6 +842,14 @@ func updateSQLiteDatabaseFrom25To26(dbHandle *sql.DB) error {
	return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 26, true)
}

+func updateSQLiteDatabaseFrom26To27(dbHandle *sql.DB) error {
+	logger.InfoToConsole("updating database schema version: 26 -> 27")
+	providerLog(logger.LevelInfo, "updating database schema version: 26 -> 27")
+	sql := strings.ReplaceAll(sqliteV27SQL, "{{ip_lists}}", sqlTableIPLists)
+	sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
+	return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 27, true)
+}
+
func downgradeSQLiteDatabaseFrom24To23(dbHandle *sql.DB) error {
	logger.InfoToConsole("downgrading database schema version: 24 -> 23")
	providerLog(logger.LevelInfo, "downgrading database schema version: 24 -> 23")

@@ -799,6 +874,13 @@ func downgradeSQLiteDatabaseFrom26To25(dbHandle *sql.DB) error {
	return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 25, false)
}

+func downgradeSQLiteDatabaseFrom27To26(dbHandle *sql.DB) error {
+	logger.InfoToConsole("downgrading database schema version: 27 -> 26")
+	providerLog(logger.LevelInfo, "downgrading database schema version: 27 -> 26")
+	sql := strings.ReplaceAll(sqliteV27DownSQL, "{{ip_lists}}", sqlTableIPLists)
+	return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 26, false)
+}
+
/*func setPragmaFK(dbHandle *sql.DB, value string) error {
	ctx, cancel := context.WithTimeout(context.Background(), longSQLQueryTimeout)
	defer cancel()
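Each upgrade step above applies a single schema delta and then delegates to the next step, so a provider at any supported version walks forward to the latest schema, and downgrades walk the same chain in reverse. A minimal, generic sketch of that chaining idea, with a hypothetical schema_version table and DDL map that are not part of SFTPGo:

package migrate

import (
	"database/sql"
	"fmt"
)

// applyStep runs one schema delta and records the new version in one transaction.
func applyStep(db *sql.DB, ddl string, newVersion int) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	if _, err := tx.Exec(ddl); err != nil {
		tx.Rollback() //nolint:errcheck
		return err
	}
	if _, err := tx.Exec("UPDATE schema_version SET version = ?", newVersion); err != nil {
		tx.Rollback() //nolint:errcheck
		return err
	}
	return tx.Commit()
}

// upgradeFrom walks the chain one version at a time until the target is reached.
func upgradeFrom(db *sql.DB, current, target int, steps map[int]string) error {
	for v := current; v < target; v++ {
		ddl, ok := steps[v]
		if !ok {
			return fmt.Errorf("database schema version not handled: %d", v)
		}
		if err := applyStep(db, ddl, v+1); err != nil {
			return err
		}
	}
	return nil
}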
@@ -36,6 +36,7 @@ const (
	selectGroupFields = "id,name,description,created_at,updated_at,user_settings"
	selectEventActionFields = "id,name,description,type,options"
	selectRoleFields = "id,name,description,created_at,updated_at"
+	selectIPListEntryFields = "type,ipornet,mode,protocols,description,created_at,updated_at,deleted_at"
	selectMinimalFields = "id,name"
)

@@ -179,6 +180,100 @@ func getDefenderEventsCleanupQuery() string {
	return fmt.Sprintf(`DELETE FROM %s WHERE date_time < %s`, sqlTableDefenderEvents, sqlPlaceholders[0])
}

+func getIPListEntryQuery() string {
+	return fmt.Sprintf(`SELECT %s FROM %s WHERE type = %s AND ipornet = %s AND deleted_at = 0`,
+		selectIPListEntryFields, sqlTableIPLists, sqlPlaceholders[0], sqlPlaceholders[1])
+}
+
+func getIPListEntriesQuery(filter, from, order string, limit int) string {
+	var sb strings.Builder
+	var idx int
+
+	sb.WriteString("SELECT ")
+	sb.WriteString(selectIPListEntryFields)
+	sb.WriteString(" FROM ")
+	sb.WriteString(sqlTableIPLists)
+	sb.WriteString(" WHERE type = ")
+	sb.WriteString(sqlPlaceholders[idx])
+	idx++
+	if from != "" {
+		if order == OrderASC {
+			sb.WriteString(" AND ipornet > ")
+		} else {
+			sb.WriteString(" AND ipornet < ")
+		}
+		sb.WriteString(sqlPlaceholders[idx])
+		idx++
+	}
+	if filter != "" {
+		sb.WriteString(" AND ipornet LIKE ")
+		sb.WriteString(sqlPlaceholders[idx])
+		idx++
+	}
+	sb.WriteString(" AND deleted_at = 0 ")
+	sb.WriteString(" ORDER BY ipornet ")
+	sb.WriteString(order)
+	if limit > 0 {
+		sb.WriteString(" LIMIT ")
+		sb.WriteString(sqlPlaceholders[idx])
+	}
+	return sb.String()
+}
+
+func getCountIPListEntriesQuery() string {
+	return fmt.Sprintf(`SELECT count(ipornet) FROM %s WHERE type = %s AND deleted_at = 0`, sqlTableIPLists, sqlPlaceholders[0])
+}
+
+func getCountAllIPListEntriesQuery() string {
+	return fmt.Sprintf(`SELECT count(ipornet) FROM %s WHERE deleted_at = 0`, sqlTableIPLists)
+}
+
+func getIPListEntriesForIPQueryPg() string {
+	return fmt.Sprintf(`SELECT %s FROM %s WHERE type = %s AND deleted_at = 0 AND %s::inet BETWEEN first AND last`,
+		selectIPListEntryFields, sqlTableIPLists, sqlPlaceholders[0], sqlPlaceholders[1])
+}
+
+func getIPListEntriesForIPQueryNoPg() string {
+	return fmt.Sprintf(`SELECT %s FROM %s WHERE type = %s AND deleted_at = 0 AND ip_type = %s AND %s BETWEEN first AND last`,
+		selectIPListEntryFields, sqlTableIPLists, sqlPlaceholders[0], sqlPlaceholders[1], sqlPlaceholders[2])
+}
+
+func getRecentlyUpdatedIPListQuery() string {
+	return fmt.Sprintf(`SELECT %s FROM %s WHERE updated_at >= %s OR deleted_at > 0`,
+		selectIPListEntryFields, sqlTableIPLists, sqlPlaceholders[0])
+}
+
+func getDumpListEntriesQuery() string {
+	return fmt.Sprintf(`SELECT %s FROM %s WHERE deleted_at = 0`, selectIPListEntryFields, sqlTableIPLists)
+}
+
+func getAddIPListEntryQuery() string {
+	return fmt.Sprintf(`INSERT INTO %s (type,ipornet,first,last,ip_type,protocols,description,mode,created_at,updated_at,deleted_at)
+		VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,0)`, sqlTableIPLists, sqlPlaceholders[0], sqlPlaceholders[1],
+		sqlPlaceholders[2], sqlPlaceholders[3], sqlPlaceholders[4], sqlPlaceholders[5],
+		sqlPlaceholders[6], sqlPlaceholders[7], sqlPlaceholders[8], sqlPlaceholders[9])
+}
+
+func getUpdateIPListEntryQuery() string {
+	return fmt.Sprintf(`UPDATE %s SET mode=%s,protocols=%s,description=%s,updated_at=%s WHERE type = %s AND ipornet = %s`,
+		sqlTableIPLists, sqlPlaceholders[0], sqlPlaceholders[1], sqlPlaceholders[2], sqlPlaceholders[3],
+		sqlPlaceholders[4], sqlPlaceholders[5])
+}
+
+func getDeleteIPListEntryQuery(softDelete bool) string {
+	if softDelete {
+		return fmt.Sprintf(`UPDATE %s SET updated_at=%s,deleted_at=%s WHERE type = %s AND ipornet = %s`,
+			sqlTableIPLists, sqlPlaceholders[0], sqlPlaceholders[1], sqlPlaceholders[2], sqlPlaceholders[3])
+	}
+	return fmt.Sprintf(`DELETE FROM %s WHERE type = %s AND ipornet = %s`,
+		sqlTableIPLists, sqlPlaceholders[0], sqlPlaceholders[1])
+}
+
+func getRemoveSoftDeletedIPListEntryQuery() string {
+	return fmt.Sprintf(`DELETE FROM %s WHERE type = %s AND ipornet = %s AND deleted_at > 0`,
+		sqlTableIPLists, sqlPlaceholders[0], sqlPlaceholders[1])
+}
+
func getRoleByNameQuery() string {
	return fmt.Sprintf(`SELECT %s FROM %s WHERE name = %s`, selectRoleFields, sqlTableRoles,
		sqlPlaceholders[0])
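getIPListEntriesQuery implements keyset pagination: the caller passes the last ipornet value it has already seen as the `from` cursor, the query selects the next page with `ipornet > from` (or `<` for descending order), an optional LIKE filter narrows the match, soft-deleted rows are excluded and the page is capped by LIMIT. A hedged sketch of how a consumer could walk a whole list with that cursor, assuming the dataprovider.GetIPListEntries signature used by the REST handlers later in this commit and an exported OrderASC constant; the page size of 100 is arbitrary:

package iplistscan

import "github.com/drakkan/sftpgo/v2/internal/dataprovider"

// iterateIPList visits every entry of the given list type page by page.
func iterateIPList(listType dataprovider.IPListType, visit func(dataprovider.IPListEntry)) error {
	from := ""
	for {
		entries, err := dataprovider.GetIPListEntries(listType, "", from, dataprovider.OrderASC, 100)
		if err != nil {
			return err
		}
		if len(entries) == 0 {
			return nil
		}
		for _, e := range entries {
			visit(e)
		}
		// the last ipornet of this page becomes the cursor for the next query
		from = entries[len(entries)-1].IPOrNet
	}
}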
@@ -473,7 +473,7 @@ func (u *User) SetEmptySecrets() {
}

// GetPermissionsForPath returns the permissions for the given path.
-// The path must be a SFTPGo exposed path
+// The path must be a SFTPGo virtual path
func (u *User) GetPermissionsForPath(p string) []string {
	permissions := []string{}
	if perms, ok := u.Permissions["/"]; ok {
@@ -68,9 +68,9 @@ type Binding struct {
	CertificateKeyFile string `json:"certificate_key_file" mapstructure:"certificate_key_file"`
	// Defines the minimum TLS version. 13 means TLS 1.3, default is TLS 1.2
	MinTLSVersion int `json:"min_tls_version" mapstructure:"min_tls_version"`
-	// External IP address to expose for passive connections.
+	// External IP address for passive connections.
	ForcePassiveIP string `json:"force_passive_ip" mapstructure:"force_passive_ip"`
-	// PassiveIPOverrides allows to define different IP addresses to expose for passive connections
+	// PassiveIPOverrides allows to define different IP addresses for passive connections
	// based on the client IP address
	PassiveIPOverrides []PassiveIPOverride `json:"passive_ip_overrides" mapstructure:"passive_ip_overrides"`
	// Set to 1 to require client certificate authentication.
@@ -309,16 +309,16 @@ func TestMain(m *testing.M) {
		os.Exit(1)
	}

-	err = common.Initialize(commonConf, 0)
-	if err != nil {
-		logger.WarnToConsole("error initializing common: %v", err)
-		os.Exit(1)
-	}
	err = dataprovider.Initialize(providerConf, configDir, true)
	if err != nil {
		logger.ErrorToConsole("error initializing data provider: %v", err)
		os.Exit(1)
	}
+	err = common.Initialize(commonConf, 0)
+	if err != nil {
+		logger.WarnToConsole("error initializing common: %v", err)
+		os.Exit(1)
+	}

	httpConfig := config.GetHTTPConfig()
	httpConfig.Initialize(configDir) //nolint:errcheck
@@ -160,11 +160,11 @@ func (s *Server) ClientConnected(cc ftpserver.ClientContext) (string, error) {
	cc.SetDebug(s.binding.Debug)
	ipAddr := util.GetIPFromRemoteAddress(cc.RemoteAddr().String())
	common.Connections.AddClientConnection(ipAddr)
-	if common.IsBanned(ipAddr) {
+	if common.IsBanned(ipAddr, common.ProtocolFTP) {
		logger.Log(logger.LevelDebug, common.ProtocolFTP, "", "connection refused, ip %#v is banned", ipAddr)
		return "Access denied: banned client IP", common.ErrConnectionDenied
	}
-	if err := common.Connections.IsNewConnectionAllowed(ipAddr); err != nil {
+	if err := common.Connections.IsNewConnectionAllowed(ipAddr, common.ProtocolFTP); err != nil {
		logger.Log(logger.LevelDebug, common.ProtocolFTP, "", "connection not allowed from ip %q: %v", ipAddr, err)
		return "Access denied", err
	}

@@ -429,7 +429,7 @@ func updateLoginMetrics(user *dataprovider.User, ip, loginMethod string, err err
		if errors.Is(err, util.ErrNotFound) {
			event = common.HostEventUserNotFound
		}
-		common.AddDefenderEvent(ip, event)
+		common.AddDefenderEvent(ip, common.ProtocolFTP, event)
	}
	metric.AddLoginResult(loginMethod, err)
	dataprovider.ExecutePostLoginHook(user, loginMethod, ip, common.ProtocolFTP, err)
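The ban, connection and defender calls now take the protocol as an extra argument: with IP/network lists stored in the data provider, an entry can be restricted to a subset of protocols, so the FTP service has to tell the common layer which protocol the connection belongs to. A rough illustration of such a protocol-scoped check; the type and field names below are invented for the example and are not SFTPGo's internal representation:

package banlist

import "net"

type listEntry struct {
	network   *net.IPNet
	protocols map[string]bool // empty means the entry applies to every protocol
}

// isBanned reports whether ipAddr is covered by an entry that applies to protocol.
func isBanned(entries []listEntry, ipAddr, protocol string) bool {
	ip := net.ParseIP(ipAddr)
	if ip == nil {
		return false
	}
	for _, e := range entries {
		if !e.network.Contains(ip) {
			continue
		}
		if len(e.protocols) == 0 || e.protocols[protocol] {
			return true
		}
	}
	return false
}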
@@ -19,6 +19,7 @@ import (
	"errors"
	"fmt"
	"net/http"
+	"net/url"

	"github.com/go-chi/jwtauth/v5"
	"github.com/go-chi/render"

@@ -83,7 +84,7 @@ func addAdmin(w http.ResponseWriter, r *http.Request) {
		sendAPIResponse(w, r, err, "", getRespStatus(err))
		return
	}
-	w.Header().Add("Location", fmt.Sprintf("%s/%s", adminPath, admin.Username))
+	w.Header().Add("Location", fmt.Sprintf("%s/%s", adminPath, url.PathEscape(admin.Username)))
	renderAdmin(w, r, admin.Username, http.StatusCreated)
}

@@ -18,6 +18,7 @@ import (
	"context"
	"fmt"
	"net/http"
+	"net/url"

	"github.com/go-chi/render"

@@ -82,7 +83,7 @@ func addEventAction(w http.ResponseWriter, r *http.Request) {
		sendAPIResponse(w, r, err, "", getRespStatus(err))
		return
	}
-	w.Header().Add("Location", fmt.Sprintf("%s/%s", eventActionsPath, action.Name))
+	w.Header().Add("Location", fmt.Sprintf("%s/%s", eventActionsPath, url.PathEscape(action.Name)))
	renderEventAction(w, r, action.Name, http.StatusCreated)
}

@@ -197,7 +198,7 @@ func addEventRule(w http.ResponseWriter, r *http.Request) {
		sendAPIResponse(w, r, err, "", getRespStatus(err))
		return
	}
-	w.Header().Add("Location", fmt.Sprintf("%s/%s", eventRulesPath, rule.Name))
+	w.Header().Add("Location", fmt.Sprintf("%s/%s", eventRulesPath, url.PathEscape(rule.Name)))
	renderEventRule(w, r, rule.Name, http.StatusCreated)
}

@@ -18,6 +18,7 @@ import (
	"context"
	"fmt"
	"net/http"
+	"net/url"

	"github.com/go-chi/render"

@@ -60,7 +61,7 @@ func addFolder(w http.ResponseWriter, r *http.Request) {
		sendAPIResponse(w, r, err, "", getRespStatus(err))
		return
	}
-	w.Header().Add("Location", fmt.Sprintf("%s/%s", folderPath, folder.Name))
+	w.Header().Add("Location", fmt.Sprintf("%s/%s", folderPath, url.PathEscape(folder.Name)))
	renderFolder(w, r, folder.Name, http.StatusCreated)
}

@@ -18,6 +18,7 @@ import (
	"context"
	"fmt"
	"net/http"
+	"net/url"

	"github.com/go-chi/render"

@@ -59,7 +60,7 @@ func addGroup(w http.ResponseWriter, r *http.Request) {
		sendAPIResponse(w, r, err, "", getRespStatus(err))
		return
	}
-	w.Header().Add("Location", fmt.Sprintf("%s/%s", groupPath, group.Name))
+	w.Header().Add("Location", fmt.Sprintf("%s/%s", groupPath, url.PathEscape(group.Name)))
	renderGroup(w, r, group.Name, http.StatusCreated)
}
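All of these handlers previously interpolated user-chosen object names directly into the Location header; names may contain spaces, slashes or other reserved characters, so they are now escaped with url.PathEscape. A small standalone illustration, with an arbitrary example name:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	name := "backup ops/2023"
	fmt.Println("/api/v2/folders/" + name)                 // ambiguous: the embedded slash splits the path
	fmt.Println("/api/v2/folders/" + url.PathEscape(name)) // /api/v2/folders/backup%20ops%2F2023
}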
157
internal/httpd/api_iplist.go
Normal file

@@ -0,0 +1,157 @@
// Copyright (C) 2019-2023 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package httpd

import (
	"errors"
	"fmt"
	"net/http"
	"net/url"
	"strconv"

	"github.com/go-chi/chi/v5"
	"github.com/go-chi/render"

	"github.com/drakkan/sftpgo/v2/internal/dataprovider"
	"github.com/drakkan/sftpgo/v2/internal/util"
)

func getIPListEntries(w http.ResponseWriter, r *http.Request) {
	r.Body = http.MaxBytesReader(w, r.Body, maxRequestSize)
	limit, _, order, err := getSearchFilters(w, r)
	if err != nil {
		return
	}
	listType, _, err := getIPListPathParams(r)
	if err != nil {
		sendAPIResponse(w, r, err, "", http.StatusBadRequest)
		return
	}
	entries, err := dataprovider.GetIPListEntries(listType, r.URL.Query().Get("filter"), r.URL.Query().Get("from"),
		order, limit)
	if err != nil {
		sendAPIResponse(w, r, err, "", getRespStatus(err))
		return
	}
	render.JSON(w, r, entries)
}

func getIPListEntry(w http.ResponseWriter, r *http.Request) {
	r.Body = http.MaxBytesReader(w, r.Body, maxRequestSize)

	listType, ipOrNet, err := getIPListPathParams(r)
	if err != nil {
		sendAPIResponse(w, r, err, "", http.StatusBadRequest)
		return
	}

	entry, err := dataprovider.IPListEntryExists(ipOrNet, listType)
	if err != nil {
		sendAPIResponse(w, r, err, "", getRespStatus(err))
		return
	}
	render.JSON(w, r, entry)
}

func addIPListEntry(w http.ResponseWriter, r *http.Request) {
	r.Body = http.MaxBytesReader(w, r.Body, maxRequestSize)

	claims, err := getTokenClaims(r)
	if err != nil || claims.Username == "" {
		sendAPIResponse(w, r, err, "Invalid token claims", http.StatusBadRequest)
		return
	}
	var entry dataprovider.IPListEntry
	err = render.DecodeJSON(r.Body, &entry)
	if err != nil {
		sendAPIResponse(w, r, err, "", http.StatusBadRequest)
		return
	}
	err = dataprovider.AddIPListEntry(&entry, claims.Username, util.GetIPFromRemoteAddress(r.RemoteAddr), claims.Role)
	if err != nil {
		sendAPIResponse(w, r, err, "", getRespStatus(err))
		return
	}
	w.Header().Add("Location", fmt.Sprintf("%s/%d/%s", ipListsPath, entry.Type, url.PathEscape(entry.IPOrNet)))
	sendAPIResponse(w, r, nil, "Entry added", http.StatusCreated)
}

func updateIPListEntry(w http.ResponseWriter, r *http.Request) {
	r.Body = http.MaxBytesReader(w, r.Body, maxRequestSize)

	claims, err := getTokenClaims(r)
	if err != nil || claims.Username == "" {
		sendAPIResponse(w, r, err, "Invalid token claims", http.StatusBadRequest)
		return
	}
	listType, ipOrNet, err := getIPListPathParams(r)
	if err != nil {
		sendAPIResponse(w, r, err, "", http.StatusBadRequest)
		return
	}
	entry, err := dataprovider.IPListEntryExists(ipOrNet, listType)
	if err != nil {
		sendAPIResponse(w, r, err, "", getRespStatus(err))
		return
	}
	var updatedEntry dataprovider.IPListEntry
	err = render.DecodeJSON(r.Body, &updatedEntry)
	if err != nil {
		sendAPIResponse(w, r, err, "", http.StatusBadRequest)
		return
	}
	updatedEntry.Type = entry.Type
	updatedEntry.IPOrNet = entry.IPOrNet
	err = dataprovider.UpdateIPListEntry(&updatedEntry, claims.Username, util.GetIPFromRemoteAddress(r.RemoteAddr), claims.Role)
	if err != nil {
		sendAPIResponse(w, r, err, "", getRespStatus(err))
		return
	}
	sendAPIResponse(w, r, nil, "Entry updated", http.StatusOK)
}

func deleteIPListEntry(w http.ResponseWriter, r *http.Request) {
	r.Body = http.MaxBytesReader(w, r.Body, maxRequestSize)

	claims, err := getTokenClaims(r)
	if err != nil || claims.Username == "" {
		sendAPIResponse(w, r, err, "Invalid token claims", http.StatusBadRequest)
		return
	}
	listType, ipOrNet, err := getIPListPathParams(r)
	if err != nil {
		sendAPIResponse(w, r, err, "", http.StatusBadRequest)
		return
	}
	err = dataprovider.DeleteIPListEntry(ipOrNet, listType, claims.Username, util.GetIPFromRemoteAddress(r.RemoteAddr),
		claims.Role)
	if err != nil {
		sendAPIResponse(w, r, err, "", getRespStatus(err))
		return
	}
	sendAPIResponse(w, r, err, "Entry deleted", http.StatusOK)
}

func getIPListPathParams(r *http.Request) (dataprovider.IPListType, string, error) {
	listTypeString := chi.URLParam(r, "type")
	listType, err := strconv.Atoi(listTypeString)
	if err != nil {
		return dataprovider.IPListType(listType), "", errors.New("invalid list type")
	}
	if err := dataprovider.CheckIPListType(dataprovider.IPListType(listType)); err != nil {
		return dataprovider.IPListType(listType), "", err
	}
	return dataprovider.IPListType(listType), getURLParam(r, "ipornet"), nil
}
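The handlers above read the list type and the IP or network from chi URL parameters, and addIPListEntry advertises the created resource as ipListsPath/{type}/{ipornet}. The route registration itself is not part of this excerpt; a hedged sketch of how these handlers could be wired with chi, inferred only from that Location format and the URL parameter names, is:

// assumes the same httpd package and the chi import above
func registerIPListAPI(router chi.Router) {
	router.Route("/api/v2/iplists/{type}", func(r chi.Router) {
		r.Get("/", getIPListEntries)
		r.Post("/", addIPListEntry)
		r.Get("/{ipornet}", getIPListEntry)
		r.Put("/{ipornet}", updateIPListEntry)
		r.Delete("/{ipornet}", deleteIPListEntry)
	})
}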
@@ -18,6 +18,7 @@ import (
	"context"
	"fmt"
	"net/http"
+	"net/url"

	"github.com/go-chi/render"

@@ -78,7 +79,7 @@ func addAPIKey(w http.ResponseWriter, r *http.Request) {
	response := make(map[string]string)
	response["message"] = "API key created. This is the only time the API key is visible, please save it."
	response["key"] = apiKey.DisplayKey()
-	w.Header().Add("Location", fmt.Sprintf("%v/%v", apiKeysPath, apiKey.KeyID))
+	w.Header().Add("Location", fmt.Sprintf("%s/%s", apiKeysPath, url.PathEscape(apiKey.KeyID)))
	w.Header().Add("X-Object-ID", apiKey.KeyID)
	ctx := context.WithValue(r.Context(), render.StatusCtxKey, http.StatusCreated)
	render.JSON(w, r.WithContext(ctx), response)
@@ -39,10 +39,10 @@ func validateBackupFile(outputFile string) (string, error) {
		return "", errors.New("invalid or missing output-file")
	}
	if filepath.IsAbs(outputFile) {
-		return "", fmt.Errorf("invalid output-file %#v: it must be a relative path", outputFile)
+		return "", fmt.Errorf("invalid output-file %q: it must be a relative path", outputFile)
	}
	if strings.Contains(outputFile, "..") {
-		return "", fmt.Errorf("invalid output-file %#v", outputFile)
+		return "", fmt.Errorf("invalid output-file %q", outputFile)
	}
	outputFile = filepath.Join(dataprovider.GetBackupsPath(), outputFile)
	return outputFile, nil

@@ -71,16 +71,16 @@ func dumpData(w http.ResponseWriter, r *http.Request) {

		err = os.MkdirAll(filepath.Dir(outputFile), 0700)
		if err != nil {
-			logger.Error(logSender, "", "dumping data error: %v, output file: %#v", err, outputFile)
+			logger.Error(logSender, "", "dumping data error: %v, output file: %q", err, outputFile)
			sendAPIResponse(w, r, err, "", getRespStatus(err))
			return
		}
-		logger.Debug(logSender, "", "dumping data to: %#v", outputFile)
+		logger.Debug(logSender, "", "dumping data to: %q", outputFile)
	}

	backup, err := dataprovider.DumpData()
	if err != nil {
-		logger.Error(logSender, "", "dumping data error: %v, output file: %#v", err, outputFile)
+		logger.Error(logSender, "", "dumping data error: %v, output file: %q", err, outputFile)
		sendAPIResponse(w, r, err, "", getRespStatus(err))
		return
	}

@@ -101,11 +101,11 @@ func dumpData(w http.ResponseWriter, r *http.Request) {
		err = os.WriteFile(outputFile, dump, 0600)
	}
	if err != nil {
-		logger.Warn(logSender, "", "dumping data error: %v, output file: %#v", err, outputFile)
+		logger.Warn(logSender, "", "dumping data error: %v, output file: %q", err, outputFile)
		sendAPIResponse(w, r, err, "", getRespStatus(err))
		return
	}
-	logger.Debug(logSender, "", "dumping data completed, output file: %#v, error: %v", outputFile, err)
+	logger.Debug(logSender, "", "dumping data completed, output file: %q, error: %v", outputFile, err)
	sendAPIResponse(w, r, err, "Data saved", http.StatusOK)
}

@@ -150,7 +150,8 @@ func loadData(w http.ResponseWriter, r *http.Request) {
		return
	}
	if !filepath.IsAbs(inputFile) {
-		sendAPIResponse(w, r, fmt.Errorf("invalid input_file %#v: it must be an absolute path", inputFile), "", http.StatusBadRequest)
+		sendAPIResponse(w, r, fmt.Errorf("invalid input_file %q: it must be an absolute path", inputFile), "",
+			http.StatusBadRequest)
		return
	}
	fi, err := os.Stat(inputFile)

@@ -159,7 +160,7 @@ func loadData(w http.ResponseWriter, r *http.Request) {
		return
	}
	if fi.Size() > MaxRestoreSize {
-		sendAPIResponse(w, r, err, fmt.Sprintf("Unable to restore input file: %#v size too big: %v/%v bytes",
+		sendAPIResponse(w, r, err, fmt.Sprintf("Unable to restore input file: %q size too big: %d/%d bytes",
			inputFile, fi.Size(), MaxRestoreSize), http.StatusBadRequest)
		return
	}

@@ -182,6 +183,10 @@ func restoreBackup(content []byte, inputFile string, scanQuota, mode int, execut
		return util.NewValidationError(fmt.Sprintf("unable to parse backup content: %v", err))
	}

+	if err = RestoreIPListEntries(dump.IPLists, inputFile, mode, executor, ipAddress, role); err != nil {
+		return err
+	}
+
	if err = RestoreRoles(dump.Roles, inputFile, mode, executor, ipAddress, role); err != nil {
		return err
	}

@@ -251,29 +256,29 @@ func getLoaddataOptions(r *http.Request) (string, int, int, error) {

// RestoreFolders restores the specified folders
func RestoreFolders(folders []vfs.BaseVirtualFolder, inputFile string, mode, scanQuota int, executor, ipAddress, role string) error {
-	for _, folder := range folders {
-		folder := folder // pin
+	for idx := range folders {
+		folder := folders[idx]
		f, err := dataprovider.GetFolderByName(folder.Name)
		if err == nil {
			if mode == 1 {
-				logger.Debug(logSender, "", "loaddata mode 1, existing folder %#v not updated", folder.Name)
+				logger.Debug(logSender, "", "loaddata mode 1, existing folder %q not updated", folder.Name)
				continue
			}
			folder.ID = f.ID
			folder.Name = f.Name
			err = dataprovider.UpdateFolder(&folder, f.Users, f.Groups, executor, ipAddress, role)
-			logger.Debug(logSender, "", "restoring existing folder %#v, dump file: %#v, error: %v", folder.Name, inputFile, err)
+			logger.Debug(logSender, "", "restoring existing folder %q, dump file: %q, error: %v", folder.Name, inputFile, err)
		} else {
			folder.Users = nil
			err = dataprovider.AddFolder(&folder, executor, ipAddress, role)
-			logger.Debug(logSender, "", "adding new folder %#v, dump file: %#v, error: %v", folder.Name, inputFile, err)
+			logger.Debug(logSender, "", "adding new folder %q, dump file: %q, error: %v", folder.Name, inputFile, err)
		}
		if err != nil {
-			return fmt.Errorf("unable to restore folder %#v: %w", folder.Name, err)
+			return fmt.Errorf("unable to restore folder %q: %w", folder.Name, err)
		}
		if scanQuota >= 1 {
			if common.QuotaScans.AddVFolderQuotaScan(folder.Name) {
-				logger.Debug(logSender, "", "starting quota scan for restored folder: %#v", folder.Name)
+				logger.Debug(logSender, "", "starting quota scan for restored folder: %q", folder.Name)
				go doFolderQuotaScan(folder) //nolint:errcheck
			}
		}

@@ -285,21 +290,21 @@ func RestoreFolders(folders []vfs.BaseVirtualFolder, inputFile string, mode, sca
func RestoreShares(shares []dataprovider.Share, inputFile string, mode int, executor,
	ipAddress, role string,
) error {
-	for _, share := range shares {
-		share := share // pin
+	for idx := range shares {
+		share := shares[idx]
		share.IsRestore = true
		s, err := dataprovider.ShareExists(share.ShareID, "")
		if err == nil {
			if mode == 1 {
-				logger.Debug(logSender, "", "loaddata mode 1, existing share %#v not updated", share.ShareID)
+				logger.Debug(logSender, "", "loaddata mode 1, existing share %q not updated", share.ShareID)
				continue
			}
			share.ID = s.ID
			err = dataprovider.UpdateShare(&share, executor, ipAddress, role)
-			logger.Debug(logSender, "", "restoring existing share %#v, dump file: %#v, error: %v", share.ShareID, inputFile, err)
+			logger.Debug(logSender, "", "restoring existing share %q, dump file: %q, error: %v", share.ShareID, inputFile, err)
		} else {
			err = dataprovider.AddShare(&share, executor, ipAddress, role)
-			logger.Debug(logSender, "", "adding new share %#v, dump file: %#v, error: %v", share.ShareID, inputFile, err)
+			logger.Debug(logSender, "", "adding new share %q, dump file: %q, error: %v", share.ShareID, inputFile, err)
		}
		if err != nil {
			return fmt.Errorf("unable to restore share %q: %w", share.ShareID, err)

@@ -310,8 +315,8 @@ func RestoreShares(shares []dataprovider.Share, inputFile string, mode int, exec

// RestoreEventActions restores the specified event actions
func RestoreEventActions(actions []dataprovider.BaseEventAction, inputFile string, mode int, executor, ipAddress, role string) error {
-	for _, action := range actions {
-		action := action // pin
+	for idx := range actions {
+		action := actions[idx]
		a, err := dataprovider.EventActionExists(action.Name)
		if err == nil {
			if mode == 1 {

@@ -336,8 +341,8 @@ func RestoreEventActions(actions []dataprovider.BaseEventAction, inputFile strin
func RestoreEventRules(rules []dataprovider.EventRule, inputFile string, mode int, executor, ipAddress,
	role string, dumpVersion int,
) error {
-	for _, rule := range rules {
-		rule := rule // pin
+	for idx := range rules {
+		rule := rules[idx]
		if dumpVersion < 15 {
			rule.Status = 1
		}

@@ -363,8 +368,8 @@ func RestoreEventRules(rules []dataprovider.EventRule, inputFile string, mode in

// RestoreAPIKeys restores the specified API keys
func RestoreAPIKeys(apiKeys []dataprovider.APIKey, inputFile string, mode int, executor, ipAddress, role string) error {
-	for _, apiKey := range apiKeys {
-		apiKey := apiKey // pin
+	for idx := range apiKeys {
+		apiKey := apiKeys[idx]
		if apiKey.Key == "" {
			logger.Warn(logSender, "", "cannot restore empty API key")
			return fmt.Errorf("cannot restore an empty API key: %+v", apiKey)

@@ -372,18 +377,18 @@ func RestoreAPIKeys(apiKeys []dataprovider.APIKey, inputFile string, mode int, e
		k, err := dataprovider.APIKeyExists(apiKey.KeyID)
		if err == nil {
			if mode == 1 {
-				logger.Debug(logSender, "", "loaddata mode 1, existing API key %#v not updated", apiKey.KeyID)
+				logger.Debug(logSender, "", "loaddata mode 1, existing API key %q not updated", apiKey.KeyID)
				continue
			}
			apiKey.ID = k.ID
			err = dataprovider.UpdateAPIKey(&apiKey, executor, ipAddress, role)
-			logger.Debug(logSender, "", "restoring existing API key %#v, dump file: %#v, error: %v", apiKey.KeyID, inputFile, err)
+			logger.Debug(logSender, "", "restoring existing API key %q, dump file: %q, error: %v", apiKey.KeyID, inputFile, err)
		} else {
			err = dataprovider.AddAPIKey(&apiKey, executor, ipAddress, role)
-			logger.Debug(logSender, "", "adding new API key %#v, dump file: %#v, error: %v", apiKey.KeyID, inputFile, err)
+			logger.Debug(logSender, "", "adding new API key %q, dump file: %q, error: %v", apiKey.KeyID, inputFile, err)
		}
		if err != nil {
-			return fmt.Errorf("unable to restore API key %#v: %w", apiKey.KeyID, err)
+			return fmt.Errorf("unable to restore API key %q: %w", apiKey.KeyID, err)
		}
	}
	return nil

@@ -391,34 +396,62 @@ func RestoreAPIKeys(apiKeys []dataprovider.APIKey, inputFile string, mode int, e

// RestoreAdmins restores the specified admins
func RestoreAdmins(admins []dataprovider.Admin, inputFile string, mode int, executor, ipAddress, role string) error {
-	for _, admin := range admins {
-		admin := admin // pin
+	for idx := range admins {
+		admin := admins[idx]
		a, err := dataprovider.AdminExists(admin.Username)
		if err == nil {
			if mode == 1 {
-				logger.Debug(logSender, "", "loaddata mode 1, existing admin %#v not updated", a.Username)
+				logger.Debug(logSender, "", "loaddata mode 1, existing admin %q not updated", a.Username)
				continue
			}
			admin.ID = a.ID
			admin.Username = a.Username
			err = dataprovider.UpdateAdmin(&admin, executor, ipAddress, role)
-			logger.Debug(logSender, "", "restoring existing admin %#v, dump file: %#v, error: %v", admin.Username, inputFile, err)
+			logger.Debug(logSender, "", "restoring existing admin %q, dump file: %q, error: %v", admin.Username, inputFile, err)
		} else {
			err = dataprovider.AddAdmin(&admin, executor, ipAddress, role)
-			logger.Debug(logSender, "", "adding new admin %#v, dump file: %#v, error: %v", admin.Username, inputFile, err)
+			logger.Debug(logSender, "", "adding new admin %q, dump file: %q, error: %v", admin.Username, inputFile, err)
		}
		if err != nil {
-			return fmt.Errorf("unable to restore admin %#v: %w", admin.Username, err)
+			return fmt.Errorf("unable to restore admin %q: %w", admin.Username, err)
		}
	}

	return nil
}

+// RestoreIPListEntries restores the specified IP list entries
+func RestoreIPListEntries(entries []dataprovider.IPListEntry, inputFile string, mode int, executor, ipAddress,
+	executorRole string,
+) error {
+	for idx := range entries {
+		entry := entries[idx]
+		e, err := dataprovider.IPListEntryExists(entry.IPOrNet, entry.Type)
+		if err == nil {
+			if mode == 1 {
+				logger.Debug(logSender, "", "loaddata mode 1, existing IP list entry %s-%s not updated",
+					e.Type.AsString(), e.IPOrNet)
+				continue
+			}
+			err = dataprovider.UpdateIPListEntry(&entry, executor, ipAddress, executorRole)
+			logger.Debug(logSender, "", "restoring existing IP list entry: %s-%s, dump file: %q, error: %v",
+				entry.Type.AsString(), entry.IPOrNet, inputFile, err)
+		} else {
+			err = dataprovider.AddIPListEntry(&entry, executor, ipAddress, executorRole)
+			logger.Debug(logSender, "", "adding new IP list entry %s-%s, dump file: %q, error: %v",
+				entry.Type.AsString(), entry.IPOrNet, inputFile, err)
+		}
+		if err != nil {
+			return fmt.Errorf("unable to restore IP list entry %s-%s: %w", entry.Type.AsString(), entry.IPOrNet, err)
+		}
+	}
+	return nil
+}
+
// RestoreRoles restores the specified roles
func RestoreRoles(roles []dataprovider.Role, inputFile string, mode int, executor, ipAddress, executorRole string) error {
-	for _, role := range roles {
-		role := role // pin
+	for idx := range roles {
+		role := roles[idx]
		r, err := dataprovider.RoleExists(role.Name)
		if err == nil {
			if mode == 1 {

@@ -427,13 +460,13 @@ func RestoreRoles(roles []dataprovider.Role, inputFile string, mode int, executo
			}
			role.ID = r.ID
			err = dataprovider.UpdateRole(&role, executor, ipAddress, executorRole)
-			logger.Debug(logSender, "", "restoring existing role: %q, dump file: %#v, error: %v", role.Name, inputFile, err)
+			logger.Debug(logSender, "", "restoring existing role: %q, dump file: %q, error: %v", role.Name, inputFile, err)
		} else {
			err = dataprovider.AddRole(&role, executor, ipAddress, executorRole)
			logger.Debug(logSender, "", "adding new role: %q, dump file: %q, error: %v", role.Name, inputFile, err)
		}
		if err != nil {
-			return fmt.Errorf("unable to restore role %#v: %w", role.Name, err)
+			return fmt.Errorf("unable to restore role %q: %w", role.Name, err)
		}
	}
	return nil

@@ -441,24 +474,24 @@ func RestoreRoles(roles []dataprovider.Role, inputFile string, mode int, executo

// RestoreGroups restores the specified groups
func RestoreGroups(groups []dataprovider.Group, inputFile string, mode int, executor, ipAddress, role string) error {
-	for _, group := range groups {
-		group := group // pin
+	for idx := range groups {
+		group := groups[idx]
		g, err := dataprovider.GroupExists(group.Name)
		if err == nil {
			if mode == 1 {
-				logger.Debug(logSender, "", "loaddata mode 1, existing group %#v not updated", g.Name)
+				logger.Debug(logSender, "", "loaddata mode 1, existing group %q not updated", g.Name)
				continue
			}
			group.ID = g.ID
			group.Name = g.Name
			err = dataprovider.UpdateGroup(&group, g.Users, executor, ipAddress, role)
-			logger.Debug(logSender, "", "restoring existing group: %#v, dump file: %#v, error: %v", group.Name, inputFile, err)
+			logger.Debug(logSender, "", "restoring existing group: %q, dump file: %q, error: %v", group.Name, inputFile, err)
		} else {
			err = dataprovider.AddGroup(&group, executor, ipAddress, role)
-			logger.Debug(logSender, "", "adding new group: %#v, dump file: %#v, error: %v", group.Name, inputFile, err)
+			logger.Debug(logSender, "", "adding new group: %q, dump file: %q, error: %v", group.Name, inputFile, err)
		}
		if err != nil {
-			return fmt.Errorf("unable to restore group %#v: %w", group.Name, err)
+			return fmt.Errorf("unable to restore group %q: %w", group.Name, err)
		}
	}
	return nil

@@ -466,31 +499,31 @@ func RestoreGroups(groups []dataprovider.Group, inputFile string, mode int, exec

// RestoreUsers restores the specified users
func RestoreUsers(users []dataprovider.User, inputFile string, mode, scanQuota int, executor, ipAddress, role string) error {
-	for _, user := range users {
-		user := user // pin
+	for idx := range users {
+		user := users[idx]
		u, err := dataprovider.UserExists(user.Username, "")
		if err == nil {
			if mode == 1 {
-				logger.Debug(logSender, "", "loaddata mode 1, existing user %#v not updated", u.Username)
+				logger.Debug(logSender, "", "loaddata mode 1, existing user %q not updated", u.Username)
				continue
			}
			user.ID = u.ID
			user.Username = u.Username
			err = dataprovider.UpdateUser(&user, executor, ipAddress, role)
-			logger.Debug(logSender, "", "restoring existing user: %#v, dump file: %#v, error: %v", user.Username, inputFile, err)
+			logger.Debug(logSender, "", "restoring existing user: %q, dump file: %q, error: %v", user.Username, inputFile, err)
			if mode == 2 && err == nil {
				disconnectUser(user.Username, executor, role)
			}
		} else {
			err = dataprovider.AddUser(&user, executor, ipAddress, role)
-			logger.Debug(logSender, "", "adding new user: %#v, dump file: %#v, error: %v", user.Username, inputFile, err)
+			logger.Debug(logSender, "", "adding new user: %q, dump file: %q, error: %v", user.Username, inputFile, err)
		}
		if err != nil {
-			return fmt.Errorf("unable to restore user %#v: %w", user.Username, err)
+			return fmt.Errorf("unable to restore user %q: %w", user.Username, err)
		}
		if scanQuota == 1 || (scanQuota == 2 && user.HasQuotaRestrictions()) {
			if common.QuotaScans.AddUserQuotaScan(user.Username, user.Role) {
-				logger.Debug(logSender, "", "starting quota scan for restored user: %#v", user.Username)
+				logger.Debug(logSender, "", "starting quota scan for restored user: %q", user.Username)
				go doUserQuotaScan(user) //nolint:errcheck
			}
		}
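All of the Restore* helpers switch from `for _, x := range xs { x := x // pin }` to indexing plus an explicit copy. Before Go 1.22 the range variable is a single reused variable, so taking its address (or passing `&x` to the provider) would alias every iteration; both the old pin and the new explicit copy avoid that, the new form just makes the copy obvious. A tiny self-contained demonstration of the hazard:

package main

import "fmt"

func main() {
	items := []string{"a", "b", "c"}
	var aliased, copied []*string
	for _, it := range items {
		aliased = append(aliased, &it) // before Go 1.22: &it is the same variable every iteration
	}
	for idx := range items {
		it := items[idx] // explicit copy, equivalent to the removed "x := x // pin"
		copied = append(copied, &it)
	}
	fmt.Println(*aliased[0], *aliased[1], *aliased[2]) // "c c c" on Go < 1.22
	fmt.Println(*copied[0], *copied[1], *copied[2])    // "a b c"
}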
@@ -18,6 +18,7 @@ import (
	"context"
	"fmt"
	"net/http"
+	"net/url"

	"github.com/go-chi/render"

@@ -59,7 +60,7 @@ func addRole(w http.ResponseWriter, r *http.Request) {
	if err != nil {
		sendAPIResponse(w, r, err, "", getRespStatus(err))
	} else {
-		w.Header().Add("Location", fmt.Sprintf("%s/%s", rolesPath, role.Name))
+		w.Header().Add("Location", fmt.Sprintf("%s/%s", rolesPath, url.PathEscape(role.Name)))
		renderRole(w, r, role.Name, http.StatusCreated)
	}
}

@@ -19,6 +19,7 @@ import (
	"errors"
	"fmt"
	"net/http"
+	"net/url"
	"os"
	"path"
	"strings"

@@ -112,7 +113,7 @@ func addShare(w http.ResponseWriter, r *http.Request) {
		sendAPIResponse(w, r, err, "", getRespStatus(err))
		return
	}
-	w.Header().Add("Location", fmt.Sprintf("%v/%v", userSharesPath, share.ShareID))
+	w.Header().Add("Location", fmt.Sprintf("%s/%s", userSharesPath, url.PathEscape(share.ShareID)))
	w.Header().Add("X-Object-ID", share.ShareID)
	sendAPIResponse(w, r, nil, "Share created", http.StatusCreated)
}

@@ -18,6 +18,7 @@ import (
	"context"
	"fmt"
	"net/http"
+	"net/url"
	"strconv"
	"time"

@@ -114,7 +115,7 @@ func addUser(w http.ResponseWriter, r *http.Request) {
		sendAPIResponse(w, r, err, "", getRespStatus(err))
		return
	}
-	w.Header().Add("Location", fmt.Sprintf("%s/%s", userPath, user.Username))
+	w.Header().Add("Location", fmt.Sprintf("%s/%s", userPath, url.PathEscape(user.Username)))
	renderUser(w, r, user.Username, claims.Role, http.StatusCreated)
}
@@ -603,7 +603,7 @@ func updateLoginMetrics(user *dataprovider.User, loginMethod, ip string, err err
		if errors.Is(err, util.ErrNotFound) {
			event = common.HostEventUserNotFound
		}
-		common.AddDefenderEvent(ip, event)
+		common.AddDefenderEvent(ip, common.ProtocolHTTP, event)
	}
	metric.AddLoginResult(loginMethod, err)
	dataprovider.ExecutePostLoginHook(user, loginMethod, ip, protocol, err)
@ -13,7 +13,7 @@
|
||||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
// Package httpd implements REST API and Web interface for SFTPGo.
|
// Package httpd implements REST API and Web interface for SFTPGo.
|
||||||
// The OpenAPI 3 schema for the exposed API can be found inside the source tree:
|
// The OpenAPI 3 schema for the supported API can be found inside the source tree:
|
||||||
// https://github.com/drakkan/sftpgo/blob/main/openapi/openapi.yaml
|
// https://github.com/drakkan/sftpgo/blob/main/openapi/openapi.yaml
|
||||||
package httpd
|
package httpd
|
||||||
|
|
||||||
|
@ -93,6 +93,7 @@ const (
|
||||||
eventActionsPath = "/api/v2/eventactions"
|
eventActionsPath = "/api/v2/eventactions"
|
||||||
eventRulesPath = "/api/v2/eventrules"
|
eventRulesPath = "/api/v2/eventrules"
|
||||||
rolesPath = "/api/v2/roles"
|
rolesPath = "/api/v2/roles"
|
||||||
|
ipListsPath = "/api/v2/iplists"
|
||||||
healthzPath = "/healthz"
|
healthzPath = "/healthz"
|
||||||
robotsTxtPath = "/robots.txt"
|
robotsTxtPath = "/robots.txt"
|
||||||
webRootPathDefault = "/"
|
webRootPathDefault = "/"
|
||||||
|
@ -139,6 +140,8 @@ const (
|
||||||
webTemplateUserDefault = "/web/admin/template/user"
|
webTemplateUserDefault = "/web/admin/template/user"
|
||||||
webTemplateFolderDefault = "/web/admin/template/folder"
|
webTemplateFolderDefault = "/web/admin/template/folder"
|
||||||
webDefenderPathDefault = "/web/admin/defender"
|
webDefenderPathDefault = "/web/admin/defender"
|
||||||
|
webIPListsPathDefault = "/web/admin/ip-lists"
|
||||||
|
webIPListPathDefault = "/web/admin/ip-list"
|
||||||
webDefenderHostsPathDefault = "/web/admin/defender/hosts"
|
webDefenderHostsPathDefault = "/web/admin/defender/hosts"
|
||||||
webEventsPathDefault = "/web/admin/events"
|
webEventsPathDefault = "/web/admin/events"
|
||||||
webEventsFsSearchPathDefault = "/web/admin/events/fs"
|
webEventsFsSearchPathDefault = "/web/admin/events/fs"
|
||||||
|
@ -171,11 +174,11 @@ const (
|
||||||
webStaticFilesPathDefault = "/static"
|
webStaticFilesPathDefault = "/static"
|
||||||
webOpenAPIPathDefault = "/openapi"
|
webOpenAPIPathDefault = "/openapi"
|
||||||
// MaxRestoreSize defines the max size for the loaddata input file
|
// MaxRestoreSize defines the max size for the loaddata input file
|
||||||
MaxRestoreSize = 10485760 // 10 MB
|
MaxRestoreSize = 20 * 1048576 // 20 MB
|
||||||
maxRequestSize = 1048576 // 1MB
|
maxRequestSize = 1048576 // 1MB
|
||||||
maxLoginBodySize = 262144 // 256 KB
|
maxLoginBodySize = 262144 // 256 KB
|
||||||
httpdMaxEditFileSize = 1048576 // 1 MB
|
httpdMaxEditFileSize = 1048576 // 1 MB
|
||||||
maxMultipartMem = 10485760 // 10 MB
|
maxMultipartMem = 10 * 1048576 // 10 MB
|
||||||
osWindows = "windows"
|
osWindows = "windows"
|
||||||
otpHeaderCode = "X-SFTPGO-OTP"
|
otpHeaderCode = "X-SFTPGO-OTP"
|
||||||
mTimeHeader = "X-SFTPGO-MTIME"
|
mTimeHeader = "X-SFTPGO-MTIME"
|
||||||
|
@ -231,6 +234,8 @@ var (
|
||||||
webTemplateUser string
|
webTemplateUser string
|
||||||
webTemplateFolder string
|
webTemplateFolder string
|
||||||
webDefenderPath string
|
webDefenderPath string
|
||||||
|
webIPListPath string
|
||||||
|
webIPListsPath string
|
||||||
webEventsPath string
|
webEventsPath string
|
||||||
webEventsFsSearchPath string
|
webEventsFsSearchPath string
|
||||||
webEventsProviderSearchPath string
|
webEventsProviderSearchPath string
|
||||||
|
@ -636,6 +641,20 @@ type defenderStatus struct {
|
||||||
IsActive bool `json:"is_active"`
|
IsActive bool `json:"is_active"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type allowListStatus struct {
|
||||||
|
IsActive bool `json:"is_active"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type rateLimiters struct {
|
||||||
|
IsActive bool `json:"is_active"`
|
||||||
|
Protocols []string `json:"protocols"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetProtocolsAsString returns the enabled protocols as comma separated string
|
||||||
|
func (r *rateLimiters) GetProtocolsAsString() string {
|
||||||
|
return strings.Join(r.Protocols, ", ")
|
||||||
|
}
|
||||||
|
|
||||||
// ServicesStatus keep the state of the running services
|
// ServicesStatus keep the state of the running services
|
||||||
type ServicesStatus struct {
|
type ServicesStatus struct {
|
||||||
SSH sftpd.ServiceStatus `json:"ssh"`
|
SSH sftpd.ServiceStatus `json:"ssh"`
|
||||||
|
@ -644,6 +663,8 @@ type ServicesStatus struct {
|
||||||
DataProvider dataprovider.ProviderStatus `json:"data_provider"`
|
DataProvider dataprovider.ProviderStatus `json:"data_provider"`
|
||||||
Defender defenderStatus `json:"defender"`
|
Defender defenderStatus `json:"defender"`
|
||||||
MFA mfa.ServiceStatus `json:"mfa"`
|
MFA mfa.ServiceStatus `json:"mfa"`
|
||||||
|
AllowList allowListStatus `json:"allow_list"`
|
||||||
|
RateLimiters rateLimiters `json:"rate_limiters"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetupConfig defines the configuration parameters for the initial web admin setup
|
// SetupConfig defines the configuration parameters for the initial web admin setup
|
||||||
|
@ -924,6 +945,7 @@ func getConfigPath(name, configDir string) string {
}

func getServicesStatus() *ServicesStatus {
rtlEnabled, rtlProtocols := common.Config.GetRateLimitersStatus()
status := &ServicesStatus{
SSH: sftpd.GetStatus(),
FTP: ftpd.GetStatus(),

@ -933,6 +955,13 @@ func getServicesStatus() *ServicesStatus {
IsActive: common.Config.DefenderConfig.Enabled,
},
MFA: mfa.GetStatus(),
AllowList: allowListStatus{
IsActive: common.Config.IsAllowListEnabled(),
},
RateLimiters: rateLimiters{
IsActive: rtlEnabled,
Protocols: rtlProtocols,
},
}
return status
}
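
The two new sections show up in the services status payload roughly as follows; the field names are taken from the json tags above, the values are made up for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the unexported httpd structs, for illustration only.
type allowListStatus struct {
	IsActive bool `json:"is_active"`
}

type rateLimiters struct {
	IsActive  bool     `json:"is_active"`
	Protocols []string `json:"protocols"`
}

func main() {
	payload := struct {
		AllowList    allowListStatus `json:"allow_list"`
		RateLimiters rateLimiters    `json:"rate_limiters"`
	}{
		AllowList:    allowListStatus{IsActive: true},
		RateLimiters: rateLimiters{IsActive: true, Protocols: []string{"SSH", "HTTP"}},
	}
	b, err := json.MarshalIndent(payload, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}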
@ -1035,6 +1064,8 @@ func updateWebAdminURLs(baseURL string) {
webTemplateFolder = path.Join(baseURL, webTemplateFolderDefault)
webDefenderHostsPath = path.Join(baseURL, webDefenderHostsPathDefault)
webDefenderPath = path.Join(baseURL, webDefenderPathDefault)
webIPListPath = path.Join(baseURL, webIPListPathDefault)
webIPListsPath = path.Join(baseURL, webIPListsPathDefault)
webEventsPath = path.Join(baseURL, webEventsPathDefault)
webEventsFsSearchPath = path.Join(baseURL, webEventsFsSearchPathDefault)
webEventsProviderSearchPath = path.Join(baseURL, webEventsProviderSearchPathDefault)
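
The URL wiring is plain path joining; a quick sketch of what the new admin paths look like for the root base URL and for a hypothetical custom prefix:

package main

import (
	"fmt"
	"path"
)

func main() {
	// With the default base URL the defaults are kept as-is; a custom prefix
	// ("/custom" here is only an example) is simply prepended.
	fmt.Println(path.Join("/", "/web/admin/ip-lists"))       // /web/admin/ip-lists
	fmt.Println(path.Join("/custom", "/web/admin/ip-lists")) // /custom/web/admin/ip-lists
}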
@ -127,6 +127,7 @@ const (
eventActionsPath = "/api/v2/eventactions"
eventRulesPath = "/api/v2/eventrules"
rolesPath = "/api/v2/roles"
ipListsPath = "/api/v2/iplists"
healthzPath = "/healthz"
robotsTxtPath = "/robots.txt"
webBasePath = "/web"

@ -151,6 +152,8 @@ const (
webTemplateUser = "/web/admin/template/user"
webTemplateFolder = "/web/admin/template/folder"
webDefenderPath = "/web/admin/defender"
webIPListsPath = "/web/admin/ip-lists"
webIPListPath = "/web/admin/ip-list"
webAdminTwoFactorPath = "/web/admin/twofactor"
webAdminTwoFactorRecoveryPath = "/web/admin/twofactor-recovery"
webAdminMFAPath = "/web/admin/mfa"
@ -330,12 +333,6 @@ func TestMain(m *testing.M) {
providerConf := config.GetProviderConf()
logger.InfoToConsole("Starting HTTPD tests, provider: %v", providerConf.Driver)

err = common.Initialize(config.GetCommonConfig(), 0)
if err != nil {
logger.WarnToConsole("error initializing common: %v", err)
os.Exit(1)
}

backupsPath = filepath.Join(os.TempDir(), "test_backups")
providerConf.BackupsPath = backupsPath
err = os.MkdirAll(backupsPath, os.ModePerm)

@ -350,6 +347,12 @@ func TestMain(m *testing.M) {
os.Exit(1)
}

err = common.Initialize(config.GetCommonConfig(), 0)
if err != nil {
logger.WarnToConsole("error initializing common: %v", err)
os.Exit(1)
}

postConnectPath = filepath.Join(homeBasePath, "postconnect.sh")
preActionPath = filepath.Join(homeBasePath, "preaction.sh")
|
@ -1281,6 +1284,206 @@ func TestGroupSettingsOverride(t *testing.T) {
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestBasicIPListEntriesHandling(t *testing.T) {
|
||||||
|
entry := dataprovider.IPListEntry{
|
||||||
|
IPOrNet: "::ffff:12.34.56.78",
|
||||||
|
Type: dataprovider.IPListTypeAllowList,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
Description: "test desc",
|
||||||
|
}
|
||||||
|
_, _, err := httpdtest.GetIPListEntry(entry.IPOrNet, -1, http.StatusBadRequest)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
_, _, err = httpdtest.UpdateIPListEntry(entry, http.StatusNotFound)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
_, _, err = httpdtest.AddIPListEntry(entry, http.StatusCreated)
|
||||||
|
assert.Error(t, err)
|
||||||
|
// IPv4 address in IPv6 will be converted to standard IPv4
|
||||||
|
entry1, _, err := httpdtest.GetIPListEntry("12.34.56.78/32", dataprovider.IPListTypeAllowList, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
entry = dataprovider.IPListEntry{
|
||||||
|
IPOrNet: "192.168.0.0/24",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeDeny,
|
||||||
|
}
|
||||||
|
entry2, _, err := httpdtest.AddIPListEntry(entry, http.StatusCreated)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
// adding the same entry again should fail
|
||||||
|
_, _, err = httpdtest.AddIPListEntry(entry, http.StatusInternalServerError)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
// adding an entry with an invalid IP should fail
|
||||||
|
entry.IPOrNet = "invalid"
|
||||||
|
_, _, err = httpdtest.AddIPListEntry(entry, http.StatusBadRequest)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
// adding an entry with an incompatible mode should fail
|
||||||
|
entry.IPOrNet = entry2.IPOrNet
|
||||||
|
entry.Mode = -1
|
||||||
|
_, _, err = httpdtest.AddIPListEntry(entry, http.StatusBadRequest)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
entry.Type = -1
|
||||||
|
_, _, err = httpdtest.UpdateIPListEntry(entry, http.StatusBadRequest)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
entry = dataprovider.IPListEntry{
|
||||||
|
IPOrNet: "2001:4860:4860::8888/120",
|
||||||
|
Type: dataprovider.IPListTypeRateLimiterSafeList,
|
||||||
|
Mode: dataprovider.ListModeDeny,
|
||||||
|
}
|
||||||
|
_, _, err = httpdtest.AddIPListEntry(entry, http.StatusBadRequest)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
entry.Mode = dataprovider.ListModeAllow
|
||||||
|
_, _, err = httpdtest.AddIPListEntry(entry, http.StatusCreated)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
entry.Protocols = 3
|
||||||
|
entry3, _, err := httpdtest.UpdateIPListEntry(entry, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
entry.Mode = dataprovider.ListModeDeny
|
||||||
|
_, _, err = httpdtest.UpdateIPListEntry(entry, http.StatusBadRequest)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
for _, tt := range []dataprovider.IPListType{dataprovider.IPListTypeAllowList, dataprovider.IPListTypeDefender, dataprovider.IPListTypeRateLimiterSafeList} {
|
||||||
|
entries, _, err := httpdtest.GetIPListEntries(tt, "", "", dataprovider.OrderASC, 0, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
if assert.Len(t, entries, 1) {
|
||||||
|
switch tt {
|
||||||
|
case dataprovider.IPListTypeAllowList:
|
||||||
|
assert.Equal(t, entry1, entries[0])
|
||||||
|
case dataprovider.IPListTypeDefender:
|
||||||
|
assert.Equal(t, entry2, entries[0])
|
||||||
|
case dataprovider.IPListTypeRateLimiterSafeList:
|
||||||
|
assert.Equal(t, entry3, entries[0])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
_, _, err = httpdtest.GetIPListEntries(dataprovider.IPListTypeAllowList, "", "", "invalid order", 0, http.StatusBadRequest)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
_, _, err = httpdtest.GetIPListEntries(-1, "", "", dataprovider.OrderASC, 0, http.StatusBadRequest)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
_, err = httpdtest.RemoveIPListEntry(entry1, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
_, err = httpdtest.RemoveIPListEntry(entry1, http.StatusNotFound)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
_, err = httpdtest.RemoveIPListEntry(entry2, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
entry2.Type = -1
|
||||||
|
_, err = httpdtest.RemoveIPListEntry(entry2, http.StatusBadRequest)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
_, err = httpdtest.RemoveIPListEntry(entry3, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
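
The IPv4-in-IPv6 normalization exercised above can be reproduced with the standard library; this sketch only illustrates the mapping, the actual normalization is implemented in the dataprovider package:

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	// "::ffff:12.34.56.78" is an IPv4-mapped IPv6 address: it normalizes to
	// plain IPv4, which is why the test fetches the entry as "12.34.56.78/32".
	addr := netip.MustParseAddr("::ffff:12.34.56.78")
	fmt.Println(addr.Is4In6())                          // true
	fmt.Println(netip.PrefixFrom(addr.Unmap(), 32))     // 12.34.56.78/32

	// The same applies to networks: a /120 on the mapped form covers the same
	// range as a /24 on the IPv4 form, matching the validation test further down.
	v4 := netip.MustParseAddr("::ffff:34.56.78.90").Unmap()
	fmt.Println(netip.PrefixFrom(v4, 120-96).Masked())  // 34.56.78.0/24
}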
|
|
||||||
|
func TestSearchIPListEntries(t *testing.T) {
|
||||||
|
entries := []dataprovider.IPListEntry{
|
||||||
|
{
|
||||||
|
IPOrNet: "192.168.0.0/24",
|
||||||
|
Type: dataprovider.IPListTypeAllowList,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
Protocols: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "192.168.0.1/24",
|
||||||
|
Type: dataprovider.IPListTypeAllowList,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
Protocols: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "192.168.0.2/24",
|
||||||
|
Type: dataprovider.IPListTypeAllowList,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
Protocols: 5,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "192.168.0.3/24",
|
||||||
|
Type: dataprovider.IPListTypeAllowList,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
Protocols: 8,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "10.8.0.0/24",
|
||||||
|
Type: dataprovider.IPListTypeAllowList,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
Protocols: 3,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "10.8.1.0/24",
|
||||||
|
Type: dataprovider.IPListTypeAllowList,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
Protocols: 8,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "10.8.2.0/24",
|
||||||
|
Type: dataprovider.IPListTypeAllowList,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
Protocols: 1,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, e := range entries {
|
||||||
|
_, _, err := httpdtest.AddIPListEntry(e, http.StatusCreated)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
results, _, err := httpdtest.GetIPListEntries(dataprovider.IPListTypeAllowList, "", "", dataprovider.OrderASC, 20, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
if assert.Equal(t, len(entries), len(results)) {
|
||||||
|
assert.Equal(t, "10.8.0.0/24", results[0].IPOrNet)
|
||||||
|
}
|
||||||
|
results, _, err = httpdtest.GetIPListEntries(dataprovider.IPListTypeAllowList, "", "", dataprovider.OrderDESC, 20, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
if assert.Equal(t, len(entries), len(results)) {
|
||||||
|
assert.Equal(t, "192.168.0.3/24", results[0].IPOrNet)
|
||||||
|
}
|
||||||
|
results, _, err = httpdtest.GetIPListEntries(dataprovider.IPListTypeAllowList, "", "192.168.0.1/24", dataprovider.OrderASC, 1, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
if assert.Equal(t, 1, len(results), results) {
|
||||||
|
assert.Equal(t, "192.168.0.2/24", results[0].IPOrNet)
|
||||||
|
}
|
||||||
|
results, _, err = httpdtest.GetIPListEntries(dataprovider.IPListTypeAllowList, "", "10.8.2.0/24", dataprovider.OrderDESC, 1, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
if assert.Equal(t, 1, len(results), results) {
|
||||||
|
assert.Equal(t, "10.8.1.0/24", results[0].IPOrNet)
|
||||||
|
}
|
||||||
|
results, _, err = httpdtest.GetIPListEntries(dataprovider.IPListTypeAllowList, "10.", "", dataprovider.OrderASC, 20, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, 3, len(results))
|
||||||
|
results, _, err = httpdtest.GetIPListEntries(dataprovider.IPListTypeAllowList, "192", "", dataprovider.OrderASC, 20, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, 4, len(results))
|
||||||
|
results, _, err = httpdtest.GetIPListEntries(dataprovider.IPListTypeAllowList, "1", "", dataprovider.OrderASC, 20, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, 7, len(results))
|
||||||
|
results, _, err = httpdtest.GetIPListEntries(dataprovider.IPListTypeAllowList, "108", "", dataprovider.OrderASC, 20, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, 0, len(results))
|
||||||
|
|
||||||
|
for _, e := range entries {
|
||||||
|
_, err := httpdtest.RemoveIPListEntry(e, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
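
The Protocols values used above (0, 1, 3, 5, 8) are bit flags combined per entry. The exact bit-to-protocol mapping lives in the dataprovider package and is only assumed in this sketch; 0 is used by the tests as "any protocol":

package main

import "fmt"

// Assumed flag values, for illustration only; see the dataprovider package
// for the authoritative mapping.
const (
	protoSSH  = 1
	protoFTP  = 2
	protoDAV  = 4
	protoHTTP = 8
)

func matches(mask, proto int) bool {
	// 0 is treated as "every protocol" in the entries built by the tests above.
	return mask == 0 || mask&proto != 0
}

func main() {
	fmt.Println(matches(5, protoSSH))  // true, 5 = 1|4
	fmt.Println(matches(5, protoFTP))  // false
	fmt.Println(matches(0, protoHTTP)) // true
}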
|
func TestIPListEntriesValidation(t *testing.T) {
|
||||||
|
entry := dataprovider.IPListEntry{
|
||||||
|
IPOrNet: "::ffff:34.56.78.90/120",
|
||||||
|
Type: -1,
|
||||||
|
Mode: dataprovider.ListModeDeny,
|
||||||
|
}
|
||||||
|
_, resp, err := httpdtest.AddIPListEntry(entry, http.StatusBadRequest)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Contains(t, string(resp), "invalid list type")
|
||||||
|
entry.Type = dataprovider.IPListTypeRateLimiterSafeList
|
||||||
|
_, resp, err = httpdtest.AddIPListEntry(entry, http.StatusBadRequest)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Contains(t, string(resp), "invalid list mode")
|
||||||
|
entry.Type = dataprovider.IPListTypeDefender
|
||||||
|
_, _, err = httpdtest.AddIPListEntry(entry, http.StatusCreated)
|
||||||
|
assert.Error(t, err)
|
||||||
|
entry.IPOrNet = "34.56.78.0/24"
|
||||||
|
_, err = httpdtest.RemoveIPListEntry(entry, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
func TestBasicActionRulesHandling(t *testing.T) {
|
func TestBasicActionRulesHandling(t *testing.T) {
|
||||||
actionName := "test action"
|
actionName := "test action"
|
||||||
a := dataprovider.BaseEventAction{
|
a := dataprovider.BaseEventAction{
|
||||||
|
@ -6503,6 +6706,8 @@ func TestProviderErrors(t *testing.T) {
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
_, _, err = httpdtest.GetEventRules(1, 0, http.StatusInternalServerError)
|
_, _, err = httpdtest.GetEventRules(1, 0, http.StatusInternalServerError)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
_, _, err = httpdtest.GetIPListEntries(dataprovider.IPListTypeDefender, "", "", dataprovider.OrderASC, 10, http.StatusInternalServerError)
|
||||||
|
assert.NoError(t, err)
|
||||||
_, _, err = httpdtest.GetRoles(1, 0, http.StatusInternalServerError)
|
_, _, err = httpdtest.GetRoles(1, 0, http.StatusInternalServerError)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
_, _, err = httpdtest.UpdateRole(getTestRole(), http.StatusInternalServerError)
|
_, _, err = httpdtest.UpdateRole(getTestRole(), http.StatusInternalServerError)
|
||||||
|
@ -6696,6 +6901,22 @@ func TestProviderErrors(t *testing.T) {
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
_, _, err = httpdtest.Loaddata(backupFilePath, "", "", http.StatusInternalServerError)
|
_, _, err = httpdtest.Loaddata(backupFilePath, "", "", http.StatusInternalServerError)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
backupData = dataprovider.BackupData{
|
||||||
|
IPLists: []dataprovider.IPListEntry{
|
||||||
|
{
|
||||||
|
IPOrNet: "192.168.1.1/24",
|
||||||
|
Type: dataprovider.IPListTypeRateLimiterSafeList,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Version: dataprovider.DumpVersion,
|
||||||
|
}
|
||||||
|
backupContent, err = json.Marshal(backupData)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
err = os.WriteFile(backupFilePath, backupContent, os.ModePerm)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
_, _, err = httpdtest.Loaddata(backupFilePath, "", "", http.StatusInternalServerError)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
err = os.Remove(backupFilePath)
|
err = os.Remove(backupFilePath)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
@ -6973,11 +7194,11 @@ func TestDefenderAPI(t *testing.T) {
|
||||||
_, err = httpdtest.RemoveDefenderHostByIP(ip, http.StatusNotFound)
|
_, err = httpdtest.RemoveDefenderHostByIP(ip, http.StatusNotFound)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
common.AddDefenderEvent(ip, common.HostEventNoLoginTried)
|
common.AddDefenderEvent(ip, common.ProtocolHTTP, common.HostEventNoLoginTried)
|
||||||
hosts, _, err = httpdtest.GetDefenderHosts(http.StatusOK)
|
hosts, _, err = httpdtest.GetDefenderHosts(http.StatusOK)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Len(t, hosts, 0)
|
assert.Len(t, hosts, 0)
|
||||||
common.AddDefenderEvent(ip, common.HostEventUserNotFound)
|
common.AddDefenderEvent(ip, common.ProtocolHTTP, common.HostEventUserNotFound)
|
||||||
hosts, _, err = httpdtest.GetDefenderHosts(http.StatusOK)
|
hosts, _, err = httpdtest.GetDefenderHosts(http.StatusOK)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
if assert.Len(t, hosts, 1) {
|
if assert.Len(t, hosts, 1) {
|
||||||
|
@ -6991,7 +7212,7 @@ func TestDefenderAPI(t *testing.T) {
|
||||||
assert.Empty(t, host.GetBanTime())
|
assert.Empty(t, host.GetBanTime())
|
||||||
assert.Equal(t, 2, host.Score)
|
assert.Equal(t, 2, host.Score)
|
||||||
|
|
||||||
common.AddDefenderEvent(ip, common.HostEventUserNotFound)
|
common.AddDefenderEvent(ip, common.ProtocolHTTP, common.HostEventUserNotFound)
|
||||||
hosts, _, err = httpdtest.GetDefenderHosts(http.StatusOK)
|
hosts, _, err = httpdtest.GetDefenderHosts(http.StatusOK)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
if assert.Len(t, hosts, 1) {
|
if assert.Len(t, hosts, 1) {
|
||||||
|
@ -7011,8 +7232,8 @@ func TestDefenderAPI(t *testing.T) {
|
||||||
_, _, err = httpdtest.GetDefenderHostByIP(ip, http.StatusNotFound)
|
_, _, err = httpdtest.GetDefenderHostByIP(ip, http.StatusNotFound)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
common.AddDefenderEvent(ip, common.HostEventUserNotFound)
|
common.AddDefenderEvent(ip, common.ProtocolHTTP, common.HostEventUserNotFound)
|
||||||
common.AddDefenderEvent(ip, common.HostEventUserNotFound)
|
common.AddDefenderEvent(ip, common.ProtocolHTTP, common.HostEventUserNotFound)
|
||||||
hosts, _, err = httpdtest.GetDefenderHosts(http.StatusOK)
|
hosts, _, err = httpdtest.GetDefenderHosts(http.StatusOK)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Len(t, hosts, 1)
|
assert.Len(t, hosts, 1)
|
||||||
|
@ -7289,6 +7510,13 @@ func TestLoaddata(t *testing.T) {
|
||||||
Name: group.Name,
|
Name: group.Name,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
ipListEntry := dataprovider.IPListEntry{
|
||||||
|
IPOrNet: "172.16.2.4/32",
|
||||||
|
Description: "entry desc",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeDeny,
|
||||||
|
Protocols: 3,
|
||||||
|
}
|
||||||
apiKey := dataprovider.APIKey{
|
apiKey := dataprovider.APIKey{
|
||||||
Name: util.GenerateUniqueID(),
|
Name: util.GenerateUniqueID(),
|
||||||
Scope: dataprovider.APIKeyScopeAdmin,
|
Scope: dataprovider.APIKeyScopeAdmin,
|
||||||
|
@ -7361,6 +7589,7 @@ func TestLoaddata(t *testing.T) {
|
||||||
backupData.Shares = append(backupData.Shares, share)
|
backupData.Shares = append(backupData.Shares, share)
|
||||||
backupData.EventActions = append(backupData.EventActions, action)
|
backupData.EventActions = append(backupData.EventActions, action)
|
||||||
backupData.EventRules = append(backupData.EventRules, rule)
|
backupData.EventRules = append(backupData.EventRules, rule)
|
||||||
|
backupData.IPLists = append(backupData.IPLists, ipListEntry)
|
||||||
backupContent, err := json.Marshal(backupData)
|
backupContent, err := json.Marshal(backupData)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
backupFilePath := filepath.Join(backupsPath, "backup.json")
|
backupFilePath := filepath.Join(backupsPath, "backup.json")
|
||||||
|
@ -7412,6 +7641,14 @@ func TestLoaddata(t *testing.T) {
|
||||||
action, _, err = httpdtest.GetEventActionByName(action.Name, http.StatusOK)
|
action, _, err = httpdtest.GetEventActionByName(action.Name, http.StatusOK)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
entry, _, err := httpdtest.GetIPListEntry(ipListEntry.IPOrNet, ipListEntry.Type, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Greater(t, entry.CreatedAt, int64(0))
|
||||||
|
assert.Greater(t, entry.UpdatedAt, int64(0))
|
||||||
|
assert.Equal(t, ipListEntry.Description, entry.Description)
|
||||||
|
assert.Equal(t, ipListEntry.Protocols, entry.Protocols)
|
||||||
|
assert.Equal(t, ipListEntry.Mode, entry.Mode)
|
||||||
|
|
||||||
rule, _, err = httpdtest.GetEventRuleByName(rule.Name, http.StatusOK)
|
rule, _, err = httpdtest.GetEventRuleByName(rule.Name, http.StatusOK)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, 1, rule.Status)
|
assert.Equal(t, 1, rule.Status)
|
||||||
|
@ -7493,10 +7730,12 @@ func TestLoaddata(t *testing.T) {
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
_, err = httpdtest.RemoveRole(role, http.StatusOK)
|
_, err = httpdtest.RemoveRole(role, http.StatusOK)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
_, err = httpdtest.RemoveIPListEntry(entry, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
err = os.Remove(backupFilePath)
|
err = os.Remove(backupFilePath)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
err = createTestFile(backupFilePath, 10485761)
|
err = createTestFile(backupFilePath, 20*1048576+1)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
_, _, err = httpdtest.Loaddata(backupFilePath, "1", "0", http.StatusBadRequest)
|
_, _, err = httpdtest.Loaddata(backupFilePath, "1", "0", http.StatusBadRequest)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
@ -7581,6 +7820,13 @@ func TestLoaddataMode(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
ipListEntry := dataprovider.IPListEntry{
|
||||||
|
IPOrNet: "10.8.3.9/32",
|
||||||
|
Description: "note",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeDeny,
|
||||||
|
Protocols: 7,
|
||||||
|
}
|
||||||
backupData := dataprovider.BackupData{
|
backupData := dataprovider.BackupData{
|
||||||
Version: dataprovider.DumpVersion,
|
Version: dataprovider.DumpVersion,
|
||||||
}
|
}
|
||||||
|
@ -7606,6 +7852,7 @@ func TestLoaddataMode(t *testing.T) {
|
||||||
}
|
}
|
||||||
backupData.APIKeys = append(backupData.APIKeys, apiKey)
|
backupData.APIKeys = append(backupData.APIKeys, apiKey)
|
||||||
backupData.Shares = append(backupData.Shares, share)
|
backupData.Shares = append(backupData.Shares, share)
|
||||||
|
backupData.IPLists = append(backupData.IPLists, ipListEntry)
|
||||||
backupContent, _ := json.Marshal(backupData)
|
backupContent, _ := json.Marshal(backupData)
|
||||||
backupFilePath := filepath.Join(backupsPath, "backup.json")
|
backupFilePath := filepath.Join(backupsPath, "backup.json")
|
||||||
err := os.WriteFile(backupFilePath, backupContent, os.ModePerm)
|
err := os.WriteFile(backupFilePath, backupContent, os.ModePerm)
|
||||||
|
@ -7676,6 +7923,13 @@ func TestLoaddataMode(t *testing.T) {
|
||||||
rule, _, err = httpdtest.UpdateEventRule(rule, http.StatusOK)
|
rule, _, err = httpdtest.UpdateEventRule(rule, http.StatusOK)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
entry, _, err := httpdtest.GetIPListEntry(ipListEntry.IPOrNet, ipListEntry.Type, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
oldEntryDesc := entry.Description
|
||||||
|
entry.Description = "new note"
|
||||||
|
entry, _, err = httpdtest.UpdateIPListEntry(entry, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
backupData.Folders = []vfs.BaseVirtualFolder{
|
backupData.Folders = []vfs.BaseVirtualFolder{
|
||||||
{
|
{
|
||||||
MappedPath: mappedPath,
|
MappedPath: mappedPath,
|
||||||
|
@ -7700,6 +7954,9 @@ func TestLoaddataMode(t *testing.T) {
|
||||||
rule, _, err = httpdtest.GetEventRuleByName(rule.Name, http.StatusOK)
|
rule, _, err = httpdtest.GetEventRuleByName(rule.Name, http.StatusOK)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.NotEqual(t, oldRuleDesc, rule.Description)
|
assert.NotEqual(t, oldRuleDesc, rule.Description)
|
||||||
|
entry, _, err = httpdtest.GetIPListEntry(ipListEntry.IPOrNet, ipListEntry.Type, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.NotEqual(t, oldEntryDesc, entry.Description)
|
||||||
|
|
||||||
c := common.NewBaseConnection("connID", common.ProtocolFTP, "", "", user)
|
c := common.NewBaseConnection("connID", common.ProtocolFTP, "", "", user)
|
||||||
fakeConn := &fakeConnection{
|
fakeConn := &fakeConnection{
|
||||||
|
@ -7757,6 +8014,8 @@ func TestLoaddataMode(t *testing.T) {
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
_, err = httpdtest.RemoveRole(role, http.StatusOK)
|
_, err = httpdtest.RemoveRole(role, http.StatusOK)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
_, err = httpdtest.RemoveIPListEntry(entry, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
err = os.Remove(backupFilePath)
|
err = os.Remove(backupFilePath)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
@ -7946,6 +8205,48 @@ func TestAddRoleInvalidJsonMock(t *testing.T) {
|
||||||
checkResponseCode(t, http.StatusBadRequest, rr)
|
checkResponseCode(t, http.StatusBadRequest, rr)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestIPListEntriesErrorsMock(t *testing.T) {
|
||||||
|
token, err := getJWTAPITokenFromTestServer(defaultTokenAuthUser, defaultTokenAuthPass)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
req, err := http.NewRequest(http.MethodGet, ipListsPath+"/a/b", nil)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
setBearerForReq(req, token)
|
||||||
|
rr := executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusBadRequest, rr)
|
||||||
|
assert.Contains(t, rr.Body.String(), "invalid list type")
|
||||||
|
req, err = http.NewRequest(http.MethodGet, ipListsPath+"/invalid", nil)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
setBearerForReq(req, token)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusBadRequest, rr)
|
||||||
|
assert.Contains(t, rr.Body.String(), "invalid list type")
|
||||||
|
|
||||||
|
reqBody := bytes.NewBuffer([]byte("{"))
|
||||||
|
req, err = http.NewRequest(http.MethodPost, ipListsPath+"/2", reqBody)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
setBearerForReq(req, token)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusBadRequest, rr)
|
||||||
|
|
||||||
|
entry := dataprovider.IPListEntry{
|
||||||
|
IPOrNet: "172.120.1.1/32",
|
||||||
|
Type: dataprovider.IPListTypeAllowList,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
Protocols: 0,
|
||||||
|
}
|
||||||
|
_, _, err = httpdtest.AddIPListEntry(entry, http.StatusCreated)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
req, err = http.NewRequest(http.MethodPut, path.Join(ipListsPath, "1", url.PathEscape(entry.IPOrNet)), reqBody)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
setBearerForReq(req, token)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusBadRequest, rr)
|
||||||
|
|
||||||
|
_, err = httpdtest.RemoveIPListEntry(entry, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
func TestRoleErrorsMock(t *testing.T) {
|
func TestRoleErrorsMock(t *testing.T) {
|
||||||
token, err := getJWTAPITokenFromTestServer(defaultTokenAuthUser, defaultTokenAuthPass)
|
token, err := getJWTAPITokenFromTestServer(defaultTokenAuthUser, defaultTokenAuthPass)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
@ -15903,23 +16204,38 @@ func TestWebAdminSetupMock(t *testing.T) {
|
||||||
os.Setenv("SFTPGO_DATA_PROVIDER__CREATE_DEFAULT_ADMIN", "1")
|
os.Setenv("SFTPGO_DATA_PROVIDER__CREATE_DEFAULT_ADMIN", "1")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWhitelist(t *testing.T) {
|
func TestAllowList(t *testing.T) {
|
||||||
configCopy := common.Config
|
configCopy := common.Config
|
||||||
|
|
||||||
common.Config.MaxTotalConnections = 1
|
entries := []dataprovider.IPListEntry{
|
||||||
wlFile := filepath.Join(os.TempDir(), "wl.json")
|
{
|
||||||
common.Config.WhiteListFile = wlFile
|
IPOrNet: "172.120.1.1/32",
|
||||||
wl := common.HostListFile{
|
Type: dataprovider.IPListTypeAllowList,
|
||||||
IPAddresses: []string{"172.120.1.1", "172.120.1.2"},
|
Mode: dataprovider.ListModeAllow,
|
||||||
CIDRNetworks: []string{"192.8.7.0/22"},
|
Protocols: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "172.120.1.2/32",
|
||||||
|
Type: dataprovider.IPListTypeAllowList,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
Protocols: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
IPOrNet: "192.8.7.0/22",
|
||||||
|
Type: dataprovider.IPListTypeAllowList,
|
||||||
|
Mode: dataprovider.ListModeAllow,
|
||||||
|
Protocols: 8,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
data, err := json.Marshal(wl)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
err = os.WriteFile(wlFile, data, 0664)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
defer os.Remove(wlFile)
|
|
||||||
|
|
||||||
err = common.Initialize(common.Config, 0)
|
for _, e := range entries {
|
||||||
|
_, _, err := httpdtest.AddIPListEntry(e, http.StatusCreated)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
common.Config.MaxTotalConnections = 1
|
||||||
|
common.Config.AllowListStatus = 1
|
||||||
|
err := common.Initialize(common.Config, 0)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
req, _ := http.NewRequest(http.MethodGet, webLoginPath, nil)
|
req, _ := http.NewRequest(http.MethodGet, webLoginPath, nil)
|
||||||
|
@ -15931,7 +16247,8 @@ func TestWhitelist(t *testing.T) {
|
||||||
rr = executeRequest(req)
|
rr = executeRequest(req)
|
||||||
checkResponseCode(t, http.StatusOK, rr)
|
checkResponseCode(t, http.StatusOK, rr)
|
||||||
|
|
||||||
req.RemoteAddr = "172.120.1.3"
|
testIP := "172.120.1.3"
|
||||||
|
req.RemoteAddr = testIP
|
||||||
rr = executeRequest(req)
|
rr = executeRequest(req)
|
||||||
checkResponseCode(t, http.StatusForbidden, rr)
|
checkResponseCode(t, http.StatusForbidden, rr)
|
||||||
assert.Contains(t, rr.Body.String(), common.ErrConnectionDenied.Error())
|
assert.Contains(t, rr.Body.String(), common.ErrConnectionDenied.Error())
|
||||||
|
@ -15940,21 +16257,35 @@ func TestWhitelist(t *testing.T) {
|
||||||
rr = executeRequest(req)
|
rr = executeRequest(req)
|
||||||
checkResponseCode(t, http.StatusOK, rr)
|
checkResponseCode(t, http.StatusOK, rr)
|
||||||
|
|
||||||
wl.IPAddresses = append(wl.IPAddresses, "172.120.1.3")
|
entry := dataprovider.IPListEntry{
|
||||||
data, err = json.Marshal(wl)
|
IPOrNet: "172.120.1.3/32",
|
||||||
assert.NoError(t, err)
|
Type: dataprovider.IPListTypeAllowList,
|
||||||
err = os.WriteFile(wlFile, data, 0664)
|
Mode: dataprovider.ListModeAllow,
|
||||||
assert.NoError(t, err)
|
Protocols: 8,
|
||||||
err = common.Reload()
|
}
|
||||||
|
err = dataprovider.AddIPListEntry(&entry, "", "", "")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
req.RemoteAddr = "172.120.1.3"
|
req.RemoteAddr = testIP
|
||||||
rr = executeRequest(req)
|
rr = executeRequest(req)
|
||||||
checkResponseCode(t, http.StatusOK, rr)
|
checkResponseCode(t, http.StatusOK, rr)
|
||||||
|
|
||||||
|
err = dataprovider.DeleteIPListEntry(entry.IPOrNet, entry.Type, "", "", "")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
req.RemoteAddr = testIP
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusForbidden, rr)
|
||||||
|
assert.Contains(t, rr.Body.String(), common.ErrConnectionDenied.Error())
|
||||||
|
|
||||||
common.Config = configCopy
|
common.Config = configCopy
|
||||||
err = common.Initialize(common.Config, 0)
|
err = common.Initialize(common.Config, 0)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
for _, e := range entries {
|
||||||
|
_, err := httpdtest.RemoveIPListEntry(e, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
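
TestAllowList above boils down to prefix containment: with the allow list enabled, a client is accepted only when its IP falls inside one of the stored networks. A minimal standalone sketch of that check, under the assumption of a simple containment lookup (the real one is done by the common package against the data provider):

package main

import (
	"fmt"
	"net/netip"
)

// allowed reports whether ip falls inside one of the allow-list networks.
func allowed(ip string, allowList []netip.Prefix) bool {
	addr, err := netip.ParseAddr(ip)
	if err != nil {
		return false
	}
	for _, p := range allowList {
		if p.Contains(addr) {
			return true
		}
	}
	return false
}

func main() {
	list := []netip.Prefix{
		netip.MustParsePrefix("172.120.1.1/32"),
		netip.MustParsePrefix("192.8.7.0/22"),
	}
	fmt.Println(allowed("172.120.1.1", list)) // true
	fmt.Println(allowed("172.120.1.3", list)) // false, until an entry is added
}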
func TestWebAdminLoginMock(t *testing.T) {
|
func TestWebAdminLoginMock(t *testing.T) {
|
||||||
|
@ -17178,7 +17509,7 @@ func TestRenderDefenderPageMock(t *testing.T) {
setJWTCookieForReq(req, token)
rr := executeRequest(req)
checkResponseCode(t, http.StatusOK, rr)
assert.Contains(t, rr.Body.String(), "View and manage blocklist")
assert.Contains(t, rr.Body.String(), "View and manage auto blocklist")
}

func TestWebAdminBasicMock(t *testing.T) {
|
@ -20876,6 +21207,192 @@ func TestWebEventRule(t *testing.T) {
|
||||||
checkResponseCode(t, http.StatusOK, rr)
|
checkResponseCode(t, http.StatusOK, rr)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestWebIPListEntries(t *testing.T) {
|
||||||
|
webToken, err := getJWTWebTokenFromTestServer(defaultTokenAuthUser, defaultTokenAuthPass)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
csrfToken, err := getCSRFToken(httpBaseURL + webLoginPath)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
req, err := http.NewRequest(http.MethodGet, webIPListPath+"/mode", nil)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
setJWTCookieForReq(req, webToken)
|
||||||
|
rr := executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusBadRequest, rr)
|
||||||
|
|
||||||
|
req, err = http.NewRequest(http.MethodGet, webIPListPath+"/mode/a", nil)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
setJWTCookieForReq(req, webToken)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusBadRequest, rr)
|
||||||
|
|
||||||
|
req, err = http.NewRequest(http.MethodGet, webIPListPath+"/1/a", nil)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
setJWTCookieForReq(req, webToken)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusNotFound, rr)
|
||||||
|
|
||||||
|
req, err = http.NewRequest(http.MethodGet, webIPListPath+"/1", nil)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
setJWTCookieForReq(req, webToken)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusOK, rr)
|
||||||
|
|
||||||
|
req, err = http.NewRequest(http.MethodGet, webIPListsPath, nil)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
setJWTCookieForReq(req, webToken)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusOK, rr)
|
||||||
|
|
||||||
|
entry := dataprovider.IPListEntry{
|
||||||
|
IPOrNet: "12.34.56.78/20",
|
||||||
|
Type: dataprovider.IPListTypeDefender,
|
||||||
|
Mode: dataprovider.ListModeDeny,
|
||||||
|
Description: "note",
|
||||||
|
Protocols: 5,
|
||||||
|
}
|
||||||
|
form := make(url.Values)
|
||||||
|
form.Set("ipornet", entry.IPOrNet)
|
||||||
|
form.Set("description", entry.Description)
|
||||||
|
form.Set("mode", "a")
|
||||||
|
req, err = http.NewRequest(http.MethodPost, webIPListPath+"/mode", bytes.NewBuffer([]byte(form.Encode())))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
setJWTCookieForReq(req, webToken)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusBadRequest, rr)
|
||||||
|
assert.Contains(t, rr.Body.String(), "invalid list type")
|
||||||
|
|
||||||
|
req, err = http.NewRequest(http.MethodPost, webIPListPath+"/1", bytes.NewBuffer([]byte(form.Encode())))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
setJWTCookieForReq(req, webToken)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusForbidden, rr)
|
||||||
|
assert.Contains(t, rr.Body.String(), "unable to verify form token")
|
||||||
|
|
||||||
|
form.Set(csrfFormToken, csrfToken)
|
||||||
|
req, err = http.NewRequest(http.MethodPost, webIPListPath+"/2", bytes.NewBuffer([]byte(form.Encode())))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
setJWTCookieForReq(req, webToken)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusOK, rr)
|
||||||
|
assert.Contains(t, rr.Body.String(), "invalid mode")
|
||||||
|
|
||||||
|
form.Set("mode", "2")
|
||||||
|
form.Set("protocols", "a")
|
||||||
|
form.Add("protocols", "1")
|
||||||
|
form.Add("protocols", "4")
|
||||||
|
req, err = http.NewRequest(http.MethodPost, webIPListPath+"/2", bytes.NewBuffer([]byte(form.Encode())))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
setJWTCookieForReq(req, webToken)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusSeeOther, rr)
|
||||||
|
|
||||||
|
entry1, _, err := httpdtest.GetIPListEntry(entry.IPOrNet, dataprovider.IPListTypeDefender, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, entry.Description, entry1.Description)
|
||||||
|
assert.Equal(t, entry.Mode, entry1.Mode)
|
||||||
|
assert.Equal(t, entry.Protocols, entry1.Protocols)
|
||||||
|
|
||||||
|
form.Set("ipornet", "1111.11.11.11")
|
||||||
|
req, err = http.NewRequest(http.MethodPost, webIPListPath+"/1", bytes.NewBuffer([]byte(form.Encode())))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
setJWTCookieForReq(req, webToken)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusOK, rr)
|
||||||
|
assert.Contains(t, rr.Body.String(), "invalid IP")
|
||||||
|
|
||||||
|
form.Set("ipornet", entry.IPOrNet)
|
||||||
|
form.Set("mode", "invalid") // ignored for list type 1
|
||||||
|
req, err = http.NewRequest(http.MethodPost, webIPListPath+"/1", bytes.NewBuffer([]byte(form.Encode())))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
setJWTCookieForReq(req, webToken)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusSeeOther, rr)
|
||||||
|
|
||||||
|
entry2, _, err := httpdtest.GetIPListEntry(entry.IPOrNet, dataprovider.IPListTypeAllowList, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, entry.Description, entry2.Description)
|
||||||
|
assert.Equal(t, dataprovider.ListModeAllow, entry2.Mode)
|
||||||
|
assert.Equal(t, entry.Protocols, entry2.Protocols)
|
||||||
|
|
||||||
|
req, err = http.NewRequest(http.MethodGet, webIPListPath+"/1/"+url.PathEscape(entry2.IPOrNet), nil)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
setJWTCookieForReq(req, webToken)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusOK, rr)
|
||||||
|
|
||||||
|
form.Set("protocols", "1")
|
||||||
|
req, err = http.NewRequest(http.MethodPost, webIPListPath+"/1/"+url.PathEscape(entry.IPOrNet),
|
||||||
|
bytes.NewBuffer([]byte(form.Encode())))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
setJWTCookieForReq(req, webToken)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusSeeOther, rr)
|
||||||
|
entry2, _, err = httpdtest.GetIPListEntry(entry.IPOrNet, dataprovider.IPListTypeAllowList, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, entry.Description, entry2.Description)
|
||||||
|
assert.Equal(t, dataprovider.ListModeAllow, entry2.Mode)
|
||||||
|
assert.Equal(t, 1, entry2.Protocols)
|
||||||
|
|
||||||
|
form.Del(csrfFormToken)
|
||||||
|
req, err = http.NewRequest(http.MethodPost, webIPListPath+"/1/"+url.PathEscape(entry.IPOrNet),
|
||||||
|
bytes.NewBuffer([]byte(form.Encode())))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
setJWTCookieForReq(req, webToken)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusForbidden, rr)
|
||||||
|
assert.Contains(t, rr.Body.String(), "unable to verify form token")
|
||||||
|
|
||||||
|
form.Set(csrfFormToken, csrfToken)
|
||||||
|
req, err = http.NewRequest(http.MethodPost, webIPListPath+"/a/"+url.PathEscape(entry.IPOrNet),
|
||||||
|
bytes.NewBuffer([]byte(form.Encode())))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
setJWTCookieForReq(req, webToken)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusBadRequest, rr)
|
||||||
|
|
||||||
|
req, err = http.NewRequest(http.MethodPost, webIPListPath+"/1/"+url.PathEscape(entry.IPOrNet)+"a",
|
||||||
|
bytes.NewBuffer([]byte(form.Encode())))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
setJWTCookieForReq(req, webToken)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusNotFound, rr)
|
||||||
|
|
||||||
|
form.Set("mode", "a")
|
||||||
|
req, err = http.NewRequest(http.MethodPost, webIPListPath+"/2/"+url.PathEscape(entry.IPOrNet),
|
||||||
|
bytes.NewBuffer([]byte(form.Encode())))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
setJWTCookieForReq(req, webToken)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusOK, rr)
|
||||||
|
assert.Contains(t, rr.Body.String(), "invalid mode")
|
||||||
|
|
||||||
|
form.Set("mode", "100")
|
||||||
|
req, err = http.NewRequest(http.MethodPost, webIPListPath+"/2/"+url.PathEscape(entry.IPOrNet),
|
||||||
|
bytes.NewBuffer([]byte(form.Encode())))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
setJWTCookieForReq(req, webToken)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusOK, rr)
|
||||||
|
assert.Contains(t, rr.Body.String(), "invalid list mode")
|
||||||
|
|
||||||
|
_, err = httpdtest.RemoveIPListEntry(entry1, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
_, err = httpdtest.RemoveIPListEntry(entry2, http.StatusOK)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
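
The web handler exercised above reads a plain urlencoded form; a hypothetical client-side sketch with the field names used by the test (ipornet, description, mode, protocols), where mode "2" appears to map to deny and the protocol flags are submitted individually:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	form := make(url.Values)
	form.Set("ipornet", "12.34.56.78/20")
	form.Set("description", "note")
	form.Set("mode", "2")      // deny, for the defender list in the test above
	form.Add("protocols", "1") // protocol flags are added one by one
	form.Add("protocols", "4")
	fmt.Println(form.Encode())
}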
|
|
||||||
func TestWebRole(t *testing.T) {
|
func TestWebRole(t *testing.T) {
|
||||||
webToken, err := getJWTWebTokenFromTestServer(defaultTokenAuthUser, defaultTokenAuthPass)
|
webToken, err := getJWTWebTokenFromTestServer(defaultTokenAuthUser, defaultTokenAuthPass)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
@ -22324,6 +22841,18 @@ func TestProviderClosedMock(t *testing.T) {
|
||||||
rr = executeRequest(req)
|
rr = executeRequest(req)
|
||||||
checkResponseCode(t, http.StatusInternalServerError, rr)
|
checkResponseCode(t, http.StatusInternalServerError, rr)
|
||||||
|
|
||||||
|
req, err = http.NewRequest(http.MethodGet, webIPListPath+"/1/a", nil)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
setJWTCookieForReq(req, token)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusInternalServerError, rr)
|
||||||
|
|
||||||
|
req, err = http.NewRequest(http.MethodPost, webIPListPath+"/1/a", nil)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
setJWTCookieForReq(req, token)
|
||||||
|
rr = executeRequest(req)
|
||||||
|
checkResponseCode(t, http.StatusInternalServerError, rr)
|
||||||
|
|
||||||
req, err = http.NewRequest(http.MethodGet, webAdminRolesPath, nil)
|
req, err = http.NewRequest(http.MethodGet, webAdminRolesPath, nil)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
setJWTCookieForReq(req, token)
|
setJWTCookieForReq(req, token)
|
||||||
|
@ -22387,12 +22916,31 @@ func TestWebConnectionsMock(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetWebStatusMock(t *testing.T) {
|
func TestGetWebStatusMock(t *testing.T) {
|
||||||
|
oldConfig := config.GetCommonConfig()
|
||||||
|
|
||||||
|
cfg := config.GetCommonConfig()
|
||||||
|
cfg.RateLimitersConfig = []common.RateLimiterConfig{
|
||||||
|
{
|
||||||
|
Average: 1,
|
||||||
|
Period: 1000,
|
||||||
|
Burst: 1,
|
||||||
|
Type: 1,
|
||||||
|
Protocols: []string{common.ProtocolFTP},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
err := common.Initialize(cfg, 0)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
token, err := getJWTWebTokenFromTestServer(defaultTokenAuthUser, defaultTokenAuthPass)
|
token, err := getJWTWebTokenFromTestServer(defaultTokenAuthUser, defaultTokenAuthPass)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
req, _ := http.NewRequest(http.MethodGet, webStatusPath, nil)
|
req, _ := http.NewRequest(http.MethodGet, webStatusPath, nil)
|
||||||
setJWTCookieForReq(req, token)
|
setJWTCookieForReq(req, token)
|
||||||
rr := executeRequest(req)
|
rr := executeRequest(req)
|
||||||
checkResponseCode(t, http.StatusOK, rr)
|
checkResponseCode(t, http.StatusOK, rr)
|
||||||
|
|
||||||
|
err = common.Initialize(oldConfig, 0)
|
||||||
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
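
The Average/Period/Burst triple configured above describes a token-bucket limiter. For intuition only, here is the same idea expressed with golang.org/x/time/rate, which is not necessarily the implementation SFTPGo uses internally:

package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// Average 1 event per second with burst 1, roughly matching the test config above.
	limiter := rate.NewLimiter(rate.Limit(1), 1)
	fmt.Println(limiter.Allow()) // true: the single burst token is available
	fmt.Println(limiter.Allow()) // false: the bucket is empty until it refills
}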
|
|
||||||
func TestStaticFilesMock(t *testing.T) {
|
func TestStaticFilesMock(t *testing.T) {
|
||||||
|
|
|
@ -756,6 +756,21 @@ func TestInvalidToken(t *testing.T) {
|
||||||
assert.Equal(t, http.StatusBadRequest, rr.Code)
|
assert.Equal(t, http.StatusBadRequest, rr.Code)
|
||||||
assert.Contains(t, rr.Body.String(), "Invalid token claims")
|
assert.Contains(t, rr.Body.String(), "Invalid token claims")
|
||||||
|
|
||||||
|
rr = httptest.NewRecorder()
|
||||||
|
addIPListEntry(rr, req)
|
||||||
|
assert.Equal(t, http.StatusBadRequest, rr.Code)
|
||||||
|
assert.Contains(t, rr.Body.String(), "Invalid token claims")
|
||||||
|
|
||||||
|
rr = httptest.NewRecorder()
|
||||||
|
updateIPListEntry(rr, req)
|
||||||
|
assert.Equal(t, http.StatusBadRequest, rr.Code)
|
||||||
|
assert.Contains(t, rr.Body.String(), "Invalid token claims")
|
||||||
|
|
||||||
|
rr = httptest.NewRecorder()
|
||||||
|
deleteIPListEntry(rr, req)
|
||||||
|
assert.Equal(t, http.StatusBadRequest, rr.Code)
|
||||||
|
assert.Contains(t, rr.Body.String(), "Invalid token claims")
|
||||||
|
|
||||||
rr = httptest.NewRecorder()
|
rr = httptest.NewRecorder()
|
||||||
server.handleGetWebUsers(rr, req)
|
server.handleGetWebUsers(rr, req)
|
||||||
assert.Equal(t, http.StatusBadRequest, rr.Code)
|
assert.Equal(t, http.StatusBadRequest, rr.Code)
|
||||||
|
@ -811,6 +826,11 @@ func TestInvalidToken(t *testing.T) {
|
||||||
assert.Equal(t, http.StatusBadRequest, rr.Code)
|
assert.Equal(t, http.StatusBadRequest, rr.Code)
|
||||||
assert.Contains(t, rr.Body.String(), "invalid token claims")
|
assert.Contains(t, rr.Body.String(), "invalid token claims")
|
||||||
|
|
||||||
|
rr = httptest.NewRecorder()
|
||||||
|
server.handleWebUpdateIPListEntryPost(rr, req)
|
||||||
|
assert.Equal(t, http.StatusBadRequest, rr.Code)
|
||||||
|
assert.Contains(t, rr.Body.String(), "invalid token claims")
|
||||||
|
|
||||||
rr = httptest.NewRecorder()
|
rr = httptest.NewRecorder()
|
||||||
server.handleWebClientTwoFactorRecoveryPost(rr, req)
|
server.handleWebClientTwoFactorRecoveryPost(rr, req)
|
||||||
assert.Equal(t, http.StatusNotFound, rr.Code)
|
assert.Equal(t, http.StatusNotFound, rr.Code)
|
||||||
|
@ -826,6 +846,22 @@ func TestInvalidToken(t *testing.T) {
|
||||||
rr = httptest.NewRecorder()
|
rr = httptest.NewRecorder()
|
||||||
server.handleWebAdminTwoFactorPost(rr, req)
|
server.handleWebAdminTwoFactorPost(rr, req)
|
||||||
assert.Equal(t, http.StatusNotFound, rr.Code)
|
assert.Equal(t, http.StatusNotFound, rr.Code)
|
||||||
|
|
||||||
|
rr = httptest.NewRecorder()
|
||||||
|
server.handleWebUpdateIPListEntryPost(rr, req)
|
||||||
|
assert.Equal(t, http.StatusBadRequest, rr.Code)
|
||||||
|
assert.Contains(t, rr.Body.String(), "invalid token claims")
|
||||||
|
|
||||||
|
form := make(url.Values)
|
||||||
|
req, _ = http.NewRequest(http.MethodPost, webIPListPath+"/1", bytes.NewBuffer([]byte(form.Encode())))
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
rctx = chi.NewRouteContext()
|
||||||
|
rctx.URLParams.Add("type", "1")
|
||||||
|
req = req.WithContext(context.WithValue(req.Context(), chi.RouteCtxKey, rctx))
|
||||||
|
rr = httptest.NewRecorder()
|
||||||
|
server.handleWebAddIPListEntryPost(rr, req)
|
||||||
|
assert.Equal(t, http.StatusBadRequest, rr.Code, rr.Body.String())
|
||||||
|
assert.Contains(t, rr.Body.String(), "invalid token claims")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestUpdateWebAdminInvalidClaims(t *testing.T) {
|
func TestUpdateWebAdminInvalidClaims(t *testing.T) {
|
||||||
|
@ -1046,6 +1082,11 @@ func TestCreateTokenError(t *testing.T) {
|
||||||
_, err = getEventRuleFromPostFields(req)
|
_, err = getEventRuleFromPostFields(req)
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
|
|
||||||
|
req, _ = http.NewRequest(http.MethodPost, webIPListPath+"/1?a=a%C3%AO%GG", nil)
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
_, err = getIPListEntryFromPostFields(req, dataprovider.IPListTypeAllowList)
|
||||||
|
assert.Error(t, err)
|
||||||
|
|
||||||
req, _ = http.NewRequest(http.MethodPost, webClientLoginPath+"?a=a%C3%AO%GG", bytes.NewBuffer([]byte(form.Encode())))
|
req, _ = http.NewRequest(http.MethodPost, webClientLoginPath+"?a=a%C3%AO%GG", bytes.NewBuffer([]byte(form.Encode())))
|
||||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
rr = httptest.NewRecorder()
|
rr = httptest.NewRecorder()
|
||||||
|
@ -1135,7 +1176,6 @@ func TestCreateTokenError(t *testing.T) {
|
||||||
assert.Contains(t, rr.Body.String(), "invalid URL escape")
|
assert.Contains(t, rr.Body.String(), "invalid URL escape")
|
||||||
|
|
||||||
req, _ = http.NewRequest(http.MethodPost, webChangeClientPwdPath+"?a=a%K3%AO%GA", bytes.NewBuffer([]byte(form.Encode())))
|
req, _ = http.NewRequest(http.MethodPost, webChangeClientPwdPath+"?a=a%K3%AO%GA", bytes.NewBuffer([]byte(form.Encode())))
|
||||||
|
|
||||||
_, err = getShareFromPostFields(req)
|
_, err = getShareFromPostFields(req)
|
||||||
if assert.Error(t, err) {
|
if assert.Error(t, err) {
|
||||||
assert.Contains(t, err.Error(), "invalid URL escape")
|
assert.Contains(t, err.Error(), "invalid URL escape")
|
||||||
|
|
|
@ -1038,12 +1038,12 @@ func (s *httpdServer) checkConnection(next http.Handler) http.Handler {
common.Connections.AddClientConnection(ipAddr)
defer common.Connections.RemoveClientConnection(ipAddr)

if err := common.Connections.IsNewConnectionAllowed(ipAddr); err != nil {
if err := common.Connections.IsNewConnectionAllowed(ipAddr, common.ProtocolHTTP); err != nil {
logger.Log(logger.LevelDebug, common.ProtocolHTTP, "", "connection not allowed from ip %q: %v", ipAddr, err)
s.sendForbiddenResponse(w, r, err.Error())
return
}
if common.IsBanned(ipAddr) {
if common.IsBanned(ipAddr, common.ProtocolHTTP) {
s.sendForbiddenResponse(w, r, "your IP address is banned")
return
}
|
@ -1189,7 +1189,7 @@ func (s *httpdServer) initializeRouter() {
})

if s.enableRESTAPI {
// share API exposed to external users
// share API available to external users
s.router.Get(sharesPath+"/{id}", s.downloadFromShare)
s.router.Post(sharesPath+"/{id}", s.uploadFilesToShare)
s.router.Post(sharesPath+"/{id}/{name}", s.uploadFileToShare)

@ -1304,10 +1304,15 @@ func (s *httpdServer) initializeRouter() {
router.With(s.checkPerm(dataprovider.PermAdminManageEventRules)).Delete(eventRulesPath+"/{name}", deleteEventRule)
router.With(s.checkPerm(dataprovider.PermAdminManageEventRules)).Post(eventRulesPath+"/run/{name}", runOnDemandRule)
router.With(s.checkPerm(dataprovider.PermAdminManageRoles)).Get(rolesPath, getRoles)
router.With(s.checkPerm(dataprovider.PermAdminManageRoles)).Get(rolesPath+"/{name}", getRoleByName)
router.With(s.checkPerm(dataprovider.PermAdminManageRoles)).Post(rolesPath, addRole)
router.With(s.checkPerm(dataprovider.PermAdminManageRoles)).Get(rolesPath+"/{name}", getRoleByName)
router.With(s.checkPerm(dataprovider.PermAdminManageRoles)).Put(rolesPath+"/{name}", updateRole)
router.With(s.checkPerm(dataprovider.PermAdminManageRoles)).Delete(rolesPath+"/{name}", deleteRole)
router.With(s.checkPerm(dataprovider.PermAdminManageIPLists), compressor.Handler).Get(ipListsPath+"/{type}", getIPListEntries)
router.With(s.checkPerm(dataprovider.PermAdminManageIPLists)).Post(ipListsPath+"/{type}", addIPListEntry)
router.With(s.checkPerm(dataprovider.PermAdminManageIPLists)).Get(ipListsPath+"/{type}/{ipornet}", getIPListEntry)
router.With(s.checkPerm(dataprovider.PermAdminManageIPLists)).Put(ipListsPath+"/{type}/{ipornet}", updateIPListEntry)
router.With(s.checkPerm(dataprovider.PermAdminManageIPLists)).Delete(ipListsPath+"/{type}/{ipornet}", deleteIPListEntry)
})

s.router.Get(userTokenPath, s.getUserToken)
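
A hypothetical client-side call against the new REST endpoints could look like this; the base URL and token are placeholders, and list type 2 is assumed to correspond to the defender list based on the tests above:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	const baseURL = "http://127.0.0.1:8080"  // placeholder
	const token = "REPLACE_WITH_ADMIN_JWT"   // placeholder

	// List the entries of IP list type 2.
	req, err := http.NewRequest(http.MethodGet, baseURL+"/api/v2/iplists/2", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+token)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}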
|
@ -1441,7 +1446,7 @@ func (s *httpdServer) setupWebClientRoutes() {
s.jwtAuthenticatorPartial(tokenAudienceWebClientPartial)).
Post(webClientTwoFactorRecoveryPath, s.handleWebClientTwoFactorRecoveryPost)
}
// share routes exposed to external users
// share routes available to external users
s.router.Get(webClientPubSharesPath+"/{id}", s.downloadFromShare)
s.router.Get(webClientPubSharesPath+"/{id}/partial", s.handleClientSharePartialDownload)
s.router.Get(webClientPubSharesPath+"/{id}/browse", s.handleShareGetFiles)

@ -1677,6 +1682,19 @@ func (s *httpdServer) setupWebAdminRoutes() {
Get(webEventsFsSearchPath, searchFsEvents)
router.With(s.checkPerm(dataprovider.PermAdminViewEvents), compressor.Handler, s.refreshCookie).
Get(webEventsProviderSearchPath, searchProviderEvents)
router.With(s.checkPerm(dataprovider.PermAdminManageIPLists)).Get(webIPListsPath, s.handleWebIPListsPage)
router.With(s.checkPerm(dataprovider.PermAdminManageIPLists), compressor.Handler, s.refreshCookie).
Get(webIPListsPath+"/{type}", getIPListEntries)
router.With(s.checkPerm(dataprovider.PermAdminManageIPLists)).Get(webIPListPath+"/{type}",
s.handleWebAddIPListEntryGet)
router.With(s.checkPerm(dataprovider.PermAdminManageIPLists)).Post(webIPListPath+"/{type}",
s.handleWebAddIPListEntryPost)
router.With(s.checkPerm(dataprovider.PermAdminManageIPLists)).Get(webIPListPath+"/{type}/{ipornet}",
s.handleWebUpdateIPListEntryGet)
router.With(s.checkPerm(dataprovider.PermAdminManageIPLists)).Post(webIPListPath+"/{type}/{ipornet}",
s.handleWebUpdateIPListEntryPost)
router.With(s.checkPerm(dataprovider.PermAdminManageIPLists), verifyCSRFHeader).
Delete(webIPListPath+"/{type}/{ipornet}", deleteIPListEntry)
})
}
}
@ -92,6 +92,8 @@ const (
templateStatus = "status.html"
templateLogin = "login.html"
templateDefender = "defender.html"
templateIPLists = "iplists.html"
templateIPList = "iplist.html"
templateProfile = "profile.html"
templateChangePwd = "changepassword.html"
templateMaintenance = "maintenance.html"

@ -109,7 +111,8 @@ const (
pageProfileTitle = "My profile"
pageChangePwdTitle = "Change password"
pageMaintenanceTitle = "Maintenance"
pageDefenderTitle = "Defender"
pageDefenderTitle = "Auto Blocklist"
pageIPListsTitle = "IP Lists"
pageEventsTitle = "Logs"
pageForgotPwdTitle = "SFTPGo Admin - Forgot password"
pageResetPwdTitle = "SFTPGo Admin - Reset password"

@ -138,6 +141,8 @@ type basePage struct {
FolderURL string
FolderTemplateURL string
DefenderURL string
IPListsURL string
IPListURL string
EventsURL string
LogoutURL string
ProfileURL string

@ -164,10 +169,12 @@ type basePage struct {
StatusTitle string
MaintenanceTitle string
DefenderTitle string
IPListsTitle string
EventsTitle string
Version string
CSRFToken string
IsEventManagerPage bool
IsIPManagerPage bool
HasDefender bool
HasSearcher bool
HasExternalLogin bool

@ -292,6 +299,21 @@ type defenderHostsPage struct {
DefenderHostsURL string
}

type ipListsPage struct {
basePage
IPListsSearchURL string
RateLimitersStatus bool
RateLimitersProtocols string
IsAllowListEnabled bool
}

type ipListPage struct {
basePage
Entry *dataprovider.IPListEntry
Error string
Mode genericPageMode
}

type setupPage struct {
basePage
Username string

@ -479,6 +501,16 @@ func loadAdminTemplates(templatesPath string) {
filepath.Join(templatesPath, templateAdminDir, templateBase),
filepath.Join(templatesPath, templateAdminDir, templateDefender),
}
ipListsPaths := []string{
filepath.Join(templatesPath, templateCommonDir, templateCommonCSS),
filepath.Join(templatesPath, templateAdminDir, templateBase),
filepath.Join(templatesPath, templateAdminDir, templateIPLists),
}
ipListPaths := []string{
filepath.Join(templatesPath, templateCommonDir, templateCommonCSS),
filepath.Join(templatesPath, templateAdminDir, templateBase),
filepath.Join(templatesPath, templateAdminDir, templateIPList),
}
mfaPaths := []string{
filepath.Join(templatesPath, templateCommonDir, templateCommonCSS),
filepath.Join(templatesPath, templateAdminDir, templateBase),

@ -552,6 +584,8 @@ func loadAdminTemplates(templatesPath string) {
changePwdTmpl := util.LoadTemplate(nil, changePwdPaths...)
maintenanceTmpl := util.LoadTemplate(nil, maintenancePaths...)
defenderTmpl := util.LoadTemplate(nil, defenderPaths...)
ipListsTmpl := util.LoadTemplate(nil, ipListsPaths...)
ipListTmpl := util.LoadTemplate(nil, ipListPaths...)
mfaTmpl := util.LoadTemplate(nil, mfaPaths...)
twoFactorTmpl := util.LoadTemplate(nil, twoFactorPaths...)
twoFactorRecoveryTmpl := util.LoadTemplate(nil, twoFactorRecoveryPaths...)

@ -582,6 +616,8 @@ func loadAdminTemplates(templatesPath string) {
adminTemplates[templateChangePwd] = changePwdTmpl
adminTemplates[templateMaintenance] = maintenanceTmpl
adminTemplates[templateDefender] = defenderTmpl
adminTemplates[templateIPLists] = ipListsTmpl
adminTemplates[templateIPList] = ipListTmpl
adminTemplates[templateMFA] = mfaTmpl
adminTemplates[templateTwoFactor] = twoFactorTmpl
adminTemplates[templateTwoFactorRecovery] = twoFactorRecoveryTmpl

@ -609,6 +645,19 @@ func isEventManagerResource(currentURL string) bool {
return false
}

func isIPListsResource(currentURL string) bool {
if currentURL == webDefenderPath {
return true
}
if currentURL == webIPListsPath {
return true
}
if strings.HasPrefix(currentURL, webIPListPath+"/") {
return true
}
return false
}

func (s *httpdServer) getBasePageData(title, currentURL string, r *http.Request) basePage {
var csrfToken string
if currentURL != "" {

@ -628,6 +677,8 @@ func (s *httpdServer) getBasePageData(title, currentURL string, r *http.Request)
FolderURL: webFolderPath,
FolderTemplateURL: webTemplateFolder,
DefenderURL: webDefenderPath,
IPListsURL: webIPListsPath,
IPListURL: webIPListPath,
EventsURL: webEventsPath,
LogoutURL: webLogoutPath,
ProfileURL: webAdminProfilePath,

@ -656,10 +707,12 @@ func (s *httpdServer) getBasePageData(title, currentURL string, r *http.Request)
StatusTitle: pageStatusTitle,
MaintenanceTitle: pageMaintenanceTitle,
DefenderTitle: pageDefenderTitle,
IPListsTitle: pageIPListsTitle,
EventsTitle: pageEventsTitle,
Version: version.GetAsString(),
LoggedAdmin: getAdminFromToken(r),
IsEventManagerPage: isEventManagerResource(currentURL),
IsIPManagerPage: isIPListsResource(currentURL),
HasDefender: common.Config.DefenderConfig.Enabled,
HasSearcher: plugin.Handler.HasSearcher(),
HasExternalLogin: isLoggedInWithOIDC(r),
@ -937,6 +990,27 @@ func (s *httpdServer) renderUserPage(w http.ResponseWriter, r *http.Request, use
renderAdminTemplate(w, templateUser, data)
}

func (s *httpdServer) renderIPListPage(w http.ResponseWriter, r *http.Request, entry dataprovider.IPListEntry,
mode genericPageMode, error string,
) {
var title, currentURL string
switch mode {
case genericPageModeAdd:
title = "Add a new IP List entry"
currentURL = fmt.Sprintf("%s/%d", webIPListPath, entry.Type)
case genericPageModeUpdate:
title = "Update IP List entry"
currentURL = fmt.Sprintf("%s/%d/%s", webIPListPath, entry.Type, url.PathEscape(entry.IPOrNet))
}
data := ipListPage{
basePage: s.getBasePageData(title, currentURL, r),
Error: error,
Entry: &entry,
Mode: mode,
}
renderAdminTemplate(w, templateIPList, data)
}

func (s *httpdServer) renderRolePage(w http.ResponseWriter, r *http.Request, role dataprovider.Role,
mode genericPageMode, error string,
) {

@ -2378,6 +2452,36 @@ func getRoleFromPostFields(r *http.Request) (dataprovider.Role, error) {
}, nil
}

func getIPListEntryFromPostFields(r *http.Request, listType dataprovider.IPListType) (dataprovider.IPListEntry, error) {
err := r.ParseForm()
if err != nil {
return dataprovider.IPListEntry{}, err
}
var mode int
if listType == dataprovider.IPListTypeDefender {
mode, err = strconv.Atoi(r.Form.Get("mode"))
if err != nil {
return dataprovider.IPListEntry{}, fmt.Errorf("invalid mode: %w", err)
}
} else {
mode = 1
}
protocols := 0
for _, proto := range r.Form["protocols"] {
p, err := strconv.Atoi(proto)
if err == nil {
protocols += p
}
}

return dataprovider.IPListEntry{
IPOrNet: r.Form.Get("ipornet"),
Mode: mode,
Protocols: protocols,
Description: r.Form.Get("description"),
}, nil
}

func (s *httpdServer) handleWebAdminForgotPwd(w http.ResponseWriter, r *http.Request) {
r.Body = http.MaxBytesReader(w, r.Body, maxRequestSize)
if !smtp.IsEnabled() {

@ -3673,7 +3777,7 @@ func (s *httpdServer) handleWebUpdateRolePost(w http.ResponseWriter, r *http.Req

updatedRole, err := getRoleFromPostFields(r)
if err != nil {
s.renderRolePage(w, r, role, genericPageModeAdd, err.Error())
s.renderRolePage(w, r, role, genericPageModeUpdate, err.Error())
return
}
ipAddr := util.GetIPFromRemoteAddress(r.RemoteAddr)

@ -3701,3 +3805,114 @@ func (s *httpdServer) handleWebGetEvents(w http.ResponseWriter, r *http.Request)
}
renderAdminTemplate(w, templateEvents, data)
}

func (s *httpdServer) handleWebIPListsPage(w http.ResponseWriter, r *http.Request) {
r.Body = http.MaxBytesReader(w, r.Body, maxRequestSize)
rtlStatus, rtlProtocols := common.Config.GetRateLimitersStatus()
data := ipListsPage{
basePage: s.getBasePageData(pageIPListsTitle, webIPListsPath, r),
RateLimitersStatus: rtlStatus,
RateLimitersProtocols: strings.Join(rtlProtocols, ", "),
IsAllowListEnabled: common.Config.IsAllowListEnabled(),
}

renderAdminTemplate(w, templateIPLists, data)
}

func (s *httpdServer) handleWebAddIPListEntryGet(w http.ResponseWriter, r *http.Request) {
r.Body = http.MaxBytesReader(w, r.Body, maxRequestSize)
listType, _, err := getIPListPathParams(r)
if err != nil {
s.renderBadRequestPage(w, r, err)
return
}
s.renderIPListPage(w, r, dataprovider.IPListEntry{Type: listType}, genericPageModeAdd, "")
}

func (s *httpdServer) handleWebAddIPListEntryPost(w http.ResponseWriter, r *http.Request) {
r.Body = http.MaxBytesReader(w, r.Body, maxRequestSize)
listType, _, err := getIPListPathParams(r)
if err != nil {
s.renderBadRequestPage(w, r, err)
return
}
entry, err := getIPListEntryFromPostFields(r, listType)
if err != nil {
s.renderIPListPage(w, r, entry, genericPageModeAdd, err.Error())
return
}
entry.Type = listType
claims, err := getTokenClaims(r)
if err != nil || claims.Username == "" {
s.renderBadRequestPage(w, r, errors.New("invalid token claims"))
return
}
ipAddr := util.GetIPFromRemoteAddress(r.RemoteAddr)
if err := verifyCSRFToken(r.Form.Get(csrfFormToken), ipAddr); err != nil {
s.renderForbiddenPage(w, r, err.Error())
return
}
err = dataprovider.AddIPListEntry(&entry, claims.Username, ipAddr, claims.Role)
if err != nil {
s.renderIPListPage(w, r, entry, genericPageModeAdd, err.Error())
return
}
http.Redirect(w, r, webIPListsPath, http.StatusSeeOther)
}

func (s *httpdServer) handleWebUpdateIPListEntryGet(w http.ResponseWriter, r *http.Request) {
r.Body = http.MaxBytesReader(w, r.Body, maxRequestSize)
listType, ipOrNet, err := getIPListPathParams(r)
if err != nil {
s.renderBadRequestPage(w, r, err)
return
}
entry, err := dataprovider.IPListEntryExists(ipOrNet, listType)
if err == nil {
s.renderIPListPage(w, r, entry, genericPageModeUpdate, "")
} else if errors.Is(err, util.ErrNotFound) {
s.renderNotFoundPage(w, r, err)
} else {
s.renderInternalServerErrorPage(w, r, err)
}
}

func (s *httpdServer) handleWebUpdateIPListEntryPost(w http.ResponseWriter, r *http.Request) {
r.Body = http.MaxBytesReader(w, r.Body, maxRequestSize)
claims, err := getTokenClaims(r)
if err != nil || claims.Username == "" {
s.renderBadRequestPage(w, r, errors.New("invalid token claims"))
return
}
listType, ipOrNet, err := getIPListPathParams(r)
if err != nil {
s.renderBadRequestPage(w, r, err)
return
}
entry, err := dataprovider.IPListEntryExists(ipOrNet, listType)
if errors.Is(err, util.ErrNotFound) {
s.renderNotFoundPage(w, r, err)
return
} else if err != nil {
s.renderInternalServerErrorPage(w, r, err)
return
}
updatedEntry, err := getIPListEntryFromPostFields(r, listType)
if err != nil {
s.renderIPListPage(w, r, entry, genericPageModeUpdate, err.Error())
return
}
ipAddr := util.GetIPFromRemoteAddress(r.RemoteAddr)
if err := verifyCSRFToken(r.Form.Get(csrfFormToken), ipAddr); err != nil {
s.renderForbiddenPage(w, r, err.Error())
return
}
updatedEntry.Type = listType
updatedEntry.IPOrNet = ipOrNet
err = dataprovider.UpdateIPListEntry(&updatedEntry, claims.Username, ipAddr, claims.Role)
if err != nil {
s.renderIPListPage(w, r, entry, genericPageModeUpdate, err.Error())
return
}
http.Redirect(w, r, webIPListsPath, http.StatusSeeOther)
}
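The handlers above funnel all form input through getIPListEntryFromPostFields, which accepts repeated "protocols" form values and simply sums them into a single bitmask, skipping values that do not parse as integers. A minimal, standalone sketch of that accumulation using only the standard library; the field names match the handlers above, while the request URL and the numeric protocol codes are placeholders, not values taken from this change:

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strconv"
	"strings"
)

func main() {
	// Build a request shaped like the add/update IP list entry form posts.
	form := url.Values{}
	form.Set("ipornet", "192.168.1.0/24")
	form.Set("mode", "1")
	form.Add("protocols", "1") // placeholder protocol codes
	form.Add("protocols", "2")
	form.Set("description", "example entry")

	r, _ := http.NewRequest(http.MethodPost, "/", strings.NewReader(form.Encode()))
	r.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	_ = r.ParseForm()

	// Same accumulation logic as getIPListEntryFromPostFields: invalid values are
	// skipped, valid ones are added together to form the protocols bitmask.
	protocols := 0
	for _, proto := range r.Form["protocols"] {
		if p, err := strconv.Atoi(proto); err == nil {
			protocols += p
		}
	}
	fmt.Println("ipornet:", r.Form.Get("ipornet"), "protocols:", protocols) // protocols: 3
}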
@ -12,7 +12,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

// Package httpdtest provides utilities for testing the exposed REST API.
// Package httpdtest provides utilities for testing the supported REST API.
package httpdtest

import (

@ -63,6 +63,7 @@ const (
eventActionsPath = "/api/v2/eventactions"
eventRulesPath = "/api/v2/eventrules"
rolesPath = "/api/v2/roles"
ipListsPath = "/api/v2/iplists"
)

const (

@ -478,6 +479,129 @@ func GetRoles(limit, offset int64, expectedStatusCode int) ([]dataprovider.Role,
return roles, body, err
}

// AddIPListEntry adds a new IP list entry and checks the received HTTP Status code against expectedStatusCode.
func AddIPListEntry(entry dataprovider.IPListEntry, expectedStatusCode int) (dataprovider.IPListEntry, []byte, error) {
var newEntry dataprovider.IPListEntry
var body []byte

asJSON, _ := json.Marshal(entry)
resp, err := sendHTTPRequest(http.MethodPost, buildURLRelativeToBase(ipListsPath, strconv.Itoa(int(entry.Type))),
bytes.NewBuffer(asJSON), "application/json", getDefaultToken())
if err != nil {
return newEntry, body, err
}
defer resp.Body.Close()

err = checkResponse(resp.StatusCode, expectedStatusCode)
if expectedStatusCode != http.StatusCreated {
body, _ = getResponseBody(resp)
return newEntry, body, err
}
if err == nil {
newEntry, body, err = GetIPListEntry(entry.IPOrNet, entry.Type, http.StatusOK)
}
if err == nil {
err = checkIPListEntry(entry, newEntry)
}
return newEntry, body, err
}

// UpdateIPListEntry updates an existing IP list entry and checks the received HTTP Status code against expectedStatusCode
func UpdateIPListEntry(entry dataprovider.IPListEntry, expectedStatusCode int) (dataprovider.IPListEntry, []byte, error) {
var newEntry dataprovider.IPListEntry
var body []byte

asJSON, _ := json.Marshal(entry)
resp, err := sendHTTPRequest(http.MethodPut, buildURLRelativeToBase(ipListsPath, fmt.Sprintf("%d", entry.Type),
url.PathEscape(entry.IPOrNet)), bytes.NewBuffer(asJSON),
"application/json", getDefaultToken())
if err != nil {
return newEntry, body, err
}
defer resp.Body.Close()

body, _ = getResponseBody(resp)
err = checkResponse(resp.StatusCode, expectedStatusCode)
if expectedStatusCode != http.StatusOK {
return newEntry, body, err
}
if err == nil {
newEntry, body, err = GetIPListEntry(entry.IPOrNet, entry.Type, http.StatusOK)
}
if err == nil {
err = checkIPListEntry(entry, newEntry)
}
return newEntry, body, err
}

// RemoveIPListEntry removes an existing IP list entry and checks the received HTTP Status code against expectedStatusCode.
func RemoveIPListEntry(entry dataprovider.IPListEntry, expectedStatusCode int) ([]byte, error) {
var body []byte
resp, err := sendHTTPRequest(http.MethodDelete, buildURLRelativeToBase(ipListsPath, fmt.Sprintf("%d", entry.Type),
url.PathEscape(entry.IPOrNet)), nil, "", getDefaultToken())
if err != nil {
return body, err
}
defer resp.Body.Close()
body, _ = getResponseBody(resp)
return body, checkResponse(resp.StatusCode, expectedStatusCode)
}

// GetIPListEntry returns an IP list entry matching the specified parameters, if exists,
// and checks the received HTTP Status code against expectedStatusCode.
func GetIPListEntry(ipOrNet string, listType dataprovider.IPListType, expectedStatusCode int,
) (dataprovider.IPListEntry, []byte, error) {
var entry dataprovider.IPListEntry
var body []byte
resp, err := sendHTTPRequest(http.MethodGet, buildURLRelativeToBase(ipListsPath, fmt.Sprintf("%d", listType), url.PathEscape(ipOrNet)),
nil, "", getDefaultToken())
if err != nil {
return entry, body, err
}
defer resp.Body.Close()
err = checkResponse(resp.StatusCode, expectedStatusCode)
if err == nil && expectedStatusCode == http.StatusOK {
err = render.DecodeJSON(resp.Body, &entry)
} else {
body, _ = getResponseBody(resp)
}
return entry, body, err
}

// GetIPListEntries returns a list of IP list entries and checks the received HTTP Status code against expectedStatusCode.
func GetIPListEntries(listType dataprovider.IPListType, filter, from, order string, limit int64,
expectedStatusCode int,
) ([]dataprovider.IPListEntry, []byte, error) {
var entries []dataprovider.IPListEntry
var body []byte

url, err := url.Parse(buildURLRelativeToBase(ipListsPath, strconv.Itoa(int(listType))))
if err != nil {
return entries, body, err
}
q := url.Query()
q.Add("filter", filter)
q.Add("from", from)
q.Add("order", order)
if limit > 0 {
q.Add("limit", strconv.FormatInt(limit, 10))
}
url.RawQuery = q.Encode()
resp, err := sendHTTPRequest(http.MethodGet, url.String(), nil, "", getDefaultToken())
if err != nil {
return entries, body, err
}
defer resp.Body.Close()

err = checkResponse(resp.StatusCode, expectedStatusCode)
if err == nil && expectedStatusCode == http.StatusOK {
err = render.DecodeJSON(resp.Body, &entries)
} else {
body, _ = getResponseBody(resp)
}
return entries, body, err
}

// AddAdmin adds a new admin and checks the received HTTP Status code against expectedStatusCode.
func AddAdmin(admin dataprovider.Admin, expectedStatusCode int) (dataprovider.Admin, []byte, error) {
var newAdmin dataprovider.Admin

@ -1641,6 +1765,31 @@ func checkEventRule(expected, actual dataprovider.EventRule) error {
return checkEventRuleActions(expected.Actions, actual.Actions)
}

func checkIPListEntry(expected, actual dataprovider.IPListEntry) error {
if expected.IPOrNet != actual.IPOrNet {
return errors.New("ipornet mismatch")
}
if expected.Description != actual.Description {
return errors.New("description mismatch")
}
if expected.Type != actual.Type {
return errors.New("type mismatch")
}
if expected.Mode != actual.Mode {
return errors.New("mode mismatch")
}
if expected.Protocols != actual.Protocols {
return errors.New("protocols mismatch")
}
if actual.CreatedAt == 0 {
return errors.New("created_at unset")
}
if actual.UpdatedAt == 0 {
return errors.New("updated_at unset")
}
return nil
}

func checkRole(expected, actual dataprovider.Role) error {
if expected.ID <= 0 {
if actual.ID <= 0 {
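Taken together, the helpers above support a full create/verify/update/delete cycle against the new /api/v2/iplists endpoints. A sketch of how an integration test might use them, assuming a running test server wired up as in the project's existing tests; the import paths, the chosen list type and the field values are illustrative assumptions, not part of this change:

package sftpgo_test

import (
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/drakkan/sftpgo/v2/internal/dataprovider" // assumed import paths
	"github.com/drakkan/sftpgo/v2/internal/httpdtest"
)

func TestIPListEntryCRUD(t *testing.T) {
	entry := dataprovider.IPListEntry{
		IPOrNet:     "192.168.1.0/24",
		Type:        dataprovider.IPListTypeDefender, // list type referenced in this change
		Mode:        1,                               // placeholder value
		Description: "example entry",
	}
	// create the entry; the helper reads it back and compares it via checkIPListEntry
	entry, _, err := httpdtest.AddIPListEntry(entry, http.StatusCreated)
	assert.NoError(t, err)
	// update a mutable field
	entry.Description = "updated description"
	_, _, err = httpdtest.UpdateIPListEntry(entry, http.StatusOK)
	assert.NoError(t, err)
	// finally remove the entry
	_, err = httpdtest.RemoveIPListEntry(entry, http.StatusOK)
	assert.NoError(t, err)
}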
@ -640,7 +640,7 @@
})
)

// AddMetricsEndpoint exposes metrics to the specified endpoint
// AddMetricsEndpoint publishes metrics to the specified endpoint
func AddMetricsEndpoint(metricsPath string, handler chi.Router) {
handler.Handle(metricsPath, promhttp.Handler())
}

@ -13,7 +13,7 @@ func init() {
version.AddFeature("-metrics")
}

// AddMetricsEndpoint exposes metrics to the specified endpoint
// AddMetricsEndpoint publishes metrics to the specified endpoint
func AddMetricsEndpoint(_ string, _ chi.Router) {}

// TransferCompleted updates metrics after an upload or a download
@ -121,14 +121,8 @@ func (s *Service) Start(disableAWSInstallationCode bool) error {

func (s *Service) initializeServices(disableAWSInstallationCode bool) error {
providerConf := config.GetProviderConf()
err := common.Initialize(config.GetCommonConfig(), providerConf.GetShared())
if err != nil {
logger.Error(logSender, "", "%v", err)
logger.ErrorToConsole("%v", err)
return err
}
kmsConfig := config.GetKMSConfig()
err = kmsConfig.Initialize()
err := kmsConfig.Initialize()
if err != nil {
logger.Error(logSender, "", "unable to initialize KMS: %v", err)
logger.ErrorToConsole("unable to initialize KMS: %v", err)

@ -159,6 +153,12 @@ func (s *Service) initializeServices(disableAWSInstallationCode bool) error {
logger.ErrorToConsole("error initializing data provider: %v", err)
return err
}
err = common.Initialize(config.GetCommonConfig(), providerConf.GetShared())
if err != nil {
logger.Error(logSender, "", "%v", err)
logger.ErrorToConsole("%v", err)
return err
}

if s.PortableMode == 1 {
// create the user for portable mode

@ -319,7 +319,7 @@ func (s *Service) LoadInitialData() error {
return fmt.Errorf("unable to stat file %#v: %w", s.LoadDataFrom, err)
}
if info.Size() > httpd.MaxRestoreSize {
return fmt.Errorf("unable to restore input file %#v size too big: %v/%v bytes",
return fmt.Errorf("unable to restore input file %q size too big: %d/%d bytes",
s.LoadDataFrom, info.Size(), httpd.MaxRestoreSize)
}
content, err := os.ReadFile(s.LoadDataFrom)

@ -350,42 +350,46 @@ func (s *Service) LoadInitialData() error {
}

func (s *Service) restoreDump(dump *dataprovider.BackupData) error {
err := httpd.RestoreRoles(dump.Roles, s.LoadDataFrom, s.LoadDataMode, dataprovider.ActionExecutorSystem, "", "")
err := httpd.RestoreIPListEntries(dump.IPLists, s.LoadDataFrom, s.LoadDataMode, dataprovider.ActionExecutorSystem, "", "")
if err != nil {
return fmt.Errorf("unable to restore roles from file %#v: %v", s.LoadDataFrom, err)
return fmt.Errorf("unable to restore IP list entries from file %q: %v", s.LoadDataFrom, err)
}
err = httpd.RestoreRoles(dump.Roles, s.LoadDataFrom, s.LoadDataMode, dataprovider.ActionExecutorSystem, "", "")
if err != nil {
return fmt.Errorf("unable to restore roles from file %q: %v", s.LoadDataFrom, err)
}
err = httpd.RestoreFolders(dump.Folders, s.LoadDataFrom, s.LoadDataMode, s.LoadDataQuotaScan, dataprovider.ActionExecutorSystem, "", "")
if err != nil {
return fmt.Errorf("unable to restore folders from file %#v: %v", s.LoadDataFrom, err)
return fmt.Errorf("unable to restore folders from file %q: %v", s.LoadDataFrom, err)
}
err = httpd.RestoreGroups(dump.Groups, s.LoadDataFrom, s.LoadDataMode, dataprovider.ActionExecutorSystem, "", "")
if err != nil {
return fmt.Errorf("unable to restore groups from file %#v: %v", s.LoadDataFrom, err)
return fmt.Errorf("unable to restore groups from file %q: %v", s.LoadDataFrom, err)
}
err = httpd.RestoreUsers(dump.Users, s.LoadDataFrom, s.LoadDataMode, s.LoadDataQuotaScan, dataprovider.ActionExecutorSystem, "", "")
if err != nil {
return fmt.Errorf("unable to restore users from file %#v: %v", s.LoadDataFrom, err)
return fmt.Errorf("unable to restore users from file %q: %v", s.LoadDataFrom, err)
}
err = httpd.RestoreAdmins(dump.Admins, s.LoadDataFrom, s.LoadDataMode, dataprovider.ActionExecutorSystem, "", "")
if err != nil {
return fmt.Errorf("unable to restore admins from file %#v: %v", s.LoadDataFrom, err)
return fmt.Errorf("unable to restore admins from file %q: %v", s.LoadDataFrom, err)
}
err = httpd.RestoreAPIKeys(dump.APIKeys, s.LoadDataFrom, s.LoadDataMode, dataprovider.ActionExecutorSystem, "", "")
if err != nil {
return fmt.Errorf("unable to restore API keys from file %#v: %v", s.LoadDataFrom, err)
return fmt.Errorf("unable to restore API keys from file %q: %v", s.LoadDataFrom, err)
}
err = httpd.RestoreShares(dump.Shares, s.LoadDataFrom, s.LoadDataMode, dataprovider.ActionExecutorSystem, "", "")
if err != nil {
return fmt.Errorf("unable to restore API keys from file %#v: %v", s.LoadDataFrom, err)
return fmt.Errorf("unable to restore API keys from file %q: %v", s.LoadDataFrom, err)
}
err = httpd.RestoreEventActions(dump.EventActions, s.LoadDataFrom, s.LoadDataMode, dataprovider.ActionExecutorSystem, "", "")
if err != nil {
return fmt.Errorf("unable to restore event actions from file %#v: %v", s.LoadDataFrom, err)
return fmt.Errorf("unable to restore event actions from file %q: %v", s.LoadDataFrom, err)
}
err = httpd.RestoreEventRules(dump.EventRules, s.LoadDataFrom, s.LoadDataMode, dataprovider.ActionExecutorSystem,
"", "", dump.Version)
if err != nil {
return fmt.Errorf("unable to restore event rules from file %#v: %v", s.LoadDataFrom, err)
return fmt.Errorf("unable to restore event rules from file %q: %v", s.LoadDataFrom, err)
}
return nil
}
@ -84,7 +84,7 @@ func (c *Connection) Fileread(request *sftp.Request) (io.ReaderAt, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if ok, policy := c.User.IsFileAllowed(request.Filepath); !ok {
|
if ok, policy := c.User.IsFileAllowed(request.Filepath); !ok {
|
||||||
c.Log(logger.LevelWarn, "reading file %#v is not allowed", request.Filepath)
|
c.Log(logger.LevelWarn, "reading file %q is not allowed", request.Filepath)
|
||||||
return nil, c.GetErrorForDeniedFile(policy)
|
return nil, c.GetErrorForDeniedFile(policy)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -94,13 +94,13 @@ func (c *Connection) Fileread(request *sftp.Request) (io.ReaderAt, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := common.ExecutePreAction(c.BaseConnection, common.OperationPreDownload, p, request.Filepath, 0, 0); err != nil {
|
if _, err := common.ExecutePreAction(c.BaseConnection, common.OperationPreDownload, p, request.Filepath, 0, 0); err != nil {
|
||||||
c.Log(logger.LevelDebug, "download for file %#v denied by pre action: %v", request.Filepath, err)
|
c.Log(logger.LevelDebug, "download for file %q denied by pre action: %v", request.Filepath, err)
|
||||||
return nil, c.GetPermissionDeniedError()
|
return nil, c.GetPermissionDeniedError()
|
||||||
}
|
}
|
||||||
|
|
||||||
file, r, cancelFn, err := fs.Open(p, 0)
|
file, r, cancelFn, err := fs.Open(p, 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
c.Log(logger.LevelError, "could not open file %#v for reading: %+v", p, err)
|
c.Log(logger.LevelError, "could not open file %q for reading: %+v", p, err)
|
||||||
return nil, c.GetFsError(fs, err)
|
return nil, c.GetFsError(fs, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -125,7 +125,7 @@ func (c *Connection) handleFilewrite(request *sftp.Request) (sftp.WriterAtReader
|
||||||
c.UpdateLastActivity()
|
c.UpdateLastActivity()
|
||||||
|
|
||||||
if ok, _ := c.User.IsFileAllowed(request.Filepath); !ok {
|
if ok, _ := c.User.IsFileAllowed(request.Filepath); !ok {
|
||||||
c.Log(logger.LevelWarn, "writing file %#v is not allowed", request.Filepath)
|
c.Log(logger.LevelWarn, "writing file %q is not allowed", request.Filepath)
|
||||||
return nil, c.GetPermissionDeniedError()
|
return nil, c.GetPermissionDeniedError()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -160,13 +160,13 @@ func (c *Connection) handleFilewrite(request *sftp.Request) (sftp.WriterAtReader
|
||||||
}
|
}
|
||||||
|
|
||||||
if statErr != nil {
|
if statErr != nil {
|
||||||
c.Log(logger.LevelError, "error performing file stat %#v: %+v", p, statErr)
|
c.Log(logger.LevelError, "error performing file stat %q: %+v", p, statErr)
|
||||||
return nil, c.GetFsError(fs, statErr)
|
return nil, c.GetFsError(fs, statErr)
|
||||||
}
|
}
|
||||||
|
|
||||||
// This happen if we upload a file that has the same name of an existing directory
|
// This happen if we upload a file that has the same name of an existing directory
|
||||||
if stat.IsDir() {
|
if stat.IsDir() {
|
||||||
c.Log(logger.LevelError, "attempted to open a directory for writing to: %#v", p)
|
c.Log(logger.LevelError, "attempted to open a directory for writing to: %q", p)
|
||||||
return nil, sftp.ErrSSHFxOpUnsupported
|
return nil, sftp.ErrSSHFxOpUnsupported
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -255,7 +255,7 @@ func (c *Connection) Readlink(filePath string) (string, error) {
|
||||||
|
|
||||||
s, err := fs.Readlink(p)
|
s, err := fs.Readlink(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
c.Log(logger.LevelDebug, "error running readlink on path %#v: %+v", p, err)
|
c.Log(logger.LevelDebug, "error running readlink on path %q: %+v", p, err)
|
||||||
return "", c.GetFsError(fs, err)
|
return "", c.GetFsError(fs, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -383,11 +383,11 @@ func (c *Connection) handleSFTPRemove(request *sftp.Request) error {
|
||||||
|
|
||||||
var fi os.FileInfo
|
var fi os.FileInfo
|
||||||
if fi, err = fs.Lstat(fsPath); err != nil {
|
if fi, err = fs.Lstat(fsPath); err != nil {
|
||||||
c.Log(logger.LevelDebug, "failed to remove file %#v: stat error: %+v", fsPath, err)
|
c.Log(logger.LevelDebug, "failed to remove file %q: stat error: %+v", fsPath, err)
|
||||||
return c.GetFsError(fs, err)
|
return c.GetFsError(fs, err)
|
||||||
}
|
}
|
||||||
if fi.IsDir() && fi.Mode()&os.ModeSymlink == 0 {
|
if fi.IsDir() && fi.Mode()&os.ModeSymlink == 0 {
|
||||||
c.Log(logger.LevelDebug, "cannot remove %#v is not a file/symlink", fsPath)
|
c.Log(logger.LevelDebug, "cannot remove %q is not a file/symlink", fsPath)
|
||||||
return sftp.ErrSSHFxFailure
|
return sftp.ErrSSHFxFailure
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -402,14 +402,14 @@ func (c *Connection) handleSFTPUploadToNewFile(fs vfs.Fs, pflags sftp.FileOpenFl
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := common.ExecutePreAction(c.BaseConnection, common.OperationPreUpload, resolvedPath, requestPath, 0, 0); err != nil {
|
if _, err := common.ExecutePreAction(c.BaseConnection, common.OperationPreUpload, resolvedPath, requestPath, 0, 0); err != nil {
|
||||||
c.Log(logger.LevelDebug, "upload for file %#v denied by pre action: %v", requestPath, err)
|
c.Log(logger.LevelDebug, "upload for file %q denied by pre action: %v", requestPath, err)
|
||||||
return nil, c.GetPermissionDeniedError()
|
return nil, c.GetPermissionDeniedError()
|
||||||
}
|
}
|
||||||
|
|
||||||
osFlags := getOSOpenFlags(pflags)
|
osFlags := getOSOpenFlags(pflags)
|
||||||
file, w, cancelFn, err := fs.Create(filePath, osFlags)
|
file, w, cancelFn, err := fs.Create(filePath, osFlags)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
c.Log(logger.LevelError, "error creating file %#vm os flags %v, pflags %+v: %+v", resolvedPath, osFlags, pflags, err)
|
c.Log(logger.LevelError, "error creating file %q, os flags %d, pflags %+v: %+v", resolvedPath, osFlags, pflags, err)
|
||||||
return nil, c.GetFsError(fs, err)
|
return nil, c.GetFsError(fs, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -450,14 +450,14 @@ func (c *Connection) handleSFTPUploadToExistingFile(fs vfs.Fs, pflags sftp.FileO
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := common.ExecutePreAction(c.BaseConnection, common.OperationPreUpload, resolvedPath, requestPath, fileSize, osFlags); err != nil {
|
if _, err := common.ExecutePreAction(c.BaseConnection, common.OperationPreUpload, resolvedPath, requestPath, fileSize, osFlags); err != nil {
|
||||||
c.Log(logger.LevelDebug, "upload for file %#v denied by pre action: %v", requestPath, err)
|
c.Log(logger.LevelDebug, "upload for file %q denied by pre action: %v", requestPath, err)
|
||||||
return nil, c.GetPermissionDeniedError()
|
return nil, c.GetPermissionDeniedError()
|
||||||
}
|
}
|
||||||
|
|
||||||
if common.Config.IsAtomicUploadEnabled() && fs.IsAtomicUploadSupported() {
|
if common.Config.IsAtomicUploadEnabled() && fs.IsAtomicUploadSupported() {
|
||||||
_, _, err = fs.Rename(resolvedPath, filePath)
|
_, _, err = fs.Rename(resolvedPath, filePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
c.Log(logger.LevelError, "error renaming existing file for atomic upload, source: %#v, dest: %#v, err: %+v",
|
c.Log(logger.LevelError, "error renaming existing file for atomic upload, source: %q, dest: %q, err: %+v",
|
||||||
resolvedPath, filePath, err)
|
resolvedPath, filePath, err)
|
||||||
return nil, c.GetFsError(fs, err)
|
return nil, c.GetFsError(fs, err)
|
||||||
}
|
}
|
||||||
|
@ -465,7 +465,7 @@ func (c *Connection) handleSFTPUploadToExistingFile(fs vfs.Fs, pflags sftp.FileO
|
||||||
|
|
||||||
file, w, cancelFn, err := fs.Create(filePath, osFlags)
|
file, w, cancelFn, err := fs.Create(filePath, osFlags)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
c.Log(logger.LevelError, "error opening existing file, os flags %v, pflags: %+v, source: %#v, err: %+v",
|
c.Log(logger.LevelError, "error opening existing file, os flags %v, pflags: %+v, source: %q, err: %+v",
|
||||||
osFlags, pflags, filePath, err)
|
osFlags, pflags, filePath, err)
|
||||||
return nil, c.GetFsError(fs, err)
|
return nil, c.GetFsError(fs, err)
|
||||||
}
|
}
|
||||||
|
@ -473,7 +473,7 @@ func (c *Connection) handleSFTPUploadToExistingFile(fs vfs.Fs, pflags sftp.FileO
|
||||||
initialSize := int64(0)
|
initialSize := int64(0)
|
||||||
truncatedSize := int64(0) // bytes truncated and not included in quota
|
truncatedSize := int64(0) // bytes truncated and not included in quota
|
||||||
if isResume {
|
if isResume {
|
||||||
c.Log(logger.LevelDebug, "resuming upload requested, file path %#v initial size: %v has append flag %v",
|
c.Log(logger.LevelDebug, "resuming upload requested, file path %q initial size: %d, has append flag %t",
|
||||||
filePath, fileSize, pflags.Append)
|
filePath, fileSize, pflags.Append)
|
||||||
// enforce min write offset only if the client passed the APPEND flag
|
// enforce min write offset only if the client passed the APPEND flag
|
||||||
if pflags.Append {
|
if pflags.Append {
|
||||||
|
|
|
@ -505,11 +505,11 @@ func (c *Configuration) configureKeyboardInteractiveAuth(serverConfig *ssh.Serve
|
||||||
}
|
}
|
||||||
|
|
||||||
func canAcceptConnection(ip string) bool {
|
func canAcceptConnection(ip string) bool {
|
||||||
if common.IsBanned(ip) {
|
if common.IsBanned(ip, common.ProtocolSSH) {
|
||||||
logger.Log(logger.LevelDebug, common.ProtocolSSH, "", "connection refused, ip %#v is banned", ip)
|
logger.Log(logger.LevelDebug, common.ProtocolSSH, "", "connection refused, ip %#v is banned", ip)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if err := common.Connections.IsNewConnectionAllowed(ip); err != nil {
|
if err := common.Connections.IsNewConnectionAllowed(ip, common.ProtocolSSH); err != nil {
|
||||||
logger.Log(logger.LevelDebug, common.ProtocolSSH, "", "connection not allowed from ip %q: %v", ip, err)
|
logger.Log(logger.LevelDebug, common.ProtocolSSH, "", "connection not allowed from ip %q: %v", ip, err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
@ -700,7 +700,7 @@ func checkAuthError(ip string, err error) {
|
||||||
if errors.Is(err, util.ErrNotFound) {
|
if errors.Is(err, util.ErrNotFound) {
|
||||||
event = common.HostEventUserNotFound
|
event = common.HostEventUserNotFound
|
||||||
}
|
}
|
||||||
common.AddDefenderEvent(ip, event)
|
common.AddDefenderEvent(ip, common.ProtocolSSH, event)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -708,7 +708,7 @@ func checkAuthError(ip string, err error) {
|
||||||
} else {
|
} else {
|
||||||
logger.ConnectionFailedLog("", ip, dataprovider.LoginMethodNoAuthTryed, common.ProtocolSSH, err.Error())
|
logger.ConnectionFailedLog("", ip, dataprovider.LoginMethodNoAuthTryed, common.ProtocolSSH, err.Error())
|
||||||
metric.AddNoAuthTryed()
|
metric.AddNoAuthTryed()
|
||||||
common.AddDefenderEvent(ip, common.HostEventNoLoginTried)
|
common.AddDefenderEvent(ip, common.ProtocolSSH, common.HostEventNoLoginTried)
|
||||||
dataprovider.ExecutePostLoginHook(&dataprovider.User{}, dataprovider.LoginMethodNoAuthTryed, ip, common.ProtocolSSH, err)
|
dataprovider.ExecutePostLoginHook(&dataprovider.User{}, dataprovider.LoginMethodNoAuthTryed, ip, common.ProtocolSSH, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1159,7 +1159,7 @@ func updateLoginMetrics(user *dataprovider.User, ip, method string, err error) {
|
||||||
if errors.Is(err, util.ErrNotFound) {
|
if errors.Is(err, util.ErrNotFound) {
|
||||||
event = common.HostEventUserNotFound
|
event = common.HostEventUserNotFound
|
||||||
}
|
}
|
||||||
common.AddDefenderEvent(ip, event)
|
common.AddDefenderEvent(ip, common.ProtocolSSH, event)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
metric.AddLoginResult(method, err)
|
metric.AddLoginResult(method, err)
|
||||||
|
|
|
@ -213,18 +213,18 @@ func TestMain(m *testing.M) {
|
||||||
scriptArgs = "$@"
|
scriptArgs = "$@"
|
||||||
}
|
}
|
||||||
|
|
||||||
err = common.Initialize(commonConf, 0)
|
|
||||||
if err != nil {
|
|
||||||
logger.WarnToConsole("error initializing common: %v", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = dataprovider.Initialize(providerConf, configDir, true)
|
err = dataprovider.Initialize(providerConf, configDir, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.ErrorToConsole("error initializing data provider: %v", err)
|
logger.ErrorToConsole("error initializing data provider: %v", err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
err = common.Initialize(commonConf, 0)
|
||||||
|
if err != nil {
|
||||||
|
logger.WarnToConsole("error initializing common: %v", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
httpConfig := config.GetHTTPConfig()
|
httpConfig := config.GetHTTPConfig()
|
||||||
httpConfig.Initialize(configDir) //nolint:errcheck
|
httpConfig.Initialize(configDir) //nolint:errcheck
|
||||||
kmsConfig := config.GetKMSConfig()
|
kmsConfig := config.GetKMSConfig()
|
||||||
|
@ -3066,7 +3066,7 @@ func TestPreLoginUserCreation(t *testing.T) {
|
||||||
err = dataprovider.Initialize(providerConf, configDir, true)
|
err = dataprovider.Initialize(providerConf, configDir, true)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
user, _, err := httpdtest.GetUserByUsername(defaultUsername, http.StatusNotFound)
|
_, _, err = httpdtest.GetUserByUsername(defaultUsername, http.StatusNotFound)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
conn, client, err := getSftpClient(u, usePubKey)
|
conn, client, err := getSftpClient(u, usePubKey)
|
||||||
if assert.NoError(t, err) {
|
if assert.NoError(t, err) {
|
||||||
|
@ -3074,7 +3074,7 @@ func TestPreLoginUserCreation(t *testing.T) {
|
||||||
defer client.Close()
|
defer client.Close()
|
||||||
assert.NoError(t, checkBasicSFTP(client))
|
assert.NoError(t, checkBasicSFTP(client))
|
||||||
}
|
}
|
||||||
user, _, err = httpdtest.GetUserByUsername(defaultUsername, http.StatusOK)
|
user, _, err := httpdtest.GetUserByUsername(defaultUsername, http.StatusOK)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
_, err = httpdtest.RemoveUser(user, http.StatusOK)
|
_, err = httpdtest.RemoveUser(user, http.StatusOK)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
|
@ -120,7 +120,7 @@ func (c *Config) Initialize(configDir string) error {
}

func (c *Config) getMailClientOptions() []mail.Option {
options := []mail.Option{mail.WithPort(c.Port)}
options := []mail.Option{mail.WithPort(c.Port), mail.WithoutNoop()}

switch c.Encryption {
case 1:
@ -32,6 +32,7 @@ import (
"math"
"net"
"net/http"
"net/netip"
"net/url"
"os"
"path"

@ -775,3 +776,26 @@ func GetAbsolutePath(name string) (string, error) {
}
return filepath.Join(curDir, name), nil
}

// GetLastIPForPrefix returns the last IP for the given prefix
// https://github.com/go4org/netipx/blob/8449b0a6169f5140fb0340cb4fc0de4c9b281ef6/netipx.go#L173
func GetLastIPForPrefix(p netip.Prefix) netip.Addr {
if !p.IsValid() {
return netip.Addr{}
}
a16 := p.Addr().As16()
var off uint8
var bits uint8 = 128
if p.Addr().Is4() {
off = 12
bits = 32
}
for b := uint8(p.Bits()); b < bits; b++ {
byteNum, bitInByte := b/8, 7-(b%8)
a16[off+byteNum] |= 1 << uint(bitInByte)
}
if p.Addr().Is4() {
return netip.AddrFrom16(a16).Unmap()
}
return netip.AddrFrom16(a16) // doesn't unmap
}
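A short usage sketch for GetLastIPForPrefix above; it needs only the standard library net/netip package, and the util import path shown is an assumption:

package main

import (
	"fmt"
	"net/netip"

	"github.com/drakkan/sftpgo/v2/internal/util" // assumed import path for the package above
)

func main() {
	// GetLastIPForPrefix computes the highest address covered by a CIDR prefix,
	// which lets an IP/network entry be handled as an address range.
	p4 := netip.MustParsePrefix("192.168.1.0/24")
	fmt.Println(util.GetLastIPForPrefix(p4)) // 192.168.1.255

	p6 := netip.MustParsePrefix("2001:db8::/64")
	fmt.Println(util.GetLastIPForPrefix(p6)) // 2001:db8::ffff:ffff:ffff:ffff
}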
@ -17,7 +17,7 @@ package version

import "strings"

const version = "2.4.3-dev"
const version = "2.4.4-dev"

var (
commit = ""
@ -176,7 +176,7 @@ func (v *BaseVirtualFolder) hasPathPlaceholder() bool {
return false
}

// VirtualFolder defines a mapping between an SFTPGo exposed virtual path and a
// VirtualFolder defines a mapping between an SFTPGo virtual path and a
// filesystem path outside the user home directory.
// The specified paths must be absolute and the virtual path cannot be "/",
// it must be a sub directory. The parent directory for the specified virtual
@ -165,12 +165,12 @@ func (s *webDavServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
common.Connections.AddClientConnection(ipAddr)
defer common.Connections.RemoveClientConnection(ipAddr)

if err := common.Connections.IsNewConnectionAllowed(ipAddr); err != nil {
if err := common.Connections.IsNewConnectionAllowed(ipAddr, common.ProtocolWebDAV); err != nil {
logger.Log(logger.LevelDebug, common.ProtocolWebDAV, "", "connection not allowed from ip %q: %v", ipAddr, err)
http.Error(w, err.Error(), http.StatusServiceUnavailable)
return
}
if common.IsBanned(ipAddr) {
if common.IsBanned(ipAddr, common.ProtocolWebDAV) {
http.Error(w, common.ErrConnectionDenied.Error(), http.StatusForbidden)
return
}

@ -413,7 +413,7 @@ func updateLoginMetrics(user *dataprovider.User, ip, loginMethod string, err err
if errors.Is(err, util.ErrNotFound) {
event = common.HostEventUserNotFound
}
common.AddDefenderEvent(ip, event)
common.AddDefenderEvent(ip, common.ProtocolWebDAV, event)
}
metric.AddLoginResult(loginMethod, err)
dataprovider.ExecutePostLoginHook(user, loginMethod, ip, common.ProtocolWebDAV, err)
@ -319,18 +319,18 @@ func TestMain(m *testing.M) {
os.Exit(1)
}

err = common.Initialize(commonConf, 0)
if err != nil {
logger.WarnToConsole("error initializing common: %v", err)
os.Exit(1)
}

err = dataprovider.Initialize(providerConf, configDir, true)
if err != nil {
logger.ErrorToConsole("error initializing data provider: %v", err)
os.Exit(1)
}

err = common.Initialize(commonConf, 0)
if err != nil {
logger.WarnToConsole("error initializing common: %v", err)
os.Exit(1)
}

httpConfig := config.GetHTTPConfig()
httpConfig.Initialize(configDir) //nolint:errcheck
kmsConfig := config.GetKMSConfig()
3 main.go

@ -21,8 +21,6 @@ package main // import "github.com/drakkan/sftpgo"

import (
"fmt"
"math/rand"
"time"

"go.uber.org/automaxprocs/maxprocs"

@ -34,6 +32,5 @@ func main() {
fmt.Printf("error setting max procs: %v\n", err)
undo()
}
rand.Seed(time.Now().UnixNano())
cmd.Execute()
}
@ -6,6 +6,7 @@ tags:
|
||||||
- name: admins
|
- name: admins
|
||||||
- name: API keys
|
- name: API keys
|
||||||
- name: connections
|
- name: connections
|
||||||
|
- name: IP Lists
|
||||||
- name: defender
|
- name: defender
|
||||||
- name: quota
|
- name: quota
|
||||||
- name: folders
|
- name: folders
|
||||||
|
@@ -23,12 +24,12 @@ info:
   description: |
     SFTPGo allows you to securely share your files over SFTP and optionally over HTTP/S, FTP/S and WebDAV as well.
     Several storage backends are supported and they are configurable per-user, so you can serve a local directory for a user and an S3 bucket (or part of it) for another one.
-    SFTPGo also supports virtual folders, a virtual folder can use any of the supported storage backends. So you can have, for example, an S3 user that exposes a Google Cloud Storage bucket (or part of it) on a specified path and an encrypted local filesystem on another one.
+    SFTPGo also supports virtual folders, a virtual folder can use any of the supported storage backends. So you can have, for example, a user with the S3 backend mapping a Google Cloud Storage bucket (or part of it) on a specified path and an encrypted local filesystem on another one.
     Virtual folders can be private or shared among multiple users, for shared virtual folders you can define different quota limits for each user.
     SFTPGo supports groups to simplify the administration of multiple accounts by letting you assign settings once to a group, instead of multiple times to each individual user.
     The SFTPGo WebClient allows end users to change their credentials, browse and manage their files in the browser and setup two-factor authentication which works with Authy, Google Authenticator and other compatible apps.
     From the WebClient each authorized user can also create HTTP/S links to externally share files and folders securely, by setting limits to the number of downloads/uploads, protecting the share with a password, limiting access by source IP address, setting an automatic expiration date.
-  version: 2.4.3-dev
+  version: 2.4.4-dev
   contact:
     name: API support
     url: 'https://github.com/drakkan/sftpgo'
@ -783,6 +784,204 @@ paths:
|
||||||
$ref: '#/components/responses/InternalServerError'
|
$ref: '#/components/responses/InternalServerError'
|
||||||
default:
|
default:
|
||||||
$ref: '#/components/responses/DefaultResponse'
|
$ref: '#/components/responses/DefaultResponse'
|
||||||
|
/iplists/{type}:
|
||||||
|
parameters:
|
||||||
|
- name: type
|
||||||
|
in: path
|
||||||
|
description: IP list type
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/IPListType'
|
||||||
|
get:
|
||||||
|
tags:
|
||||||
|
- IP Lists
|
||||||
|
summary: Get IP list entries
|
||||||
|
description: Returns an array with one or more IP list entries
|
||||||
|
operationId: get_ip_list_entries
|
||||||
|
parameters:
|
||||||
|
- in: query
|
||||||
|
name: filter
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
description: restrict results to entries whose ipornet matches or starts with this filter
|
||||||
|
- in: query
|
||||||
|
name: from
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
description: ipornet to start from
|
||||||
|
required: false
|
||||||
|
- in: query
|
||||||
|
name: limit
|
||||||
|
schema:
|
||||||
|
type: integer
|
||||||
|
minimum: 1
|
||||||
|
maximum: 500
|
||||||
|
default: 100
|
||||||
|
required: false
|
||||||
|
description: 'The maximum number of items to return. Max value is 500, default is 100'
|
||||||
|
- in: query
|
||||||
|
name: order
|
||||||
|
required: false
|
||||||
|
description: Ordering entries by ipornet field. Default ASC
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
enum:
|
||||||
|
- ASC
|
||||||
|
- DESC
|
||||||
|
example: ASC
|
||||||
|
responses:
|
||||||
|
'200':
|
||||||
|
description: successful operation
|
||||||
|
content:
|
||||||
|
application/json; charset=utf-8:
|
||||||
|
schema:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
$ref: '#/components/schemas/IPListEntry'
|
||||||
|
'400':
|
||||||
|
$ref: '#/components/responses/BadRequest'
|
||||||
|
'401':
|
||||||
|
$ref: '#/components/responses/Unauthorized'
|
||||||
|
'403':
|
||||||
|
$ref: '#/components/responses/Forbidden'
|
||||||
|
'500':
|
||||||
|
$ref: '#/components/responses/InternalServerError'
|
||||||
|
default:
|
||||||
|
$ref: '#/components/responses/DefaultResponse'
|
||||||
|
post:
|
||||||
|
tags:
|
||||||
|
- IP Lists
|
||||||
|
summary: Add a new IP list entry
|
||||||
|
description: Add an IP address or a CIDR network to a supported list
|
||||||
|
operationId: add_ip_list_entry
|
||||||
|
requestBody:
|
||||||
|
required: true
|
||||||
|
content:
|
||||||
|
application/json; charset=utf-8:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/IPListEntry'
|
||||||
|
responses:
|
||||||
|
'201':
|
||||||
|
description: successful operation
|
||||||
|
headers:
|
||||||
|
Location:
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
description: 'URI of the newly created object'
|
||||||
|
content:
|
||||||
|
application/json; charset=utf-8:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/ApiResponse'
|
||||||
|
example:
|
||||||
|
message: Entry added
|
||||||
|
'400':
|
||||||
|
$ref: '#/components/responses/BadRequest'
|
||||||
|
'401':
|
||||||
|
$ref: '#/components/responses/Unauthorized'
|
||||||
|
'403':
|
||||||
|
$ref: '#/components/responses/Forbidden'
|
||||||
|
'500':
|
||||||
|
$ref: '#/components/responses/InternalServerError'
|
||||||
|
default:
|
||||||
|
$ref: '#/components/responses/DefaultResponse'
|
||||||
|
/iplists/{type}/{ipornet}:
|
||||||
|
parameters:
|
||||||
|
- name: type
|
||||||
|
in: path
|
||||||
|
description: IP list type
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/IPListType'
|
||||||
|
- name: ipornet
|
||||||
|
in: path
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
get:
|
||||||
|
tags:
|
||||||
|
- IP Lists
|
||||||
|
summary: Find entry by ipornet
|
||||||
|
description: Returns the entry with the given ipornet if it exists.
|
||||||
|
operationId: get_ip_list_by_ipornet
|
||||||
|
responses:
|
||||||
|
'200':
|
||||||
|
description: successful operation
|
||||||
|
content:
|
||||||
|
application/json; charset=utf-8:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/IPListEntry'
|
||||||
|
'400':
|
||||||
|
$ref: '#/components/responses/BadRequest'
|
||||||
|
'401':
|
||||||
|
$ref: '#/components/responses/Unauthorized'
|
||||||
|
'403':
|
||||||
|
$ref: '#/components/responses/Forbidden'
|
||||||
|
'404':
|
||||||
|
$ref: '#/components/responses/NotFound'
|
||||||
|
'500':
|
||||||
|
$ref: '#/components/responses/InternalServerError'
|
||||||
|
default:
|
||||||
|
$ref: '#/components/responses/DefaultResponse'
|
||||||
|
put:
|
||||||
|
tags:
|
||||||
|
- IP Lists
|
||||||
|
summary: Update IP list entry
|
||||||
|
description: Updates an existing IP list entry
|
||||||
|
operationId: update_ip_list_entry
|
||||||
|
requestBody:
|
||||||
|
required: true
|
||||||
|
content:
|
||||||
|
application/json; charset=utf-8:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/IPListEntry'
|
||||||
|
responses:
|
||||||
|
'200':
|
||||||
|
description: successful operation
|
||||||
|
content:
|
||||||
|
application/json; charset=utf-8:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/ApiResponse'
|
||||||
|
example:
|
||||||
|
message: Entry updated
|
||||||
|
'400':
|
||||||
|
$ref: '#/components/responses/BadRequest'
|
||||||
|
'401':
|
||||||
|
$ref: '#/components/responses/Unauthorized'
|
||||||
|
'403':
|
||||||
|
$ref: '#/components/responses/Forbidden'
|
||||||
|
'404':
|
||||||
|
$ref: '#/components/responses/NotFound'
|
||||||
|
'500':
|
||||||
|
$ref: '#/components/responses/InternalServerError'
|
||||||
|
default:
|
||||||
|
$ref: '#/components/responses/DefaultResponse'
|
||||||
|
delete:
|
||||||
|
tags:
|
||||||
|
- IP Lists
|
||||||
|
summary: Delete IP list entry
|
||||||
|
description: Deletes an existing IP list entry
|
||||||
|
operationId: delete_ip_list_entry
|
||||||
|
responses:
|
||||||
|
'200':
|
||||||
|
description: successful operation
|
||||||
|
content:
|
||||||
|
application/json; charset=utf-8:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/ApiResponse'
|
||||||
|
example:
|
||||||
|
message: Entry deleted
|
||||||
|
'400':
|
||||||
|
$ref: '#/components/responses/BadRequest'
|
||||||
|
'401':
|
||||||
|
$ref: '#/components/responses/Unauthorized'
|
||||||
|
'403':
|
||||||
|
$ref: '#/components/responses/Forbidden'
|
||||||
|
'404':
|
||||||
|
$ref: '#/components/responses/NotFound'
|
||||||
|
'500':
|
||||||
|
$ref: '#/components/responses/InternalServerError'
|
||||||
|
default:
|
||||||
|
$ref: '#/components/responses/DefaultResponse'
|
||||||
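The listing endpoint defined above pages with a cursor rather than an offset: you request up to limit entries starting from a given ipornet, in ASC or DESC order. A minimal Go client sketch for fetching defender entries (type 2); the base URL, port and admin token are assumptions made for the example, not values taken from this commit:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Assumed values: local instance, default REST base path, admin bearer token.
	const base = "http://127.0.0.1:8080/api/v2"
	const token = "ADMIN-TOKEN"

	// List defender entries (type 2) whose ipornet starts with 192.168.,
	// resuming after the last ipornet returned by the previous page.
	q := url.Values{}
	q.Set("filter", "192.168.")
	q.Set("from", "192.168.1.0/24")
	q.Set("limit", "100")
	q.Set("order", "ASC")

	req, err := http.NewRequest(http.MethodGet, base+"/iplists/2?"+q.Encode(), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}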
/defender/hosts:
|
/defender/hosts:
|
||||||
get:
|
get:
|
||||||
tags:
|
tags:
|
||||||
|
@@ -4616,7 +4815,8 @@ components:
         - metadata_checks
         - view_events
         - manage_event_rules
-        - manager_roles
+        - manage_roles
+        - manage_ip_lists
       description: |
         Admin permissions:
         * `*` - all permissions are granted
@@ -4638,7 +4838,8 @@ components:
         * `metadata_checks` - view and start metadata checks is allowed
         * `view_events` - view and search filesystem and provider events is allowed
         * `manage_event_rules` - manage event actions and rules is allowed
-        * `manager_roles` - manage roles is allowed
+        * `manage_roles` - manage roles is allowed
+        * `manage_ip_lists` - manage global and rate limiter allow lists and defender block and safe lists is allowed
     FsProviders:
       type: integer
       enum:
@@ -4903,6 +5104,26 @@ components:
         TLS version:
         * `12` - TLS 1.2
         * `13` - TLS 1.3
+    IPListType:
+      type: integer
+      enum:
+        - 1
+        - 2
+        - 3
+      description: >
+        IP List types:
+        * `1` - allow list
+        * `2` - defender
+        * `3` - rate limiter safe list
+    IPListMode:
+      type: integer
+      enum:
+        - 1
+        - 2
+      description: >
+        IP list modes
+        * `1` - allow
+        * `2` - deny, supported for defender list type only
     TOTPConfig:
       type: object
       properties:
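When scripting against these endpoints it can help to mirror the numeric codes above as named constants. The identifiers below are purely illustrative; they are not taken from the SFTPGo code base:

// Illustrative names for the numeric codes documented above.
const (
	ipListTypeAllow           = 1 // allow list
	ipListTypeDefender        = 2 // defender block/safe list
	ipListTypeRateLimiterSafe = 3 // rate limiter safe list

	ipListModeAllow = 1
	ipListModeDeny  = 2 // supported for the defender list type only
)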
@ -4948,7 +5169,7 @@ components:
|
||||||
properties:
|
properties:
|
||||||
path:
|
path:
|
||||||
type: string
|
type: string
|
||||||
description: 'exposed virtual path, if no other specific filter is defined, the filter applies for sub directories too. For example if filters are defined for the paths "/" and "/sub" then the filters for "/" are applied for any file outside the "/sub" directory'
|
description: 'virtual path as seen by users, if no other specific filter is defined, the filter applies for sub directories too. For example if filters are defined for the paths "/" and "/sub" then the filters for "/" are applied for any file outside the "/sub" directory'
|
||||||
allowed_patterns:
|
allowed_patterns:
|
||||||
type: array
|
type: array
|
||||||
items:
|
items:
|
||||||
|
@ -5665,7 +5886,7 @@ components:
|
||||||
description: Last user login as unix timestamp in milliseconds. It is saved at most once every 10 minutes
|
description: Last user login as unix timestamp in milliseconds. It is saved at most once every 10 minutes
|
||||||
role:
|
role:
|
||||||
type: string
|
type: string
|
||||||
description: 'If set the admin can only administer users with the same role. Role admins cannot have the following permissions: "manage_admins", "manage_apikeys", "manage_system", "manage_event_rules", "manage_roles"'
|
description: 'If set the admin can only administer users with the same role. Role admins cannot have the following permissions: "manage_admins", "manage_apikeys", "manage_system", "manage_event_rules", "manage_roles", "manage_ip_lists"'
|
||||||
AdminProfile:
|
AdminProfile:
|
||||||
type: object
|
type: object
|
||||||
properties:
|
properties:
|
||||||
|
@ -5823,7 +6044,7 @@ components:
|
||||||
properties:
|
properties:
|
||||||
path:
|
path:
|
||||||
type: string
|
type: string
|
||||||
description: 'exposed virtual directory path, if no other specific retention is defined, the retention applies for sub directories too. For example if retention is defined for the paths "/" and "/sub" then the retention for "/" is applied for any file outside the "/sub" directory'
|
description: 'virtual directory path as seen by users, if no other specific retention is defined, the retention applies for sub directories too. For example if retention is defined for the paths "/" and "/sub" then the retention for "/" is applied for any file outside the "/sub" directory'
|
||||||
example: '/'
|
example: '/'
|
||||||
retention:
|
retention:
|
||||||
type: integer
|
type: integer
|
||||||
|
@ -5985,7 +6206,7 @@ components:
|
||||||
$ref: '#/components/schemas/TLSVersions'
|
$ref: '#/components/schemas/TLSVersions'
|
||||||
force_passive_ip:
|
force_passive_ip:
|
||||||
type: string
|
type: string
|
||||||
description: External IP address to expose for passive connections
|
description: External IP address for passive connections
|
||||||
passive_ip_overrides:
|
passive_ip_overrides:
|
||||||
type: array
|
type: array
|
||||||
items:
|
items:
|
||||||
|
@@ -6107,6 +6328,21 @@ components:
           type: boolean
         mfa:
           $ref: '#/components/schemas/MFAStatus'
+        allow_list:
+          type: object
+          properties:
+            is_active:
+              type: boolean
+        rate_limiters:
+          type: object
+          properties:
+            is_active:
+              type: boolean
+            protocols:
+              type: array
+              items:
+                type: string
+                example: SSH
     Share:
       type: object
       properties:
@@ -6827,6 +7063,30 @@ components:
           type: array
           items:
             $ref: '#/components/schemas/EventActionMinimal'
+    IPListEntry:
+      type: object
+      properties:
+        ipornet:
+          type: string
+          description: IP address or network in CIDR format, for example `192.168.1.2/32`, `192.168.0.0/24`, `2001:db8::/32`
+        description:
+          type: string
+          description: optional description
+        type:
+          $ref: '#/components/schemas/IPListType'
+        mode:
+          $ref: '#/components/schemas/IPListMode'
+        protocols:
+          type: integer
+          description: Defines the protocols the entry applies to. `0` means all the supported protocols, 1 SSH, 2 FTP, 4 WebDAV, 8 HTTP. Protocols can be combined, for example 3 means SSH and FTP
+        created_at:
+          type: integer
+          format: int64
+          description: creation time as unix timestamp in milliseconds
+        updated_at:
+          type: integer
+          format: int64
+          description: last update time as unix timestamp in milliseconds
     ApiResponse:
       type: object
       properties:
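The protocols field is a bit mask, so the documented values are combined with a bitwise OR. A tiny sketch of the encoding, with constant names invented for the example:

package main

import "fmt"

// Bit values documented for the IPListEntry protocols field (illustrative names).
const (
	protoSSH    = 1
	protoFTP    = 2
	protoWebDAV = 4
	protoHTTP   = 8
)

func main() {
	// SSH plus FTP: 1|2 = 3, the combination used as an example in the spec.
	protocols := protoSSH | protoFTP
	fmt.Println(protocols)                  // 3
	fmt.Println(protocols&protoWebDAV != 0) // false, WebDAV is not included
}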
@ -1,6 +1,6 @@
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
NFPM_VERSION=2.24.0
|
NFPM_VERSION=2.25.0
|
||||||
NFPM_ARCH=${NFPM_ARCH:-amd64}
|
NFPM_ARCH=${NFPM_ARCH:-amd64}
|
||||||
if [ -z ${SFTPGO_VERSION} ]
|
if [ -z ${SFTPGO_VERSION} ]
|
||||||
then
|
then
|
||||||
|
|
|
@ -3,24 +3,24 @@
|
||||||
<package xmlns="http://schemas.microsoft.com/packaging/2015/06/nuspec.xsd">
|
<package xmlns="http://schemas.microsoft.com/packaging/2015/06/nuspec.xsd">
|
||||||
<metadata>
|
<metadata>
|
||||||
<id>sftpgo</id>
|
<id>sftpgo</id>
|
||||||
<version>2.4.3</version>
|
<version>2.4.4</version>
|
||||||
<packageSourceUrl>https://github.com/drakkan/sftpgo/tree/main/pkgs/choco</packageSourceUrl>
|
<packageSourceUrl>https://github.com/drakkan/sftpgo/tree/main/pkgs/choco</packageSourceUrl>
|
||||||
<owners>asheroto</owners>
|
<owners>asheroto</owners>
|
||||||
<title>SFTPGo</title>
|
<title>SFTPGo</title>
|
||||||
<authors>Nicola Murino</authors>
|
<authors>Nicola Murino</authors>
|
||||||
<projectUrl>https://github.com/drakkan/sftpgo</projectUrl>
|
<projectUrl>https://github.com/drakkan/sftpgo</projectUrl>
|
||||||
<iconUrl>https://cdn.statically.io/gh/drakkan/sftpgo/v2.4.3/static/img/logo.png</iconUrl>
|
<iconUrl>https://cdn.statically.io/gh/drakkan/sftpgo/v2.4.4/static/img/logo.png</iconUrl>
|
||||||
<licenseUrl>https://github.com/drakkan/sftpgo/blob/main/LICENSE</licenseUrl>
|
<licenseUrl>https://github.com/drakkan/sftpgo/blob/main/LICENSE</licenseUrl>
|
||||||
<requireLicenseAcceptance>false</requireLicenseAcceptance>
|
<requireLicenseAcceptance>false</requireLicenseAcceptance>
|
||||||
<projectSourceUrl>https://github.com/drakkan/sftpgo</projectSourceUrl>
|
<projectSourceUrl>https://github.com/drakkan/sftpgo</projectSourceUrl>
|
||||||
<docsUrl>https://github.com/drakkan/sftpgo/tree/v2.4.3/docs</docsUrl>
|
<docsUrl>https://github.com/drakkan/sftpgo/tree/v2.4.4/docs</docsUrl>
|
||||||
<bugTrackerUrl>https://github.com/drakkan/sftpgo/issues</bugTrackerUrl>
|
<bugTrackerUrl>https://github.com/drakkan/sftpgo/issues</bugTrackerUrl>
|
||||||
<tags>sftp sftp-server ftp webdav s3 azure-blob google-cloud-storage cloud-storage scp data-at-rest-encryption multi-factor-authentication multi-step-authentication</tags>
|
<tags>sftp sftp-server ftp webdav s3 azure-blob google-cloud-storage cloud-storage scp data-at-rest-encryption multi-factor-authentication multi-step-authentication</tags>
|
||||||
<summary>Fully featured and highly configurable SFTP server with optional HTTP/S,FTP/S and WebDAV support.</summary>
|
<summary>Fully featured and highly configurable SFTP server with optional HTTP/S,FTP/S and WebDAV support.</summary>
|
||||||
<description>SFTPGo allows you to securely share your files over SFTP and optionally over HTTP/S, FTP/S and WebDAV as well.
|
<description>SFTPGo allows you to securely share your files over SFTP and optionally over HTTP/S, FTP/S and WebDAV as well.
|
||||||
Several storage backends are supported and they are configurable per-user, so you can serve a local directory for a user and an S3 bucket (or part of it) for another one.
|
Several storage backends are supported and they are configurable per-user, so you can serve a local directory for a user and an S3 bucket (or part of it) for another one.
|
||||||
|
|
||||||
SFTPGo also supports virtual folders. A virtual folder can use any of the supported storage backends. So you can have, for example, an S3 user that exposes a GCS bucket (or part of it) on a specified path and an encrypted local filesystem on another one. Virtual folders can be private or shared among multiple users, for shared virtual folders you can define different quota limits for each user.
|
SFTPGo also supports virtual folders. A virtual folder can use any of the supported storage backends. So you can have, for example, a user with the S3 backend mapping a GCS bucket (or part of it) on a specified path and an encrypted local filesystem on another one. Virtual folders can be private or shared among multiple users, for shared virtual folders you can define different quota limits for each user.
|
||||||
|
|
||||||
SFTPGo allows you to create HTTP/S links to externally share files and folders securely, by setting limits to the number of downloads/uploads, protecting the share with a password, limiting access by source IP address, setting an automatic expiration date.
|
SFTPGo allows you to create HTTP/S links to externally share files and folders securely, by setting limits to the number of downloads/uploads, protecting the share with a password, limiting access by source IP address, setting an automatic expiration date.
|
||||||
|
|
||||||
|
@ -32,7 +32,7 @@ You can find more info [here](https://github.com/drakkan/sftpgo).
|
||||||
|
|
||||||
* This package installs SFTPGo as Windows Service.
|
* This package installs SFTPGo as Windows Service.
|
||||||
* After the first installation please take a look at the [Getting Started Guide](https://github.com/drakkan/sftpgo/blob/main/docs/howto/getting-started.md).</description>
|
* After the first installation please take a look at the [Getting Started Guide](https://github.com/drakkan/sftpgo/blob/main/docs/howto/getting-started.md).</description>
|
||||||
<releaseNotes>https://github.com/drakkan/sftpgo/releases/tag/v2.4.3</releaseNotes>
|
<releaseNotes>https://github.com/drakkan/sftpgo/releases/tag/v2.4.4</releaseNotes>
|
||||||
</metadata>
|
</metadata>
|
||||||
<files>
|
<files>
|
||||||
<file src="**" exclude="**\*.md;**\icon.png;**\icon.jpg;**\icon.svg" />
|
<file src="**" exclude="**\*.md;**\icon.png;**\icon.jpg;**\icon.svg" />
|
||||||
|
|
|
@ -1,8 +1,8 @@
|
||||||
$ErrorActionPreference = 'Stop'
|
$ErrorActionPreference = 'Stop'
|
||||||
$packageName = 'sftpgo'
|
$packageName = 'sftpgo'
|
||||||
$softwareName = 'SFTPGo'
|
$softwareName = 'SFTPGo'
|
||||||
$url = 'https://github.com/drakkan/sftpgo/releases/download/v2.4.3/sftpgo_v2.4.3_windows_x86_64.exe'
|
$url = 'https://github.com/drakkan/sftpgo/releases/download/v2.4.4/sftpgo_v2.4.4_windows_x86_64.exe'
|
||||||
$checksum = '7DF2DBC5EEBC859E4DE1832D1124F9872C1394A1C9D00EBA44F89E5EDABFFE4F'
|
$checksum = 'E43B7097B2099ACE95D336694DAEEF65646D4078FC045011DD3C6A2F07A30B46'
|
||||||
$silentArgs = '/VERYSILENT'
|
$silentArgs = '/VERYSILENT'
|
||||||
$validExitCodes = @(0)
|
$validExitCodes = @(0)
|
||||||
|
|
||||||
|
@ -47,8 +47,8 @@ Write-Output ""
|
||||||
Write-Output "General information (README) location:"
|
Write-Output "General information (README) location:"
|
||||||
Write-Output "`thttps://github.com/drakkan/sftpgo"
|
Write-Output "`thttps://github.com/drakkan/sftpgo"
|
||||||
Write-Output "Getting started guide location:"
|
Write-Output "Getting started guide location:"
|
||||||
Write-Output "`thttps://github.com/drakkan/sftpgo/blob/v2.4.3/docs/howto/getting-started.md"
|
Write-Output "`thttps://github.com/drakkan/sftpgo/blob/v2.4.4/docs/howto/getting-started.md"
|
||||||
Write-Output "Detailed information (docs folder) location:"
|
Write-Output "Detailed information (docs folder) location:"
|
||||||
Write-Output "`thttps://github.com/drakkan/sftpgo/tree/v2.4.3/docs"
|
Write-Output "`thttps://github.com/drakkan/sftpgo/tree/v2.4.4/docs"
|
||||||
Write-Output ""
|
Write-Output ""
|
||||||
Write-Output "---------------------------"
|
Write-Output "---------------------------"
|
|
@ -1,3 +1,9 @@
|
||||||
|
sftpgo (2.4.4-1ppa1) bionic; urgency=medium
|
||||||
|
|
||||||
|
* New upstream release
|
||||||
|
|
||||||
|
-- Nicola Murino <nicola.murino@gmail.com> Sat, 04 Feb 2023 17:29:22 +0100
|
||||||
|
|
||||||
sftpgo (2.4.3-1ppa1) bionic; urgency=medium
|
sftpgo (2.4.3-1ppa1) bionic; urgency=medium
|
||||||
|
|
||||||
* New upstream release
|
* New upstream release
|
||||||
|
|
|
@ -2,7 +2,7 @@ Index: sftpgo/sftpgo.json
|
||||||
===================================================================
|
===================================================================
|
||||||
--- sftpgo.orig/sftpgo.json
|
--- sftpgo.orig/sftpgo.json
|
||||||
+++ sftpgo/sftpgo.json
|
+++ sftpgo/sftpgo.json
|
||||||
@@ -59,7 +59,7 @@
|
@@ -60,7 +60,7 @@
|
||||||
"domains": [],
|
"domains": [],
|
||||||
"email": "",
|
"email": "",
|
||||||
"key_type": "4096",
|
"key_type": "4096",
|
||||||
|
@ -11,7 +11,7 @@ Index: sftpgo/sftpgo.json
|
||||||
"ca_endpoint": "https://acme-v02.api.letsencrypt.org/directory",
|
"ca_endpoint": "https://acme-v02.api.letsencrypt.org/directory",
|
||||||
"renew_days": 30,
|
"renew_days": 30,
|
||||||
"http01_challenge": {
|
"http01_challenge": {
|
||||||
@@ -186,7 +186,7 @@
|
@@ -187,7 +187,7 @@
|
||||||
},
|
},
|
||||||
"data_provider": {
|
"data_provider": {
|
||||||
"driver": "sqlite",
|
"driver": "sqlite",
|
||||||
|
@ -20,7 +20,7 @@ Index: sftpgo/sftpgo.json
|
||||||
"host": "",
|
"host": "",
|
||||||
"port": 0,
|
"port": 0,
|
||||||
"username": "",
|
"username": "",
|
||||||
@@ -202,7 +202,7 @@
|
@@ -203,7 +203,7 @@
|
||||||
"track_quota": 2,
|
"track_quota": 2,
|
||||||
"delayed_quota_update": 0,
|
"delayed_quota_update": 0,
|
||||||
"pool_size": 0,
|
"pool_size": 0,
|
||||||
|
@ -29,7 +29,7 @@ Index: sftpgo/sftpgo.json
|
||||||
"actions": {
|
"actions": {
|
||||||
"execute_on": [],
|
"execute_on": [],
|
||||||
"execute_for": [],
|
"execute_for": [],
|
||||||
@@ -244,7 +244,7 @@
|
@@ -245,7 +245,7 @@
|
||||||
"port": 0,
|
"port": 0,
|
||||||
"proto": "http"
|
"proto": "http"
|
||||||
},
|
},
|
||||||
|
|
|
@@ -18,7 +18,7 @@
   "data_retention_hook": "",
   "max_total_connections": 0,
   "max_per_host_connections": 20,
-  "whitelist_file": "",
+  "allowlist_status": 0,
   "allow_self_connections": 0,
   "defender": {
     "enabled": false,
@@ -32,11 +32,7 @@
     "score_no_auth": 0,
     "observation_time": 30,
     "entries_soft_limit": 100,
-    "entries_hard_limit": 150,
-    "safelist_file": "",
-    "blocklist_file": "",
-    "safelist": [],
-    "blocklist": []
+    "entries_hard_limit": 150
   },
   "rate_limiters": [
     {
@@ -50,7 +46,6 @@
         "DAV",
         "HTTP"
       ],
-      "allow_list": [],
       "generate_defender_events": false,
       "entries_soft_limit": 100,
       "entries_hard_limit": 150
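With whitelist_file, safelist_file, blocklist_file and the rate limiter allow_list gone from the configuration, equivalent entries now live in the data provider and are managed through the /iplists endpoints. A sketch of re-adding a former blocklist entry via the REST API; the /api/v2 base path, local address and admin token are assumptions for the example, not part of this commit:

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Assumed values: local instance, default REST base path, admin bearer token.
	const base = "http://127.0.0.1:8080/api/v2"
	const token = "ADMIN-TOKEN"

	// Type 2 is the defender list, mode 2 means deny, protocols 0 means all.
	entry := []byte(`{"ipornet":"192.168.1.50/32","mode":2,"protocols":0,"description":"migrated from blocklist_file"}`)

	req, err := http.NewRequest(http.MethodPost, base+"/iplists/2", bytes.NewReader(entry))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 201 Created is expected on success
}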
@ -101,7 +101,7 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
<b>Role</b>
|
<b>Role</b>
|
||||||
</div>
|
</div>
|
||||||
<div class="card-body">
|
<div class="card-body">
|
||||||
<h6 class="card-title mb-4">Setting a role limit the administrator to only manage users with the same role. Role administrators cannot have the following permissions: "manage_admins", "manage_roles", "manage_event_rules", "manage_apikeys", "manage_system"</h6>
|
<h6 class="card-title mb-4">Setting a role limit the administrator to only manage users with the same role. Role administrators cannot have the following permissions: "manage_admins", "manage_roles", "manage_event_rules", "manage_apikeys", "manage_system", "manage_ip_lists"</h6>
|
||||||
<div class="form-group row">
|
<div class="form-group row">
|
||||||
<label for="idRole" class="col-sm-2 col-form-label">Role</label>
|
<label for="idRole" class="col-sm-2 col-form-label">Role</label>
|
||||||
<div class="col-sm-10">
|
<div class="col-sm-10">
|
||||||
|
|
|
@ -73,12 +73,6 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
<i class="fas fa-users"></i>
|
<i class="fas fa-users"></i>
|
||||||
<span>{{.UsersTitle}}</span></a>
|
<span>{{.UsersTitle}}</span></a>
|
||||||
</li>
|
</li>
|
||||||
|
|
||||||
<li class="nav-item {{if eq .CurrentURL .FoldersURL}}active{{end}}">
|
|
||||||
<a class="nav-link" href="{{.FoldersURL}}">
|
|
||||||
<i class="fas fa-folder"></i>
|
|
||||||
<span>{{.FoldersTitle}}</span></a>
|
|
||||||
</li>
|
|
||||||
{{ end }}
|
{{ end }}
|
||||||
|
|
||||||
{{ if .LoggedAdmin.HasPermission "manage_groups"}}
|
{{ if .LoggedAdmin.HasPermission "manage_groups"}}
|
||||||
|
@ -89,6 +83,22 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
</li>
|
</li>
|
||||||
{{end}}
|
{{end}}
|
||||||
|
|
||||||
|
{{ if .LoggedAdmin.HasPermission "view_users"}}
|
||||||
|
<li class="nav-item {{if eq .CurrentURL .FoldersURL}}active{{end}}">
|
||||||
|
<a class="nav-link" href="{{.FoldersURL}}">
|
||||||
|
<i class="fas fa-folder"></i>
|
||||||
|
<span>{{.FoldersTitle}}</span></a>
|
||||||
|
</li>
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
{{ if .LoggedAdmin.HasPermission "view_conns"}}
|
||||||
|
<li class="nav-item {{if eq .CurrentURL .ConnectionsURL}}active{{end}}">
|
||||||
|
<a class="nav-link" href="{{.ConnectionsURL}}">
|
||||||
|
<i class="fas fa-exchange-alt"></i>
|
||||||
|
<span>{{.ConnectionsTitle}}</span></a>
|
||||||
|
</li>
|
||||||
|
{{end}}
|
||||||
|
|
||||||
{{ if .LoggedAdmin.HasPermission "manage_event_rules"}}
|
{{ if .LoggedAdmin.HasPermission "manage_event_rules"}}
|
||||||
<li class="nav-item {{if .IsEventManagerPage}}active{{end}}">
|
<li class="nav-item {{if .IsEventManagerPage}}active{{end}}">
|
||||||
<a class="nav-link {{if not .IsEventManagerPage}}collapsed{{end}}" href="#" data-toggle="collapse" data-target="#collapseEventManager"
|
<a class="nav-link {{if not .IsEventManagerPage}}collapsed{{end}}" href="#" data-toggle="collapse" data-target="#collapseEventManager"
|
||||||
|
@ -105,27 +115,23 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
</li>
|
</li>
|
||||||
{{end}}
|
{{end}}
|
||||||
|
|
||||||
{{ if .LoggedAdmin.HasPermission "view_conns"}}
|
{{ if or (.LoggedAdmin.HasPermission "manage_ip_lists") (and .HasDefender (.LoggedAdmin.HasPermission "view_defender"))}}
|
||||||
<li class="nav-item {{if eq .CurrentURL .ConnectionsURL}}active{{end}}">
|
<li class="nav-item {{if .IsIPManagerPage}}active{{end}}">
|
||||||
<a class="nav-link" href="{{.ConnectionsURL}}">
|
<a class="nav-link {{if not .IsIPManagerPage}}collapsed{{end}}" href="#" data-toggle="collapse" data-target="#collapseIPManager"
|
||||||
<i class="fas fa-exchange-alt"></i>
|
aria-expanded="true" aria-controls="collapseIPManager">
|
||||||
<span>{{.ConnectionsTitle}}</span></a>
|
|
||||||
</li>
|
|
||||||
{{end}}
|
|
||||||
|
|
||||||
{{ if and .HasDefender (.LoggedAdmin.HasPermission "view_defender")}}
|
|
||||||
<li class="nav-item {{if eq .CurrentURL .DefenderURL}}active{{end}}">
|
|
||||||
<a class="nav-link" href="{{.DefenderURL}}">
|
|
||||||
<i class="fas fa-shield-alt"></i>
|
<i class="fas fa-shield-alt"></i>
|
||||||
<span>{{.DefenderTitle}}</span></a>
|
<span>IP Manager</span>
|
||||||
</li>
|
</a>
|
||||||
|
<div id="collapseIPManager" class="collapse {{if .IsIPManagerPage}}show{{end}}" aria-labelledby="headingIPManager" data-parent="#accordionSidebar">
|
||||||
|
<div class="bg-white py-2 collapse-inner rounded">
|
||||||
|
{{ if .LoggedAdmin.HasPermission "manage_ip_lists"}}
|
||||||
|
<a class="collapse-item {{if eq .CurrentURL .IPListsURL}}active{{end}}" href="{{.IPListsURL}}">{{.IPListsTitle}}</a>
|
||||||
{{end}}
|
{{end}}
|
||||||
|
{{ if and .HasDefender (.LoggedAdmin.HasPermission "view_defender")}}
|
||||||
{{ if .LoggedAdmin.HasPermission "manage_roles"}}
|
<a class="collapse-item {{if eq .CurrentURL .DefenderURL}}active{{end}}" href="{{.DefenderURL}}">{{.DefenderTitle}}</a>
|
||||||
<li class="nav-item {{if eq .CurrentURL .RolesURL}}active{{end}}">
|
{{end}}
|
||||||
<a class="nav-link" href="{{.RolesURL}}">
|
</div>
|
||||||
<i class="fas fa-user-lock"></i>
|
</div>
|
||||||
<span>{{.RolesTitle}}</span></a>
|
|
||||||
</li>
|
</li>
|
||||||
{{end}}
|
{{end}}
|
||||||
|
|
||||||
|
@ -137,6 +143,14 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
</li>
|
</li>
|
||||||
{{end}}
|
{{end}}
|
||||||
|
|
||||||
|
{{ if .LoggedAdmin.HasPermission "manage_roles"}}
|
||||||
|
<li class="nav-item {{if eq .CurrentURL .RolesURL}}active{{end}}">
|
||||||
|
<a class="nav-link" href="{{.RolesURL}}">
|
||||||
|
<i class="fas fa-user-lock"></i>
|
||||||
|
<span>{{.RolesTitle}}</span></a>
|
||||||
|
</li>
|
||||||
|
{{end}}
|
||||||
|
|
||||||
{{ if and .HasSearcher (.LoggedAdmin.HasPermission "view_events")}}
|
{{ if and .HasSearcher (.LoggedAdmin.HasPermission "view_events")}}
|
||||||
<li class="nav-item {{if eq .CurrentURL .EventsURL}}active{{end}}">
|
<li class="nav-item {{if eq .CurrentURL .EventsURL}}active{{end}}">
|
||||||
<a class="nav-link" href="{{.EventsURL}}">
|
<a class="nav-link" href="{{.EventsURL}}">
|
||||||
|
|
|
@ -31,7 +31,7 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
</div>
|
</div>
|
||||||
<div class="card shadow mb-4">
|
<div class="card shadow mb-4">
|
||||||
<div class="card-header py-3">
|
<div class="card-header py-3">
|
||||||
<h6 class="m-0 font-weight-bold text-primary">View and manage blocklist</h6>
|
<h6 class="m-0 font-weight-bold text-primary">View and manage auto blocklist</h6>
|
||||||
</div>
|
</div>
|
||||||
<div class="card-body">
|
<div class="card-body">
|
||||||
<div class="table-responsive">
|
<div class="table-responsive">
|
||||||
|
@ -63,7 +63,7 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
<span aria-hidden="true">×</span>
|
<span aria-hidden="true">×</span>
|
||||||
</button>
|
</button>
|
||||||
</div>
|
</div>
|
||||||
<div class="modal-body">Do you want to remoce the selected blocklist entry?</div>
|
<div class="modal-body">Do you want to remoce the selected entry?</div>
|
||||||
<div class="modal-footer">
|
<div class="modal-footer">
|
||||||
<button class="btn btn-secondary" type="button" data-dismiss="modal">
|
<button class="btn btn-secondary" type="button" data-dismiss="modal">
|
||||||
Cancel
|
Cancel
|
||||||
|
@ -89,10 +89,10 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
<script type="text/javascript">
|
<script type="text/javascript">
|
||||||
|
|
||||||
function deleteAction() {
|
function deleteAction() {
|
||||||
var table = $('#dataTable').DataTable();
|
let table = $('#dataTable').DataTable();
|
||||||
table.button('delete:name').enable(false);
|
table.button('delete:name').enable(false);
|
||||||
var id = table.row({ selected: true }).data()["id"];
|
let id = table.row({ selected: true }).data()["id"];
|
||||||
var path = '{{.DefenderHostsURL}}' + "/" + fixedEncodeURIComponent(id);
|
let path = '{{.DefenderHostsURL}}' + "/" + fixedEncodeURIComponent(id);
|
||||||
$('#deleteModal').modal('hide');
|
$('#deleteModal').modal('hide');
|
||||||
$.ajax({
|
$.ajax({
|
||||||
url: path,
|
url: path,
|
||||||
|
@ -104,9 +104,9 @@ function deleteAction() {
|
||||||
window.location.href = '{{.DefenderURL}}';
|
window.location.href = '{{.DefenderURL}}';
|
||||||
},
|
},
|
||||||
error: function ($xhr, textStatus, errorThrown) {
|
error: function ($xhr, textStatus, errorThrown) {
|
||||||
var txt = "Unable to delete the selected entry";
|
let txt = "Unable to delete the selected entry";
|
||||||
if ($xhr) {
|
if ($xhr) {
|
||||||
var json = $xhr.responseJSON;
|
let json = $xhr.responseJSON;
|
||||||
if (json) {
|
if (json) {
|
||||||
if (json.message){
|
if (json.message){
|
||||||
txt += ": " + json.message;
|
txt += ": " + json.message;
|
||||||
|
@ -144,15 +144,15 @@ function deleteAction() {
|
||||||
enabled: false
|
enabled: false
|
||||||
};
|
};
|
||||||
|
|
||||||
var table = $('#dataTable').DataTable({
|
let table = $('#dataTable').DataTable({
|
||||||
"ajax": {
|
"ajax": {
|
||||||
"url": "{{.DefenderHostsURL}}",
|
"url": "{{.DefenderHostsURL}}",
|
||||||
"dataSrc": "",
|
"dataSrc": "",
|
||||||
"error": function ($xhr, textStatus, errorThrown) {
|
"error": function ($xhr, textStatus, errorThrown) {
|
||||||
$(".dataTables_processing").hide();
|
$(".dataTables_processing").hide();
|
||||||
var txt = "Failed to get defender's list";
|
let txt = "Failed to get auto blocklist";
|
||||||
if ($xhr) {
|
if ($xhr) {
|
||||||
var json = $xhr.responseJSON;
|
let json = $xhr.responseJSON;
|
||||||
if (json) {
|
if (json) {
|
||||||
if (json.message){
|
if (json.message){
|
||||||
txt += ": " + json.message;
|
txt += ": " + json.message;
|
||||||
|
@ -218,7 +218,7 @@ function deleteAction() {
|
||||||
|
|
||||||
{{if .LoggedAdmin.HasPermission "manage_defender"}}
|
{{if .LoggedAdmin.HasPermission "manage_defender"}}
|
||||||
table.on('select deselect', function () {
|
table.on('select deselect', function () {
|
||||||
var selectedRows = table.rows({ selected: true }).count();
|
let selectedRows = table.rows({ selected: true }).count();
|
||||||
table.button('delete:name').enable(selectedRows == 1);
|
table.button('delete:name').enable(selectedRows == 1);
|
||||||
});
|
});
|
||||||
{{end}}
|
{{end}}
|
||||||
|
|
|
@ -23,7 +23,7 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
{{define "additionalnavitems"}}
|
{{define "additionalnavitems"}}
|
||||||
<li class="nav-item dropdown no-arrow mx-1">
|
<li class="nav-item dropdown no-arrow mx-1">
|
||||||
<a class="nav-link dropdown-toggle" href="#" id="editorDropdown" role="button"
|
<a class="nav-link dropdown-toggle" href="#" id="infoDropdown" role="button"
|
||||||
data-toggle="modal" data-target="#infoModal">
|
data-toggle="modal" data-target="#infoModal">
|
||||||
<i class="fas fa-info fa-fw"></i>
|
<i class="fas fa-info fa-fw"></i>
|
||||||
</a>
|
</a>
|
||||||
|
|
|
@ -157,7 +157,7 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
let dateFn = $.fn.dataTable.render.datetime();
|
let dateFn = $.fn.dataTable.render.datetime();
|
||||||
let isFsDataTableInitialized = false;
|
let isFsDataTableInitialized = false;
|
||||||
let isProviderDataTableInitialized = false;
|
let isProviderDataTableInitialized = false;
|
||||||
let pageSize = 20;
|
const pageSize = 20;
|
||||||
const paginationData = new Map();
|
const paginationData = new Map();
|
||||||
|
|
||||||
function fileSizeIEC(a,b,c,d,e){
|
function fileSizeIEC(a,b,c,d,e){
|
||||||
|
@ -231,7 +231,7 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
paginationData.set("prevClicked",false);
|
paginationData.set("prevClicked",false);
|
||||||
paginationData.set("nextClicked",false);
|
paginationData.set("nextClicked",false);
|
||||||
let exportURL = getSearchURL(true);
|
let exportURL = getSearchURL(true);
|
||||||
var ts = new Date().getTime().toString();
|
let ts = new Date().getTime().toString();
|
||||||
window.open(`${exportURL}&_=${ts}`);
|
window.open(`${exportURL}&_=${ts}`);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -338,9 +338,9 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
"dataSrc": handleResponseData,
|
"dataSrc": handleResponseData,
|
||||||
"error": function ($xhr, textStatus, errorThrown) {
|
"error": function ($xhr, textStatus, errorThrown) {
|
||||||
$(".dataTables_processing").hide();
|
$(".dataTables_processing").hide();
|
||||||
var txt = "Failed to get provider events";
|
let txt = "Failed to get provider events";
|
||||||
if ($xhr) {
|
if ($xhr) {
|
||||||
var json = $xhr.responseJSON;
|
let json = $xhr.responseJSON;
|
||||||
if (json) {
|
if (json) {
|
||||||
if (json.message){
|
if (json.message){
|
||||||
txt += ": " + json.message;
|
txt += ": " + json.message;
|
||||||
|
@ -423,7 +423,7 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
"dataSrc": handleResponseData,
|
"dataSrc": handleResponseData,
|
||||||
"error": function ($xhr, textStatus, errorThrown) {
|
"error": function ($xhr, textStatus, errorThrown) {
|
||||||
$(".dataTables_processing").hide();
|
$(".dataTables_processing").hide();
|
||||||
var txt = "Failed to get filesystem events";
|
let txt = "Failed to get filesystem events";
|
||||||
if ($xhr) {
|
if ($xhr) {
|
||||||
let json = $xhr.responseJSON;
|
let json = $xhr.responseJSON;
|
||||||
if (json) {
|
if (json) {
|
||||||
|
|
104
templates/webadmin/iplist.html
Normal file
104
templates/webadmin/iplist.html
Normal file
|
@ -0,0 +1,104 @@
|
||||||
|
<!--
|
||||||
|
Copyright (C) 2019-2023 Nicola Murino
|
||||||
|
|
||||||
|
This program is free software: you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU Affero General Public License as published
|
||||||
|
by the Free Software Foundation, version 3.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU Affero General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Affero General Public License
|
||||||
|
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
|
-->
|
||||||
|
{{template "base" .}}
|
||||||
|
|
||||||
|
{{define "title"}}{{.Title}}{{end}}
|
||||||
|
|
||||||
|
{{define "extra_css"}}
|
||||||
|
<link href="{{.StaticURL}}/vendor/bootstrap-select/css/bootstrap-select.min.css" rel="stylesheet">
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
{{define "page_body"}}
|
||||||
|
<!-- Page Heading -->
|
||||||
|
<div class="card shadow mb-4">
|
||||||
|
<div class="card-header py-3">
|
||||||
|
<h6 class="m-0 font-weight-bold text-primary">{{.Title}}</h6>
|
||||||
|
</div>
|
||||||
|
<div class="card-body">
|
||||||
|
{{if .Error}}
|
||||||
|
<div class="card mb-4 border-left-warning">
|
||||||
|
<div class="card-body text-form-error">{{.Error}}</div>
|
||||||
|
</div>
|
||||||
|
{{end}}
|
||||||
|
<form id="iplist_form" action="{{.CurrentURL}}" method="POST" autocomplete="off">
|
||||||
|
<div class="form-group row">
|
||||||
|
<label for="idIPOrNet" class="col-sm-2 col-form-label">IP/Network</label>
|
||||||
|
<div class="col-sm-10">
|
||||||
|
<input type="text" class="form-control" id="idIPOrNet" name="ipornet" placeholder=""
|
||||||
|
value="{{.Entry.IPOrNet}}" maxlength="50" autocomplete="nope" aria-describedby="ipOrNetHelpBlock" required {{if eq .Mode 2}}readonly{{end}}>
|
||||||
|
{{if ne .Mode 2}}
|
||||||
|
<small id="ipOrNetHelpBlock" class="form-text text-muted">
|
||||||
|
IP address or network in CIDR format, for example: 192.168.1.1, 10.8.0.100/32 or 2001:db8:1234::/48
|
||||||
|
</small>
|
||||||
|
{{end}}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="form-group row">
|
||||||
|
<label for="idType" class="col-sm-2 col-form-label">Type</label>
|
||||||
|
<div class="col-sm-10">
|
||||||
|
<input type="text" class="form-control" id="idType" name="type" placeholder=""
|
||||||
|
value="{{.Entry.Type.AsString}}" maxlength="50" readonly>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{{if eq .Entry.Type 2}}
|
||||||
|
<div class="form-group row">
|
||||||
|
<label for="idMode" class="col-sm-2 col-form-label">Mode</label>
|
||||||
|
<div class="col-sm-10">
|
||||||
|
<select class="form-control selectpicker" id="idMode" name="mode">
|
||||||
|
<option value="2" {{if eq .Entry.Mode 2 }}selected{{end}}>Deny</option>
|
||||||
|
<option value="1" {{if eq .Entry.Mode 1 }}selected{{end}}>Allow</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
<div class="form-group row">
|
||||||
|
<label for="idProtocols" class="col-sm-2 col-form-label">Protocols</label>
|
||||||
|
<div class="col-sm-10">
|
||||||
|
<select class="form-control selectpicker" id="idProtocols" name="protocols" multiple title="Any">
|
||||||
|
<option value="1" {{if .Entry.HasProtocol "SSH" }}selected{{end}}>SSH</option>
|
||||||
|
<option value="2" {{if .Entry.HasProtocol "FTP" }}selected{{end}}>FTP</option>
|
||||||
|
<option value="4" {{if .Entry.HasProtocol "DAV" }}selected{{end}}>DAV</option>
|
||||||
|
<option value="8" {{if .Entry.HasProtocol "HTTP" }}selected{{end}}>HTTP</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="form-group row">
|
||||||
|
<label for="idDescription" class="col-sm-2 col-form-label">Note</label>
|
||||||
|
<div class="col-sm-10">
|
||||||
|
<input type="text" class="form-control" id="idDescription" name="description" placeholder=""
|
||||||
|
value="{{.Entry.Description}}" maxlength="512" aria-describedby="descriptionHelpBlock">
|
||||||
|
<small id="descriptionHelpBlock" class="form-text text-muted">
|
||||||
|
Optional note
|
||||||
|
</small>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<input type="hidden" name="_form_token" value="{{.CSRFToken}}">
|
||||||
|
<div class="col-sm-12 text-right px-0">
|
||||||
|
<button type="submit" class="btn btn-primary mt-3 ml-3 px-5" name="form_action" value="submit">Submit</button>
|
||||||
|
</div>
|
||||||
|
</form>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
{{define "extra_js"}}
|
||||||
|
<script src="{{.StaticURL}}/vendor/bootstrap-select/js/bootstrap-select.min.js"></script>
|
||||||
|
{{end}}
|
508
templates/webadmin/iplists.html
Normal file
508
templates/webadmin/iplists.html
Normal file
|
@ -0,0 +1,508 @@
|
||||||
|
<!--
|
||||||
|
Copyright (C) 2019-2023 Nicola Murino
|
||||||
|
|
||||||
|
This program is free software: you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU Affero General Public License as published
|
||||||
|
by the Free Software Foundation, version 3.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU Affero General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Affero General Public License
|
||||||
|
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
|
-->
|
||||||
|
{{template "base" .}}
|
||||||
|
|
||||||
|
{{define "title"}}{{.Title}}{{end}}
|
||||||
|
|
||||||
|
{{define "extra_css"}}
|
||||||
|
<link href="{{.StaticURL}}/vendor/datatables/dataTables.bootstrap4.min.css" rel="stylesheet">
|
||||||
|
<link href="{{.StaticURL}}/vendor/datatables/buttons.bootstrap4.min.css" rel="stylesheet">
|
||||||
|
<link href="{{.StaticURL}}/vendor/datatables/fixedHeader.bootstrap4.min.css" rel="stylesheet">
|
||||||
|
<link href="{{.StaticURL}}/vendor/datatables/responsive.bootstrap4.min.css" rel="stylesheet">
|
||||||
|
<link href="{{.StaticURL}}/vendor/datatables/select.bootstrap4.min.css" rel="stylesheet">
|
||||||
|
<link href="{{.StaticURL}}/vendor/bootstrap-select/css/bootstrap-select.min.css" rel="stylesheet">
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
{{define "page_body"}}
|
||||||
|
<div id="errorMsg" class="card mb-4 border-left-warning" style="display: none;">
|
||||||
|
<div id="errorTxt" class="card-body text-form-error"></div>
|
||||||
|
</div>
|
||||||
|
<div class="card shadow mb-4">
|
||||||
|
<div class="card-header py-3">
|
||||||
|
<h6 class="m-0 font-weight-bold text-primary">View and manage IP Lists</h6>
|
||||||
|
</div>
|
||||||
|
<div class="card-body">
|
||||||
|
{{if not .HasDefender}}
|
||||||
|
<div id="defender-info" class="card mb-3 border-left-info" style="display: none;">
|
||||||
|
<div class="card-body">Defender disabled in your configuration</div>
|
||||||
|
</div>
|
||||||
|
{{end}}
|
||||||
|
{{if not .IsAllowListEnabled}}
|
||||||
|
<div id="allowlist-info" class="card mb-3 border-left-info" style="display: none;">
|
||||||
|
<div class="card-body">Allowlist disabled in your configuration</div>
|
||||||
|
</div>
|
||||||
|
{{end}}
|
||||||
|
{{if not .RateLimitersStatus}}
|
||||||
|
<div id="ratelimited-info" class="card mb-3 border-left-info" style="display: none;">
|
||||||
|
<div class="card-body">Ratelimiters disabled in your configuration</div>
|
||||||
|
</div>
|
||||||
|
{{end}}
|
||||||
|
<div class="form-row">
|
||||||
|
<div class="form-group col-md-3">
|
||||||
|
<select class="form-control selectpicker" id="idListType" name="list_type" onchange="onListChanged(this.value)">
|
||||||
|
<option value="2">Defender</option>
|
||||||
|
<option value="1">Allow list</option>
|
||||||
|
<option value="3">Rate limiters safe list</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
<div class="form-group col-md-5">
|
||||||
|
</div>
|
||||||
|
<div class="form-group col-md-4">
|
||||||
|
<div class="input-group">
|
||||||
|
<input type="text" class="form-control bg-light border-0" id="idIp" name="ip" placeholder="IP/Network or initial part" aria-describedby="search-button">
|
||||||
|
<div class="input-group-append">
|
||||||
|
<button id="search-button" class="btn btn-primary" type="button" onclick="onSearchClicked()">
|
||||||
|
<i class="fas fa-search fa-sm"></i>
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="table-responsive">
|
||||||
|
<table class="table table-hover nowrap" id="dataTable" width="100%" cellspacing="0">
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th>IP/Network</th>
|
||||||
|
<th>Protocols</th>
|
||||||
|
<th>Mode</th>
|
||||||
|
<th>Note</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
</table>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div id="paginationContainer" class="m-4 d-none">
|
||||||
|
<nav aria-label="Pagination">
|
||||||
|
<ul class="pagination justify-content-end">
|
||||||
|
<li id="pageItemPrev" class="page-item disabled"><a id="pagePrevious" class="page-link" href="#" onclick="prevClicked()">Previous</a></li>
|
||||||
|
<li id="pageItemNext" class="page-item disabled"><a id="pageNext" class="page-link" href="#" onclick="nextClicked()">Next</a></li>
|
||||||
|
</ul>
|
||||||
|
</nav>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
{{define "dialog"}}
|
||||||
|
<div class="modal fade" id="deleteModal" tabindex="-1" role="dialog" aria-labelledby="deleteModalLabel"
|
||||||
|
aria-hidden="true">
|
||||||
|
<div class="modal-dialog" role="document">
|
||||||
|
<div class="modal-content">
|
||||||
|
<div class="modal-header">
|
||||||
|
<h5 class="modal-title" id="deleteModalLabel">
|
||||||
|
Confirmation required
|
||||||
|
</h5>
|
||||||
|
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
|
||||||
|
<span aria-hidden="true">×</span>
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
<div class="modal-body">Do you want to remoce the selected entry?</div>
|
||||||
|
<div class="modal-footer">
|
||||||
|
<button class="btn btn-secondary" type="button" data-dismiss="modal">
|
||||||
|
Cancel
|
||||||
|
</button>
|
||||||
|
<a class="btn btn-warning" href="#" onclick="deleteAction()">
|
||||||
|
Delete
|
||||||
|
</a>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
{{define "extra_js"}}
|
||||||
|
<script src="{{.StaticURL}}/vendor/datatables/jquery.dataTables.min.js"></script>
|
||||||
|
<script src="{{.StaticURL}}/vendor/datatables/dataTables.bootstrap4.min.js"></script>
|
||||||
|
<script src="{{.StaticURL}}/vendor/datatables/dataTables.buttons.min.js"></script>
|
||||||
|
<script src="{{.StaticURL}}/vendor/datatables/buttons.bootstrap4.min.js"></script>
|
||||||
|
<script src="{{.StaticURL}}/vendor/datatables/dataTables.fixedHeader.min.js"></script>
|
||||||
|
<script src="{{.StaticURL}}/vendor/datatables/dataTables.responsive.min.js"></script>
|
||||||
|
<script src="{{.StaticURL}}/vendor/datatables/responsive.bootstrap4.min.js"></script>
|
||||||
|
<script src="{{.StaticURL}}/vendor/datatables/dataTables.select.min.js"></script>
|
||||||
|
<script src="{{.StaticURL}}/vendor/datatables/ellipsis.js"></script>
|
||||||
|
<script src="{{.StaticURL}}/vendor/bootstrap-select/js/bootstrap-select.min.js"></script>
|
||||||
|
<script type="text/javascript">
|
||||||
|
|
||||||
|
const prefListTypeName = 'sftpgo_pref_{{.LoggedAdmin.Username}}_iplist_type';
|
||||||
|
const prefListFilter = 'sftpgo_pref_{{.LoggedAdmin.Username}}_iplist_search_filter';
|
||||||
|
const listType = getListType();
|
||||||
|
const listFilter = getSearchFilter();
|
||||||
|
|
||||||
|
if (listType === '1' || listType === '3'){
|
||||||
|
$('#idListType').val(listType);
|
||||||
|
} else {
|
||||||
|
$('#idListType').val('2');
|
||||||
|
}
|
||||||
|
|
||||||
|
if (listFilter){
|
||||||
|
$('#idIp').val(listFilter);
|
||||||
|
} else {
|
||||||
|
$('#idIp').val('');
|
||||||
|
}
|
||||||
|
|
||||||
|
const pageSize = 15;
|
||||||
|
const paginationData = new Map();
|
||||||
|
|
||||||
|
function saveListType(val) {
|
||||||
|
localStorage.setItem(prefListTypeName, val);
|
||||||
|
}
|
||||||
|
|
||||||
|
function getListType() {
|
||||||
|
return localStorage.getItem(prefListTypeName);
|
||||||
|
}
|
||||||
|
|
||||||
|
function saveSearchFilter() {
|
||||||
|
let val = $("#idIp").val();
|
||||||
|
if (val){
|
||||||
|
localStorage.setItem(prefListFilter, val);
|
||||||
|
} else {
|
||||||
|
localStorage.removeItem(prefListFilter);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function getSearchFilter() {
|
||||||
|
return localStorage.getItem(prefListFilter);
|
||||||
|
}
|
||||||
|
|
||||||
|
function resetPagination() {
|
||||||
|
$('#pageItemPrev').addClass("disabled");
|
||||||
|
$('#pageItemNext').addClass("disabled");
|
||||||
|
$('#paginationContainer').addClass("d-none");
|
||||||
|
paginationData.delete("firstIpOrNet");
|
||||||
|
paginationData.delete("lastIpOrNet");
|
||||||
|
paginationData.set("prevClicked",false);
|
||||||
|
paginationData.set("nextClicked",false);
|
||||||
|
}
|
||||||
|
|
||||||
|
function prevClicked(){
|
||||||
|
paginationData.set("prevClicked",true);
|
||||||
|
paginationData.set("nextClicked",false);
|
||||||
|
doSearch();
|
||||||
|
}
|
||||||
|
|
||||||
|
function nextClicked(){
|
||||||
|
paginationData.set("prevClicked",false);
|
||||||
|
paginationData.set("nextClicked",true);
|
||||||
|
doSearch();
|
||||||
|
}
|
||||||
|
|
||||||
|
function handleResponseData(data) {
|
||||||
|
let length = data.length;
|
||||||
|
let isNext = paginationData.get("nextClicked");
|
||||||
|
let isPrev = paginationData.get("prevClicked");
|
||||||
|
|
||||||
|
if (length > pageSize) {
|
||||||
|
data.pop();
|
||||||
|
length--;
|
||||||
|
if (isPrev || isNext){
|
||||||
|
$('#pageItemPrev').removeClass("disabled");
|
||||||
|
}
|
||||||
|
$('#pageItemNext').removeClass("disabled");
|
||||||
|
} else {
|
||||||
|
if (isPrev){
|
||||||
|
$('#pageItemPrev').addClass("disabled");
|
||||||
|
$('#pageItemNext').removeClass("disabled");
|
||||||
|
} else if (isNext){
|
||||||
|
$('#pageItemPrev').removeClass("disabled");
|
||||||
|
$('#pageItemNext').addClass("disabled");
|
||||||
|
} else {
|
||||||
|
$('#pageItemNext').addClass("disabled");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (isPrev){
|
||||||
|
data = data.reverse();
|
||||||
|
}
|
||||||
|
if (length > 0){
|
||||||
|
paginationData.set("firstIpOrNet",data[0].ipornet);
|
||||||
|
paginationData.set("lastIpOrNet",data[length-1].ipornet);
|
||||||
|
$('#paginationContainer').removeClass("d-none");
|
||||||
|
} else {
|
||||||
|
resetPagination();
|
||||||
|
}
|
||||||
|
|
||||||
|
return data;
|
||||||
|
}
|
||||||
|
|
||||||
|
function getSearchURL(){
|
||||||
|
let listType = fixedEncodeURIComponent($("#idListType").val());
|
||||||
|
let filter = encodeURIComponent($("#idIp").val());
|
||||||
|
let limit = pageSize + 1;
|
||||||
|
let from = "";
|
||||||
|
let order = "ASC"
|
||||||
|
if (paginationData.get("nextClicked") && paginationData.has("lastIpOrNet")){
|
||||||
|
from = encodeURIComponent(paginationData.get("lastIpOrNet"));
|
||||||
|
}
|
||||||
|
if (paginationData.get("prevClicked") && paginationData.has("firstIpOrNet")){
|
||||||
|
from = encodeURIComponent(paginationData.get("firstIpOrNet"));
|
||||||
|
order = "DESC";
|
||||||
|
}
|
||||||
|
return "{{.IPListsURL}}"+`/${listType}?filter=${filter}&from=${from}&limit=${limit}&order=${order}`;
|
||||||
|
}

function deleteAction() {
    let table = $('#dataTable').DataTable();
    table.button('delete:name').enable(false);
    let selectedRow = table.row({ selected: true }).data();
    let path = '{{.IPListURL}}' + "/" + fixedEncodeURIComponent(selectedRow["type"])+"/"+ fixedEncodeURIComponent(selectedRow["ipornet"]);
    $('#deleteModal').modal('hide');
    $.ajax({
        url: path,
        type: 'DELETE',
        dataType: 'json',
        headers: {'X-CSRF-TOKEN' : '{{.CSRFToken}}'},
        timeout: 15000,
        success: function (result) {
            window.location.href = '{{.IPListsURL}}';
        },
        error: function ($xhr, textStatus, errorThrown) {
            let txt = "Unable to delete the selected entry";
            if ($xhr) {
                let json = $xhr.responseJSON;
                if (json) {
                    if (json.message){
                        txt += ": " + json.message;
                    } else {
                        txt += ": " + json.error;
                    }
                }
            }
            $('#errorTxt').text(txt);
            $('#errorMsg').show();
            setTimeout(function () {
                $('#errorMsg').hide();
            }, 5000);
        }
    });
}
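
// For illustration (hypothetical entry): deleting 10.0.0.0/8 from list type 1 sends
//
//   DELETE {{.IPListURL}}/1/10.0.0.0%2F8
//
// with the CSRF token in the X-CSRF-TOKEN header; on success the browser returns to the
// list page, on failure the error text is shown in #errorMsg for five seconds.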

function setTableColumnVisibility(val){
    let column = $('#dataTable').DataTable().column(2);

    switch (val){
        case '2':
            column.visible(true);
            break;
        default:
            column.visible(false);
    }
}

function updateListTypeInfo(val) {
    let info1 = $('#allowlist-info');
    let info2 = $('#defender-info');
    let info3 = $('#ratelimited-info');
    if (info1){
        info1.hide();
    }
    if (info2){
        info2.hide();
    }
    if (info3){
        info3.hide();
    }
    switch (val){
        case '1':
            if (info1){
                info1.show();
            }
            break;
        case '2':
            if (info2){
                info2.show();
            }
            break;
        case '3':
            if (info3){
                info3.show();
            }
            break;
    }
}

function onListChanged(val){
    saveListType(val);
    updateListTypeInfo(val);
    setTableColumnVisibility(val);
    let table = $('#dataTable').DataTable();
    table.clear().draw();
    table.ajax.url(getSearchURL()).load();
}

function onSearchClicked(){
    resetPagination();
    doSearch();
    saveSearchFilter();
}

function doSearch(){
    let table = $('#dataTable').DataTable();
    table.clear().draw();
    table.ajax.url(getSearchURL()).load();
}

$(document).ready(function () {
    $.fn.dataTable.ext.buttons.add = {
        text: '<i class="fas fa-plus"></i>',
        name: 'add',
        titleAttr: "Add",
        action: function (e, dt, node, config) {
            window.location.href = '{{.IPListURL}}'+"/"+fixedEncodeURIComponent($("#idListType").val());
        }
    };

    $.fn.dataTable.ext.buttons.edit = {
        text: '<i class="fas fa-pen"></i>',
        name: 'edit',
        titleAttr: "Edit",
        action: function (e, dt, node, config) {
            let selectedRow = table.row({ selected: true }).data();
            let path = '{{.IPListURL}}' + "/" + fixedEncodeURIComponent(selectedRow["type"])+"/"+ fixedEncodeURIComponent(selectedRow["ipornet"]);
            window.location.href = path;
        },
        enabled: false
    };

    $.fn.dataTable.ext.buttons.delete = {
        text: '<i class="fas fa-trash"></i>',
        name: 'delete',
        titleAttr: "Delete",
        action: function (e, dt, node, config) {
            $('#deleteModal').modal('show');
        },
        enabled: false
    };
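
    // The add/edit/delete objects registered above follow the DataTables custom-button
    // pattern and are attached to the table in initComplete below. A minimal sketch of
    // the same pattern (hypothetical "reload" button, not used by this page):
    //
    //   $.fn.dataTable.ext.buttons.reload = {
    //       text: 'Reload',
    //       name: 'reload',
    //       action: function (e, dt, node, config) {
    //           dt.ajax.reload();
    //       }
    //   };
    //   table.button().add(0, 'reload');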

    let table = $('#dataTable').DataTable({
        "ajax": {
            "url": getSearchURL(),
            "dataSrc": handleResponseData,
            "error": function ($xhr, textStatus, errorThrown) {
                $(".dataTables_processing").hide();
                let txt = "Failed to get IP list";
                if ($xhr) {
                    let json = $xhr.responseJSON;
                    if (json) {
                        if (json.message){
                            txt += ": " + json.message;
                        } else {
                            txt += ": " + json.error;
                        }
                    }
                }
                $('#errorTxt').text(txt);
                $('#errorMsg').show();
                setTimeout(function () {
                    $('#errorMsg').hide();
                }, 10000);
            }
        },
        "deferRender": true,
        "processing": true,
        "columns": [
            { "data": "ipornet" },
            {
                "data": "protocols",
                "render": function (data, type, row) {
                    if (type === 'display') {
                        if (data == 0){
                            return "Any";
                        }
                        const protocols = [];
                        if ((data & 1) != 0){
                            protocols.push('SSH');
                        }
                        if ((data & 2) != 0){
                            protocols.push('FTP');
                        }
                        if ((data & 4) != 0){
                            protocols.push('DAV');
                        }
                        if ((data & 8) != 0){
                            protocols.push('HTTP');
                        }
                        return protocols.join(', ');
                    }
                    return data;
                }
            },
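            // The protocols value is a bit mask decoded above: 1 = SSH, 2 = FTP, 4 = DAV,
            // 8 = HTTP, and 0 means any protocol. For example, a stored value of 5 (1 | 4)
            // is displayed as "SSH, DAV", while 0 is displayed as "Any".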
            {
                "data": "mode",
                "render": function (data, type, row) {
                    if (type === 'display') {
                        if (data == 1){
                            return "Allow";
                        }
                        return "Deny";
                    }
                    return data;
                }
            },
            {
                "data": "description",
                "render": function (data, type, row) {
                    if (type === 'display') {
                        let ellipsisFn = $.fn.dataTable.render.ellipsis(70, true);
                        return ellipsisFn(data,type);
                    }
                    return data;
                }
            }
        ],
        "select": {
            "style": "single",
            "blurable": true
        },
        "buttons": [],
        "lengthChange": false,
        "columnDefs": [],
        "responsive": true,
        "searching": false,
        "paging": false,
        "info": false,
        "ordering": false,
        "language": {
            "loadingRecords": "",
            "emptyTable": "No entries found"
        },
        "initComplete": function (settings, json) {
            table.button().add(0, 'delete');
            table.button().add(0, 'edit');
            table.button().add(0, 'add');

            table.buttons().container().appendTo('.col-md-6:eq(0)', table.table().container());
        }
    });

    new $.fn.dataTable.FixedHeader(table);
    $.fn.dataTable.ext.errMode = 'none';

    table.on('select deselect', function () {
        let selectedRows = table.rows({ selected: true }).count();
        table.button('delete:name').enable(selectedRows == 1);
        table.button('edit:name').enable(selectedRows == 1);
    });

    resetPagination();

    let listType = $('#idListType').val();
    setTableColumnVisibility(listType);
    updateListTypeInfo(listType);
});

</script>
{{end}}
@ -17,10 +17,6 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.

{{define "title"}}{{.Title}}{{end}}

{{define "extra_css"}}
<link href="{{.StaticURL}}/vendor/bootstrap-select/css/bootstrap-select.min.css" rel="stylesheet">
{{end}}

{{define "page_body"}}
<!-- Page Heading -->
<div class="card shadow mb-4">