Merge branch 'master' into appsec_socket

Laurence Jones, 2024-02-20 13:07:30 +00:00, committed by GitHub
commit 4342c7bf02
GPG key ID: B5690EEEBB952194
184 changed files with 10960 additions and 10874 deletions


@ -1,4 +1,4 @@
name: Hub tests
name: (sub) Bats / Hub
on:
workflow_call:
@ -8,16 +8,13 @@ on:
GIST_BADGES_ID:
required: true
env:
PREFIX_TEST_NAMES_WITH_FILE: true
jobs:
build:
strategy:
matrix:
test-file: ["hub-1.bats", "hub-2.bats", "hub-3.bats"]
name: "Build + tests"
name: "Functional tests"
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
@ -28,15 +25,15 @@ jobs:
echo githubciXXXXXXXXXXXXXXXXXXXXXXXX | sudo tee /etc/machine-id
- name: "Check out CrowdSec repository"
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: "Set up Go"
uses: actions/setup-go@v4
uses: actions/setup-go@v5
with:
go-version: "1.21.6"
go-version: "1.21.7"
- name: "Install bats dependencies"
env:
@ -50,7 +47,7 @@ jobs:
- name: "Run hub tests"
run: |
./test/bin/generate-hub-tests
./test/run-tests test/dyn-bats/${{ matrix.test-file }}
./test/run-tests ./test/dyn-bats/${{ matrix.test-file }} --formatter $(pwd)/test/lib/color-formatter
- name: "Collect hub coverage"
run: ./test/bin/collect-hub-coverage >> $GITHUB_ENV


@ -1,4 +1,4 @@
name: Functional tests (MySQL)
name: (sub) Bats / MySQL
on:
workflow_call:
@ -7,16 +7,9 @@ on:
required: true
type: string
env:
PREFIX_TEST_NAMES_WITH_FILE: true
jobs:
build:
strategy:
matrix:
go-version: ["1.21.6"]
name: "Build + tests"
name: "Functional tests"
runs-on: ubuntu-latest
timeout-minutes: 30
services:
@ -35,15 +28,15 @@ jobs:
echo githubciXXXXXXXXXXXXXXXXXXXXXXXX | sudo tee /etc/machine-id
- name: "Check out CrowdSec repository"
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: "Set up Go ${{ matrix.go-version }}"
uses: actions/setup-go@v4
- name: "Set up Go"
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
go-version: "1.21.7"
- name: "Install bats dependencies"
env:
@ -62,7 +55,7 @@ jobs:
MYSQL_USER: root
- name: "Run tests"
run: make bats-test
run: ./test/run-tests ./test/bats --formatter $(pwd)/test/lib/color-formatter
env:
DB_BACKEND: mysql
MYSQL_HOST: 127.0.0.1


@ -1,18 +1,11 @@
name: Functional tests (Postgres)
name: (sub) Bats / Postgres
on:
workflow_call:
env:
PREFIX_TEST_NAMES_WITH_FILE: true
jobs:
build:
strategy:
matrix:
go-version: ["1.21.6"]
name: "Build + tests"
name: "Functional tests"
runs-on: ubuntu-latest
timeout-minutes: 30
services:
@ -44,15 +37,15 @@ jobs:
echo githubciXXXXXXXXXXXXXXXXXXXXXXXX | sudo tee /etc/machine-id
- name: "Check out CrowdSec repository"
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: "Set up Go ${{ matrix.go-version }}"
uses: actions/setup-go@v4
- name: "Set up Go"
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
go-version: "1.21.7"
- name: "Install bats dependencies"
env:
@ -71,7 +64,7 @@ jobs:
PGUSER: postgres
- name: "Run tests (DB_BACKEND: pgx)"
run: make bats-test
run: ./test/run-tests ./test/bats --formatter $(pwd)/test/lib/color-formatter
env:
DB_BACKEND: pgx
PGHOST: 127.0.0.1


@ -1,19 +1,14 @@
name: Functional tests (sqlite)
name: (sub) Bats / sqlite + coverage
on:
workflow_call:
env:
PREFIX_TEST_NAMES_WITH_FILE: true
TEST_COVERAGE: true
jobs:
build:
strategy:
matrix:
go-version: ["1.21.6"]
name: "Build + tests"
name: "Functional tests"
runs-on: ubuntu-latest
timeout-minutes: 20
@ -25,15 +20,15 @@ jobs:
echo githubciXXXXXXXXXXXXXXXXXXXXXXXX | sudo tee /etc/machine-id
- name: "Check out CrowdSec repository"
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: "Set up Go ${{ matrix.go-version }}"
uses: actions/setup-go@v4
- name: "Set up Go"
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
go-version: "1.21.7"
- name: "Install bats dependencies"
env:
@ -46,7 +41,7 @@ jobs:
make clean bats-build bats-fixture BUILD_STATIC=1
- name: "Run tests"
run: make bats-test
run: ./test/run-tests ./test/bats --formatter $(pwd)/test/lib/color-formatter
- name: "Collect coverage data"
run: |


@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Cleanup
run: |


@ -21,25 +21,21 @@ on:
jobs:
build:
strategy:
matrix:
go-version: ["1.21.6"]
name: Build
runs-on: windows-2019
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: false
- name: "Set up Go ${{ matrix.go-version }}"
uses: actions/setup-go@v4
- name: "Set up Go"
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
go-version: "1.21.7"
- name: Build
run: make windows_installer BUILD_RE2_WASM=1


@ -44,14 +44,20 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# required to pick up tags for BUILD_VERSION
fetch-depth: 0
- name: "Set up Go"
uses: actions/setup-go@v5
with:
go-version: "1.21.7"
cache-dependency-path: "**/go.sum"
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@ -71,14 +77,8 @@ jobs:
# and modify them (or add more) to build your code if your project
# uses a compiled language
- name: "Set up Go"
uses: actions/setup-go@v4
with:
go-version: "1.21.6"
cache-dependency-path: "**/go.sum"
- run: |
make clean build BUILD_RE2_WASM=1
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
uses: github/codeql-action/analyze@v3


@ -15,59 +15,42 @@ on:
- 'README.md'
jobs:
test_docker_image:
test_flavor:
strategy:
# we could test all the flavors in a single pytest job,
# but let's split them (and the image build) in multiple runners for performance
matrix:
# can be slim, full or debian (no debian slim).
flavor: ["slim", "debian"]
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Check out the repo
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
with:
config: .github/buildkit.toml
- name: "Build flavor: slim"
uses: docker/build-push-action@v4
- name: "Build image"
uses: docker/build-push-action@v5
with:
context: .
file: ./Dockerfile
tags: crowdsecurity/crowdsec:test-slim
target: slim
platforms: linux/amd64
load: true
cache-from: type=gha
cache-to: type=gha,mode=min
- name: "Build flavor: full"
uses: docker/build-push-action@v4
with:
context: .
file: ./Dockerfile
tags: crowdsecurity/crowdsec:test
target: full
platforms: linux/amd64
load: true
cache-from: type=gha
cache-to: type=gha,mode=min
- name: "Build flavor: full (debian)"
uses: docker/build-push-action@v4
with:
context: .
file: ./Dockerfile.debian
tags: crowdsecurity/crowdsec:test-debian
target: full
file: ./Dockerfile${{ matrix.flavor == 'debian' && '.debian' || '' }}
tags: crowdsecurity/crowdsec:test${{ matrix.flavor == 'full' && '' || '-' }}${{ matrix.flavor == 'full' && '' || matrix.flavor }}
target: ${{ matrix.flavor == 'debian' && 'full' || matrix.flavor }}
platforms: linux/amd64
load: true
cache-from: type=gha
cache-to: type=gha,mode=min
- name: "Setup Python"
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: "3.x"
@ -78,7 +61,7 @@ jobs:
- name: "Cache virtualenvs"
id: cache-pipenv
uses: actions/cache@v3
uses: actions/cache@v4
with:
path: ~/.local/share/virtualenvs
key: ${{ runner.os }}-pipenv-${{ hashFiles('**/Pipfile.lock') }}
@ -95,9 +78,10 @@ jobs:
- name: "Run tests"
env:
CROWDSEC_TEST_VERSION: test
CROWDSEC_TEST_FLAVORS: slim,debian
CROWDSEC_TEST_FLAVORS: ${{ matrix.flavor }}
CROWDSEC_TEST_NETWORK: net-test
CROWDSEC_TEST_TIMEOUT: 90
# running serially to reduce test flakiness
run: |
cd docker/test
pipenv run pytest -n 2 --durations=0 --color=yes
pipenv run pytest -n 1 --durations=0 --color=yes


@ -20,25 +20,21 @@ env:
jobs:
build:
strategy:
matrix:
go-version: ["1.21.6"]
name: "Build + tests"
runs-on: windows-2022
steps:
- name: Check out CrowdSec repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: false
- name: "Set up Go ${{ matrix.go-version }}"
uses: actions/setup-go@v4
- name: "Set up Go"
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
go-version: "1.21.7"
- name: Build
run: |


@ -118,15 +118,15 @@ jobs:
steps:
- name: Check out CrowdSec repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: false
- name: "Set up Go"
uses: actions/setup-go@v4
uses: actions/setup-go@v5
with:
go-version: "1.21.6"
go-version: "1.21.7"
- name: Create localstack streams
run: |


@ -0,0 +1,47 @@
name: (push-master) Publish latest Docker images
on:
push:
branches: [ master ]
paths:
- 'pkg/**'
- 'cmd/**'
- 'mk/**'
- 'docker/docker_start.sh'
- 'docker/config.yaml'
- '.github/workflows/publish-docker-master.yml'
- '.github/workflows/publish-docker.yml'
- 'Dockerfile'
- 'Dockerfile.debian'
- 'go.mod'
- 'go.sum'
- 'Makefile'
jobs:
dev-alpine:
uses: ./.github/workflows/publish-docker.yml
with:
platform: linux/amd64
crowdsec_version: ""
image_version: dev
latest: false
push: true
slim: false
debian: false
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
dev-debian:
uses: ./.github/workflows/publish-docker.yml
with:
platform: linux/amd64
crowdsec_version: ""
image_version: dev
latest: false
push: true
slim: false
debian: true
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}


@ -0,0 +1,48 @@
name: (manual) Publish Docker images
on:
workflow_dispatch:
inputs:
image_version:
description: Docker Image version (base tag, i.e. v1.6.0-2)
required: true
crowdsec_version:
description: Crowdsec version (BUILD_VERSION)
required: true
latest:
description: Overwrite latest (and slim) tags?
default: false
required: true
push:
description: Really push?
default: false
required: true
jobs:
alpine:
uses: ./.github/workflows/publish-docker.yml
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
with:
image_version: ${{ github.event.inputs.image_version }}
crowdsec_version: ${{ github.event.inputs.crowdsec_version }}
latest: ${{ github.event.inputs.latest == 'true' }}
push: ${{ github.event.inputs.push == 'true' }}
slim: true
debian: false
platform: "linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6"
debian:
uses: ./.github/workflows/publish-docker.yml
secrets:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
with:
image_version: ${{ github.event.inputs.image_version }}
crowdsec_version: ${{ github.event.inputs.crowdsec_version }}
latest: ${{ github.event.inputs.latest == 'true' }}
push: ${{ github.event.inputs.push == 'true' }}
slim: false
debian: true
platform: "linux/amd64,linux/386,linux/arm64"

.github/workflows/publish-docker.yml (new file, 125 lines)

@ -0,0 +1,125 @@
name: (sub) Publish Docker images
on:
workflow_call:
secrets:
DOCKER_USERNAME:
required: true
DOCKER_PASSWORD:
required: true
inputs:
platform:
required: true
type: string
image_version:
required: true
type: string
crowdsec_version:
required: true
type: string
latest:
required: true
type: boolean
push:
required: true
type: boolean
slim:
required: true
type: boolean
debian:
required: true
type: boolean
jobs:
push_to_registry:
name: Push Docker image to registries
runs-on: ubuntu-latest
steps:
- name: Check out the repo
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
config: .github/buildkit.toml
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Prepare (slim)
if: ${{ inputs.slim }}
id: slim
run: |
DOCKERHUB_IMAGE=${{ secrets.DOCKER_USERNAME }}/crowdsec
GHCR_IMAGE=ghcr.io/${{ github.repository_owner }}/crowdsec
VERSION=${{ inputs.image_version }}
DEBIAN=${{ inputs.debian && '-debian' || '' }}
TAGS="${DOCKERHUB_IMAGE}:${VERSION}-slim${DEBIAN},${GHCR_IMAGE}:${VERSION}-slim${DEBIAN}"
if [[ ${{ inputs.latest }} == true ]]; then
TAGS=$TAGS,${DOCKERHUB_IMAGE}:slim${DEBIAN},${GHCR_IMAGE}:slim${DEBIAN}
fi
echo "tags=${TAGS}" >> $GITHUB_OUTPUT
echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
- name: Prepare (full)
id: full
run: |
DOCKERHUB_IMAGE=${{ secrets.DOCKER_USERNAME }}/crowdsec
GHCR_IMAGE=ghcr.io/${{ github.repository_owner }}/crowdsec
VERSION=${{ inputs.image_version }}
DEBIAN=${{ inputs.debian && '-debian' || '' }}
TAGS="${DOCKERHUB_IMAGE}:${VERSION}${DEBIAN},${GHCR_IMAGE}:${VERSION}${DEBIAN}"
if [[ ${{ inputs.latest }} == true ]]; then
TAGS=$TAGS,${DOCKERHUB_IMAGE}:latest${DEBIAN},${GHCR_IMAGE}:latest${DEBIAN}
fi
echo "tags=${TAGS}" >> $GITHUB_OUTPUT
echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
- name: Build and push image (slim)
if: ${{ inputs.slim }}
uses: docker/build-push-action@v5
with:
context: .
file: ./Dockerfile${{ inputs.debian && '.debian' || '' }}
push: ${{ inputs.push }}
tags: ${{ steps.slim.outputs.tags }}
target: slim
platforms: ${{ inputs.platform }}
labels: |
org.opencontainers.image.source=${{ github.event.repository.html_url }}
org.opencontainers.image.created=${{ steps.slim.outputs.created }}
org.opencontainers.image.revision=${{ github.sha }}
build-args: |
BUILD_VERSION=${{ inputs.crowdsec_version }}
- name: Build and push image (full)
uses: docker/build-push-action@v5
with:
context: .
file: ./Dockerfile${{ inputs.debian && '.debian' || '' }}
push: ${{ inputs.push }}
tags: ${{ steps.full.outputs.tags }}
target: full
platforms: ${{ inputs.platform }}
labels: |
org.opencontainers.image.source=${{ github.event.repository.html_url }}
org.opencontainers.image.created=${{ steps.full.outputs.created }}
org.opencontainers.image.revision=${{ github.sha }}
build-args: |
BUILD_VERSION=${{ inputs.crowdsec_version }}
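
Note: the two "Prepare" steps above compose the image tag lists in bash from the slim/debian/latest inputs. Below is a minimal Go sketch of the same composition for a single registry (the function and variable names are illustrative only; the workflow itself keeps this logic in the shell steps and joins the DockerHub and GHCR lists with commas):

```go
package main

import "fmt"

// tags mirrors the "Prepare (slim)" / "Prepare (full)" steps above for one
// registry (sketch only). target is "slim" or "full".
func tags(repo, version, target string, debian, latest bool) []string {
	deb := ""
	if debian {
		deb = "-debian"
	}
	versioned := version
	floating := "latest"
	if target == "slim" {
		versioned += "-slim"
		floating = "slim"
	}
	out := []string{repo + ":" + versioned + deb}
	if latest {
		out = append(out, repo+":"+floating+deb)
	}
	return out
}

func main() {
	// e.g. version v1.6.0-2, slim target, alpine (not debian), latest=true:
	// [crowdsecurity/crowdsec:v1.6.0-2-slim crowdsecurity/crowdsec:slim]
	fmt.Println(tags("crowdsecurity/crowdsec", "v1.6.0-2", "slim", false, true))
}
```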


@ -1,5 +1,5 @@
# .github/workflows/build-docker-image.yml
name: build
name: Release
on:
release:
@ -12,24 +12,20 @@ permissions:
jobs:
build:
strategy:
matrix:
go-version: ["1.21.6"]
name: Build and upload binary package
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: false
- name: "Set up Go ${{ matrix.go-version }}"
uses: actions/setup-go@v4
- name: "Set up Go"
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
go-version: "1.21.7"
- name: Build the binaries
run: |


@ -1,71 +0,0 @@
name: Publish Debian Docker image on Push to Master
on:
push:
branches: [ master ]
paths:
- 'pkg/**'
- 'cmd/**'
- 'plugins/**'
- 'docker/docker_start.sh'
- 'docker/config.yaml'
- '.github/workflows/publish_docker-image_on_master-debian.yml'
- 'Dockerfile.debian'
- 'go.mod'
- 'go.sum'
- 'Makefile'
jobs:
push_to_registry:
name: Push Debian Docker image to Docker Hub
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'crowdsecurity' }}
steps:
- name: Check out the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=crowdsecurity/crowdsec
GHCR_IMAGE=ghcr.io/${{ github.repository_owner }}/crowdsec
VERSION=dev-debian
TAGS="${DOCKER_IMAGE}:${VERSION},${GHCR_IMAGE}:${VERSION}"
echo "tags=${TAGS}" >> $GITHUB_OUTPUT
echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
config: .github/buildkit.toml
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push full image
uses: docker/build-push-action@v4
with:
context: .
file: ./Dockerfile.debian
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.prep.outputs.tags }}
platforms: linux/amd64
labels: |
org.opencontainers.image.source=${{ github.event.repository.html_url }}
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
org.opencontainers.image.revision=${{ github.sha }}
cache-from: type=gha
cache-to: type=gha,mode=min


@ -1,71 +0,0 @@
name: Publish Docker image on Push to Master
on:
push:
branches: [ master ]
paths:
- 'pkg/**'
- 'cmd/**'
- 'plugins/**'
- 'docker/docker_start.sh'
- 'docker/config.yaml'
- '.github/workflows/publish_docker-image_on_master.yml'
- 'Dockerfile'
- 'go.mod'
- 'go.sum'
- 'Makefile'
jobs:
push_to_registry:
name: Push Docker image to Docker Hub
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'crowdsecurity' }}
steps:
- name: Check out the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=crowdsecurity/crowdsec
GHCR_IMAGE=ghcr.io/${{ github.repository_owner }}/crowdsec
VERSION=dev
TAGS="${DOCKER_IMAGE}:${VERSION},${GHCR_IMAGE}:${VERSION}"
echo "tags=${TAGS}" >> $GITHUB_OUTPUT
echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
config: .github/buildkit.toml
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push full image
uses: docker/build-push-action@v4
with:
context: .
file: ./Dockerfile
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.prep.outputs.tags }}
platforms: linux/amd64
labels: |
org.opencontainers.image.source=${{ github.event.repository.html_url }}
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
org.opencontainers.image.revision=${{ github.sha }}
cache-from: type=gha
cache-to: type=gha,mode=min


@ -1,61 +0,0 @@
name: Publish Docker Debian image
on:
release:
types:
- released
- prereleased
workflow_dispatch:
jobs:
push_to_registry:
name: Push Docker debian image to Docker Hub
runs-on: ubuntu-latest
steps:
- name: Check out the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=crowdsecurity/crowdsec
VERSION=bullseye
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF#refs/tags/}
elif [[ $GITHUB_REF == refs/heads/* ]]; then
VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -E 's#/+#-#g')
elif [[ $GITHUB_REF == refs/pull/* ]]; then
VERSION=pr-${{ github.event.number }}
fi
TAGS="${DOCKER_IMAGE}:${VERSION}-debian"
if [[ "${{ github.event.action }}" == "released" ]]; then
TAGS=$TAGS,${DOCKER_IMAGE}:latest-debian
fi
echo "version=${VERSION}" >> $GITHUB_OUTPUT
echo "tags=${TAGS}" >> $GITHUB_OUTPUT
echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
config: .github/buildkit.toml
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Build and push
uses: docker/build-push-action@v4
with:
context: .
file: ./Dockerfile.debian
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.prep.outputs.tags }}
platforms: linux/amd64,linux/arm64,linux/386
labels: |
org.opencontainers.image.source=${{ github.event.repository.html_url }}
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
org.opencontainers.image.revision=${{ github.sha }}


@ -1,86 +0,0 @@
name: Publish Docker image
on:
release:
types:
- released
- prereleased
jobs:
push_to_registry:
name: Push Docker image to Docker Hub
runs-on: ubuntu-latest
steps:
- name: Check out the repo
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=crowdsecurity/crowdsec
GHCR_IMAGE=ghcr.io/${{ github.repository_owner }}/crowdsec
VERSION=edge
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF#refs/tags/}
elif [[ $GITHUB_REF == refs/heads/* ]]; then
VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -E 's#/+#-#g')
elif [[ $GITHUB_REF == refs/pull/* ]]; then
VERSION=pr-${{ github.event.number }}
fi
TAGS="${DOCKER_IMAGE}:${VERSION},${GHCR_IMAGE}:${VERSION}"
TAGS_SLIM="${DOCKER_IMAGE}:${VERSION}-slim,${GHCR_IMAGE}:${VERSION}-slim"
if [[ ${{ github.event.action }} == released ]]; then
TAGS=$TAGS,${DOCKER_IMAGE}:latest,${GHCR_IMAGE}:latest
TAGS_SLIM=$TAGS_SLIM,${DOCKER_IMAGE}:slim,${GHCR_IMAGE}:slim
fi
echo "version=${VERSION}" >> $GITHUB_OUTPUT
echo "tags=${TAGS}" >> $GITHUB_OUTPUT
echo "tags_slim=${TAGS_SLIM}" >> $GITHUB_OUTPUT
echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
config: .github/buildkit.toml
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push slim image
uses: docker/build-push-action@v4
with:
context: .
file: ./Dockerfile
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.prep.outputs.tags_slim }}
target: slim
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6,linux/386
labels: |
org.opencontainers.image.source=${{ github.event.repository.html_url }}
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
org.opencontainers.image.revision=${{ github.sha }}
- name: Build and push full image
uses: docker/build-push-action@v4
with:
context: .
file: ./Dockerfile
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.prep.outputs.tags }}
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6,linux/386
labels: |
org.opencontainers.image.source=${{ github.event.repository.html_url }}
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
org.opencontainers.image.revision=${{ github.sha }}


@ -1,4 +1,4 @@
name: Update Docker Hub README
name: (push-master) Update Docker Hub README
on:
push:
@ -13,7 +13,7 @@ jobs:
steps:
-
name: Check out the repo
uses: actions/checkout@v3
uses: actions/checkout@v4
if: ${{ github.repository_owner == 'crowdsecurity' }}
-
name: Update docker hub README


@ -11,7 +11,7 @@ run:
linters-settings:
cyclop:
# lower this after refactoring
max-complexity: 66
max-complexity: 53
gci:
sections:
@ -26,7 +26,7 @@ linters-settings:
gocyclo:
# lower this after refactoring
min-complexity: 64
min-complexity: 49
funlen:
# Checks the number of lines in a function.
@ -46,14 +46,14 @@ linters-settings:
maintidx:
# raise this after refactoring
under: 9
under: 11
misspell:
locale: US
nestif:
# lower this after refactoring
min-complexity: 27
min-complexity: 28
nlreturn:
block-size: 4
@ -73,6 +73,10 @@ linters-settings:
- pkg: "github.com/pkg/errors"
desc: "errors.Wrap() is deprecated in favor of fmt.Errorf()"
wsl:
# Allow blocks to end with comments
allow-trailing-comment: true
linters:
enable-all: true
disable:
@ -105,6 +109,7 @@ linters:
# - durationcheck # check for two durations multiplied together
# - errcheck # Errcheck is a program for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases
# - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13.
# - execinquery # execinquery is a linter about query string checker in Query function which reads your Go src files and warning it finds
# - exportloopref # checks for pointers to enclosing loop variables
# - funlen # Tool for detection of long functions
# - ginkgolinter # enforces standards of using ginkgo and gomega
@ -203,7 +208,6 @@ linters:
#
# Too strict / too many false positives (for now?)
#
- execinquery # execinquery is a linter about query string checker in Query function which reads your Go src files and warning it finds
- exhaustruct # Checks if all structure fields are initialized
- forbidigo # Forbids identifiers
- gochecknoglobals # check that no global variables exist
@ -263,6 +267,10 @@ issues:
- perfsprint
text: "fmt.Sprintf can be replaced .*"
- linters:
- perfsprint
text: "fmt.Errorf can be replaced with errors.New"
#
# Will fix, easy but some neurons required
#
@ -295,10 +303,6 @@ issues:
- nosprintfhostport
text: "host:port in url should be constructed with net.JoinHostPort and not directly with fmt.Sprintf"
- linters:
- wastedassign
text: "assigned to .*, but reassigned without using the value"
# https://github.com/timakin/bodyclose
- linters:
- bodyclose
@ -310,19 +314,10 @@ issues:
- nonamedreturns
text: "named return .* with type .* found"
# https://github.com/alexkohler/nakedret#purpose
- linters:
- nakedret
text: "naked return in func .* with .* lines of code"
#
# Will fix, might be trickier
#
- linters:
- staticcheck
text: "x509.ParseCRL has been deprecated since Go 1.19: Use ParseRevocationList instead"
# https://github.com/pkg/errors/issues/245
- linters:
- depguard


@ -1,8 +1,7 @@
# vim: set ft=dockerfile:
ARG GOVERSION=1.21.6
ARG BUILD_VERSION
FROM golang:1.21.7-alpine3.18 AS build
FROM golang:${GOVERSION}-alpine3.18 AS build
ARG BUILD_VERSION
WORKDIR /go/src/crowdsec
@ -40,10 +39,8 @@ RUN apk add --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/co
mkdir -p /staging/var/lib/crowdsec && \
mkdir -p /var/lib/crowdsec/data
COPY --from=build /go/bin/yq /usr/local/bin/yq
COPY --from=build /go/bin/yq /usr/local/bin/crowdsec /usr/local/bin/cscli /usr/local/bin/
COPY --from=build /etc/crowdsec /staging/etc/crowdsec
COPY --from=build /usr/local/bin/crowdsec /usr/local/bin/crowdsec
COPY --from=build /usr/local/bin/cscli /usr/local/bin/cscli
COPY --from=build /go/src/crowdsec/docker/docker_start.sh /
COPY --from=build /go/src/crowdsec/docker/config.yaml /staging/etc/crowdsec/config.yaml
RUN yq -n '.url="http://0.0.0.0:8080"' | install -m 0600 /dev/stdin /staging/etc/crowdsec/local_api_credentials.yaml
@ -54,11 +51,14 @@ FROM slim as plugins
# Due to the wizard using cp -n, we have to copy the config files directly from the source as -n does not exist in busybox cp
# The files are here for reference, as users will need to mount a new version to be actually able to use notifications
COPY --from=build /go/src/crowdsec/cmd/notification-email/email.yaml /staging/etc/crowdsec/notifications/email.yaml
COPY --from=build /go/src/crowdsec/cmd/notification-http/http.yaml /staging/etc/crowdsec/notifications/http.yaml
COPY --from=build /go/src/crowdsec/cmd/notification-slack/slack.yaml /staging/etc/crowdsec/notifications/slack.yaml
COPY --from=build /go/src/crowdsec/cmd/notification-splunk/splunk.yaml /staging/etc/crowdsec/notifications/splunk.yaml
COPY --from=build /go/src/crowdsec/cmd/notification-sentinel/sentinel.yaml /staging/etc/crowdsec/notifications/sentinel.yaml
COPY --from=build \
/go/src/crowdsec/cmd/notification-email/email.yaml \
/go/src/crowdsec/cmd/notification-http/http.yaml \
/go/src/crowdsec/cmd/notification-slack/slack.yaml \
/go/src/crowdsec/cmd/notification-splunk/splunk.yaml \
/go/src/crowdsec/cmd/notification-sentinel/sentinel.yaml \
/staging/etc/crowdsec/notifications/
COPY --from=build /usr/local/lib/crowdsec/plugins /usr/local/lib/crowdsec/plugins
FROM slim as geoip


@ -1,8 +1,7 @@
# vim: set ft=dockerfile:
ARG GOVERSION=1.21.6
ARG BUILD_VERSION
FROM golang:1.21.7-bookworm AS build
FROM golang:${GOVERSION}-bookworm AS build
ARG BUILD_VERSION
WORKDIR /go/src/crowdsec
@ -56,10 +55,8 @@ RUN apt-get update && \
mkdir -p /staging/var/lib/crowdsec && \
mkdir -p /var/lib/crowdsec/data
COPY --from=build /go/bin/yq /usr/local/bin/yq
COPY --from=build /go/bin/yq /usr/local/bin/crowdsec /usr/local/bin/cscli /usr/local/bin/
COPY --from=build /etc/crowdsec /staging/etc/crowdsec
COPY --from=build /usr/local/bin/crowdsec /usr/local/bin/crowdsec
COPY --from=build /usr/local/bin/cscli /usr/local/bin/cscli
COPY --from=build /go/src/crowdsec/docker/docker_start.sh /
COPY --from=build /go/src/crowdsec/docker/config.yaml /staging/etc/crowdsec/config.yaml
RUN yq -n '.url="http://0.0.0.0:8080"' | install -m 0600 /dev/stdin /staging/etc/crowdsec/local_api_credentials.yaml && \
@ -71,11 +68,14 @@ FROM slim as plugins
# Due to the wizard using cp -n, we have to copy the config files directly from the source as -n does not exist in busybox cp
# The files are here for reference, as users will need to mount a new version to be actually able to use notifications
COPY --from=build /go/src/crowdsec/cmd/notification-email/email.yaml /staging/etc/crowdsec/notifications/email.yaml
COPY --from=build /go/src/crowdsec/cmd/notification-http/http.yaml /staging/etc/crowdsec/notifications/http.yaml
COPY --from=build /go/src/crowdsec/cmd/notification-slack/slack.yaml /staging/etc/crowdsec/notifications/slack.yaml
COPY --from=build /go/src/crowdsec/cmd/notification-splunk/splunk.yaml /staging/etc/crowdsec/notifications/splunk.yaml
COPY --from=build /go/src/crowdsec/cmd/notification-sentinel/sentinel.yaml /staging/etc/crowdsec/notifications/sentinel.yaml
COPY --from=build \
/go/src/crowdsec/cmd/notification-email/email.yaml \
/go/src/crowdsec/cmd/notification-http/http.yaml \
/go/src/crowdsec/cmd/notification-slack/slack.yaml \
/go/src/crowdsec/cmd/notification-splunk/splunk.yaml \
/go/src/crowdsec/cmd/notification-sentinel/sentinel.yaml \
/staging/etc/crowdsec/notifications/
COPY --from=build /usr/local/lib/crowdsec/plugins /usr/local/lib/crowdsec/plugins
FROM slim as geoip


@ -27,7 +27,7 @@ stages:
- task: GoTool@0
displayName: "Install Go"
inputs:
version: '1.21.6'
version: '1.21.7'
- pwsh: |
choco install -y make


@ -11,7 +11,6 @@ import (
"strconv"
"strings"
"text/template"
"time"
"github.com/fatih/color"
"github.com/go-openapi/strfmt"
@ -30,83 +29,46 @@ import (
func DecisionsFromAlert(alert *models.Alert) string {
ret := ""
var decMap = make(map[string]int)
decMap := make(map[string]int)
for _, decision := range alert.Decisions {
k := *decision.Type
if *decision.Simulated {
k = fmt.Sprintf("(simul)%s", k)
}
v := decMap[k]
decMap[k] = v + 1
}
for k, v := range decMap {
if len(ret) > 0 {
ret += " "
}
ret += fmt.Sprintf("%s:%d", k, v)
}
return ret
}
func DateFromAlert(alert *models.Alert) string {
ts, err := time.Parse(time.RFC3339, alert.CreatedAt)
if err != nil {
log.Infof("while parsing %s with %s : %s", alert.CreatedAt, time.RFC3339, err)
return alert.CreatedAt
}
return ts.Format(time.RFC822)
}
func SourceFromAlert(alert *models.Alert) string {
//more than one item, just number and scope
if len(alert.Decisions) > 1 {
return fmt.Sprintf("%d %ss (%s)", len(alert.Decisions), *alert.Decisions[0].Scope, *alert.Decisions[0].Origin)
}
//fallback on single decision information
if len(alert.Decisions) == 1 {
return fmt.Sprintf("%s:%s", *alert.Decisions[0].Scope, *alert.Decisions[0].Value)
}
//try to compose a human friendly version
if *alert.Source.Value != "" && *alert.Source.Scope != "" {
scope := ""
scope = fmt.Sprintf("%s:%s", *alert.Source.Scope, *alert.Source.Value)
extra := ""
if alert.Source.Cn != "" {
extra = alert.Source.Cn
}
if alert.Source.AsNumber != "" {
extra += fmt.Sprintf("/%s", alert.Source.AsNumber)
}
if alert.Source.AsName != "" {
extra += fmt.Sprintf("/%s", alert.Source.AsName)
}
if extra != "" {
scope += " (" + extra + ")"
}
return scope
}
return ""
}
func AlertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error {
if csConfig.Cscli.Output == "raw" {
func (cli *cliAlerts) alertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error {
switch cli.cfg().Cscli.Output {
case "raw":
csvwriter := csv.NewWriter(os.Stdout)
header := []string{"id", "scope", "value", "reason", "country", "as", "decisions", "created_at"}
if printMachine {
header = append(header, "machine")
}
err := csvwriter.Write(header)
if err != nil {
if err := csvwriter.Write(header); err != nil {
return err
}
for _, alertItem := range *alerts {
row := []string{
fmt.Sprintf("%d", alertItem.ID),
strconv.FormatInt(alertItem.ID, 10),
*alertItem.Source.Scope,
*alertItem.Source.Value,
*alertItem.Scenario,
@ -118,28 +80,32 @@ func AlertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error {
if printMachine {
row = append(row, alertItem.MachineID)
}
err := csvwriter.Write(row)
if err != nil {
if err := csvwriter.Write(row); err != nil {
return err
}
}
csvwriter.Flush()
} else if csConfig.Cscli.Output == "json" {
case "json":
if *alerts == nil {
// avoid returning "null" in json
// could be cleaner if we used slice of alerts directly
fmt.Println("[]")
return nil
}
x, _ := json.MarshalIndent(alerts, "", " ")
fmt.Printf("%s", string(x))
} else if csConfig.Cscli.Output == "human" {
fmt.Print(string(x))
case "human":
if len(*alerts) == 0 {
fmt.Println("No active alerts")
return nil
}
alertsTable(color.Output, alerts, printMachine)
}
return nil
}
@ -161,77 +127,86 @@ var alertTemplate = `
`
func DisplayOneAlert(alert *models.Alert, withDetail bool) error {
if csConfig.Cscli.Output == "human" {
tmpl, err := template.New("alert").Parse(alertTemplate)
if err != nil {
return err
}
err = tmpl.Execute(os.Stdout, alert)
if err != nil {
return err
}
func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) error {
tmpl, err := template.New("alert").Parse(alertTemplate)
if err != nil {
return err
}
alertDecisionsTable(color.Output, alert)
if err = tmpl.Execute(os.Stdout, alert); err != nil {
return err
}
if len(alert.Meta) > 0 {
fmt.Printf("\n - Context :\n")
sort.Slice(alert.Meta, func(i, j int) bool {
return alert.Meta[i].Key < alert.Meta[j].Key
})
table := newTable(color.Output)
table.SetRowLines(false)
table.SetHeaders("Key", "Value")
for _, meta := range alert.Meta {
var valSlice []string
if err := json.Unmarshal([]byte(meta.Value), &valSlice); err != nil {
return fmt.Errorf("unknown context value type '%s' : %s", meta.Value, err)
}
for _, value := range valSlice {
table.AddRow(
meta.Key,
value,
)
}
alertDecisionsTable(color.Output, alert)
if len(alert.Meta) > 0 {
fmt.Printf("\n - Context :\n")
sort.Slice(alert.Meta, func(i, j int) bool {
return alert.Meta[i].Key < alert.Meta[j].Key
})
table := newTable(color.Output)
table.SetRowLines(false)
table.SetHeaders("Key", "Value")
for _, meta := range alert.Meta {
var valSlice []string
if err := json.Unmarshal([]byte(meta.Value), &valSlice); err != nil {
return fmt.Errorf("unknown context value type '%s': %w", meta.Value, err)
}
for _, value := range valSlice {
table.AddRow(
meta.Key,
value,
)
}
table.Render()
}
if withDetail {
fmt.Printf("\n - Events :\n")
for _, event := range alert.Events {
alertEventTable(color.Output, event)
}
table.Render()
}
if withDetail {
fmt.Printf("\n - Events :\n")
for _, event := range alert.Events {
alertEventTable(color.Output, event)
}
}
return nil
}
type cliAlerts struct{}
func NewCLIAlerts() *cliAlerts {
return &cliAlerts{}
type cliAlerts struct{
client *apiclient.ApiClient
cfg configGetter
}
func (cli cliAlerts) NewCommand() *cobra.Command {
func NewCLIAlerts(getconfig configGetter) *cliAlerts {
return &cliAlerts{
cfg: getconfig,
}
}
func (cli *cliAlerts) NewCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "alerts [action]",
Short: "Manage alerts",
Args: cobra.MinimumNArgs(1),
DisableAutoGenTag: true,
Aliases: []string{"alert"},
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
var err error
if err := csConfig.LoadAPIClient(); err != nil {
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
cfg := cli.cfg()
if err := cfg.LoadAPIClient(); err != nil {
return fmt.Errorf("loading api client: %w", err)
}
apiURL, err := url.Parse(csConfig.API.Client.Credentials.URL)
apiURL, err := url.Parse(cfg.API.Client.Credentials.URL)
if err != nil {
return fmt.Errorf("parsing api url %s: %w", apiURL, err)
}
Client, err = apiclient.NewClient(&apiclient.Config{
MachineID: csConfig.API.Client.Credentials.Login,
Password: strfmt.Password(csConfig.API.Client.Credentials.Password),
cli.client, err = apiclient.NewClient(&apiclient.Config{
MachineID: cfg.API.Client.Credentials.Login,
Password: strfmt.Password(cfg.API.Client.Credentials.Password),
UserAgent: fmt.Sprintf("crowdsec/%s", version.String()),
URL: apiURL,
VersionPrefix: "v1",
@ -240,6 +215,7 @@ func (cli cliAlerts) NewCommand() *cobra.Command {
if err != nil {
return fmt.Errorf("new api client: %w", err)
}
return nil
},
}
@ -252,7 +228,7 @@ func (cli cliAlerts) NewCommand() *cobra.Command {
return cmd
}
func (cli cliAlerts) NewListCmd() *cobra.Command {
func (cli *cliAlerts) NewListCmd() *cobra.Command {
var alertListFilter = apiclient.AlertsListOpts{
ScopeEquals: new(string),
ValueEquals: new(string),
@ -265,8 +241,10 @@ func (cli cliAlerts) NewListCmd() *cobra.Command {
IncludeCAPI: new(bool),
OriginEquals: new(string),
}
limit := new(int)
contained := new(bool)
var printMachine bool
cmd := &cobra.Command{
@ -278,9 +256,7 @@ cscli alerts list --range 1.2.3.0/24
cscli alerts list -s crowdsecurity/ssh-bf
cscli alerts list --type ban`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
var err error
RunE: func(cmd *cobra.Command, _ []string) error {
if err := manageCliDecisionAlerts(alertListFilter.IPEquals, alertListFilter.RangeEquals,
alertListFilter.ScopeEquals, alertListFilter.ValueEquals); err != nil {
printHelp(cmd)
@ -346,42 +322,45 @@ cscli alerts list --type ban`,
alertListFilter.Contains = new(bool)
}
alerts, _, err := Client.Alerts.List(context.Background(), alertListFilter)
alerts, _, err := cli.client.Alerts.List(context.Background(), alertListFilter)
if err != nil {
return fmt.Errorf("unable to list alerts: %v", err)
return fmt.Errorf("unable to list alerts: %w", err)
}
err = AlertsToTable(alerts, printMachine)
if err != nil {
return fmt.Errorf("unable to list alerts: %v", err)
if err = cli.alertsToTable(alerts, printMachine); err != nil {
return fmt.Errorf("unable to list alerts: %w", err)
}
return nil
},
}
cmd.Flags().SortFlags = false
cmd.Flags().BoolVarP(alertListFilter.IncludeCAPI, "all", "a", false, "Include decisions from Central API")
cmd.Flags().StringVar(alertListFilter.Until, "until", "", "restrict to alerts older than until (ie. 4h, 30d)")
cmd.Flags().StringVar(alertListFilter.Since, "since", "", "restrict to alerts newer than since (ie. 4h, 30d)")
cmd.Flags().StringVarP(alertListFilter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value <IP>)")
cmd.Flags().StringVarP(alertListFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)")
cmd.Flags().StringVarP(alertListFilter.RangeEquals, "range", "r", "", "restrict to alerts from this range (shorthand for --scope range --value <RANGE/X>)")
cmd.Flags().StringVar(alertListFilter.TypeEquals, "type", "", "restrict to alerts with given decision type (ie. ban, captcha)")
cmd.Flags().StringVar(alertListFilter.ScopeEquals, "scope", "", "restrict to alerts of this scope (ie. ip,range)")
cmd.Flags().StringVarP(alertListFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope")
cmd.Flags().StringVar(alertListFilter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ",")))
cmd.Flags().BoolVar(contained, "contained", false, "query decisions contained by range")
cmd.Flags().BoolVarP(&printMachine, "machine", "m", false, "print machines that sent alerts")
cmd.Flags().IntVarP(limit, "limit", "l", 50, "limit size of alerts list table (0 to view all alerts)")
flags := cmd.Flags()
flags.SortFlags = false
flags.BoolVarP(alertListFilter.IncludeCAPI, "all", "a", false, "Include decisions from Central API")
flags.StringVar(alertListFilter.Until, "until", "", "restrict to alerts older than until (ie. 4h, 30d)")
flags.StringVar(alertListFilter.Since, "since", "", "restrict to alerts newer than since (ie. 4h, 30d)")
flags.StringVarP(alertListFilter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value <IP>)")
flags.StringVarP(alertListFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)")
flags.StringVarP(alertListFilter.RangeEquals, "range", "r", "", "restrict to alerts from this range (shorthand for --scope range --value <RANGE/X>)")
flags.StringVar(alertListFilter.TypeEquals, "type", "", "restrict to alerts with given decision type (ie. ban, captcha)")
flags.StringVar(alertListFilter.ScopeEquals, "scope", "", "restrict to alerts of this scope (ie. ip,range)")
flags.StringVarP(alertListFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope")
flags.StringVar(alertListFilter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ",")))
flags.BoolVar(contained, "contained", false, "query decisions contained by range")
flags.BoolVarP(&printMachine, "machine", "m", false, "print machines that sent alerts")
flags.IntVarP(limit, "limit", "l", 50, "limit size of alerts list table (0 to view all alerts)")
return cmd
}
func (cli cliAlerts) NewDeleteCmd() *cobra.Command {
var ActiveDecision *bool
var AlertDeleteAll bool
var delAlertByID string
contained := new(bool)
func (cli *cliAlerts) NewDeleteCmd() *cobra.Command {
var (
ActiveDecision *bool
AlertDeleteAll bool
delAlertByID string
)
var alertDeleteFilter = apiclient.AlertsDeleteOpts{
ScopeEquals: new(string),
ValueEquals: new(string),
@ -389,6 +368,9 @@ func (cli cliAlerts) NewDeleteCmd() *cobra.Command {
IPEquals: new(string),
RangeEquals: new(string),
}
contained := new(bool)
cmd := &cobra.Command{
Use: "delete [filters] [--all]",
Short: `Delete alerts
@ -399,7 +381,7 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`,
DisableAutoGenTag: true,
Aliases: []string{"remove"},
Args: cobra.ExactArgs(0),
PreRunE: func(cmd *cobra.Command, args []string) error {
PreRunE: func(cmd *cobra.Command, _ []string) error {
if AlertDeleteAll {
return nil
}
@ -412,11 +394,11 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`,
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(cmd *cobra.Command, _ []string) error {
var err error
if !AlertDeleteAll {
if err := manageCliDecisionAlerts(alertDeleteFilter.IPEquals, alertDeleteFilter.RangeEquals,
if err = manageCliDecisionAlerts(alertDeleteFilter.IPEquals, alertDeleteFilter.RangeEquals,
alertDeleteFilter.ScopeEquals, alertDeleteFilter.ValueEquals); err != nil {
printHelp(cmd)
return err
@ -452,14 +434,14 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`,
var alerts *models.DeleteAlertsResponse
if delAlertByID == "" {
alerts, _, err = Client.Alerts.Delete(context.Background(), alertDeleteFilter)
alerts, _, err = cli.client.Alerts.Delete(context.Background(), alertDeleteFilter)
if err != nil {
return fmt.Errorf("unable to delete alerts : %v", err)
return fmt.Errorf("unable to delete alerts: %w", err)
}
} else {
alerts, _, err = Client.Alerts.DeleteOne(context.Background(), delAlertByID)
alerts, _, err = cli.client.Alerts.DeleteOne(context.Background(), delAlertByID)
if err != nil {
return fmt.Errorf("unable to delete alert: %v", err)
return fmt.Errorf("unable to delete alert: %w", err)
}
}
log.Infof("%s alert(s) deleted", alerts.NbDeleted)
@ -467,26 +449,31 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`,
return nil
},
}
cmd.Flags().SortFlags = false
cmd.Flags().StringVar(alertDeleteFilter.ScopeEquals, "scope", "", "the scope (ie. ip,range)")
cmd.Flags().StringVarP(alertDeleteFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope")
cmd.Flags().StringVarP(alertDeleteFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)")
cmd.Flags().StringVarP(alertDeleteFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value <IP>)")
cmd.Flags().StringVarP(alertDeleteFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value <RANGE>)")
cmd.Flags().StringVar(&delAlertByID, "id", "", "alert ID")
cmd.Flags().BoolVarP(&AlertDeleteAll, "all", "a", false, "delete all alerts")
cmd.Flags().BoolVar(contained, "contained", false, "query decisions contained by range")
flags := cmd.Flags()
flags.SortFlags = false
flags.StringVar(alertDeleteFilter.ScopeEquals, "scope", "", "the scope (ie. ip,range)")
flags.StringVarP(alertDeleteFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope")
flags.StringVarP(alertDeleteFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)")
flags.StringVarP(alertDeleteFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value <IP>)")
flags.StringVarP(alertDeleteFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value <RANGE>)")
flags.StringVar(&delAlertByID, "id", "", "alert ID")
flags.BoolVarP(&AlertDeleteAll, "all", "a", false, "delete all alerts")
flags.BoolVar(contained, "contained", false, "query decisions contained by range")
return cmd
}
func (cli cliAlerts) NewInspectCmd() *cobra.Command {
func (cli *cliAlerts) NewInspectCmd() *cobra.Command {
var details bool
cmd := &cobra.Command{
Use: `inspect "alert_id"`,
Short: `Show info about an alert`,
Example: `cscli alerts inspect 123`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
cfg := cli.cfg()
if len(args) == 0 {
printHelp(cmd)
return fmt.Errorf("missing alert_id")
@ -496,61 +483,65 @@ func (cli cliAlerts) NewInspectCmd() *cobra.Command {
if err != nil {
return fmt.Errorf("bad alert id %s", alertID)
}
alert, _, err := Client.Alerts.GetByID(context.Background(), id)
alert, _, err := cli.client.Alerts.GetByID(context.Background(), id)
if err != nil {
return fmt.Errorf("can't find alert with id %s: %s", alertID, err)
return fmt.Errorf("can't find alert with id %s: %w", alertID, err)
}
switch csConfig.Cscli.Output {
switch cfg.Cscli.Output {
case "human":
if err := DisplayOneAlert(alert, details); err != nil {
if err := cli.displayOneAlert(alert, details); err != nil {
continue
}
case "json":
data, err := json.MarshalIndent(alert, "", " ")
if err != nil {
return fmt.Errorf("unable to marshal alert with id %s: %s", alertID, err)
return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err)
}
fmt.Printf("%s\n", string(data))
case "raw":
data, err := yaml.Marshal(alert)
if err != nil {
return fmt.Errorf("unable to marshal alert with id %s: %s", alertID, err)
return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err)
}
fmt.Printf("%s\n", string(data))
fmt.Println(string(data))
}
}
return nil
},
}
cmd.Flags().SortFlags = false
cmd.Flags().BoolVarP(&details, "details", "d", false, "show alerts with events")
return cmd
}
func (cli cliAlerts) NewFlushCmd() *cobra.Command {
var maxItems int
var maxAge string
func (cli *cliAlerts) NewFlushCmd() *cobra.Command {
var (
maxItems int
maxAge string
)
cmd := &cobra.Command{
Use: `flush`,
Short: `Flush alerts
/!\ This command can be used only on the same machine than the local API`,
Example: `cscli alerts flush --max-items 1000 --max-age 7d`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
var err error
if err := require.LAPI(csConfig); err != nil {
RunE: func(_ *cobra.Command, _ []string) error {
cfg := cli.cfg()
if err := require.LAPI(cfg); err != nil {
return err
}
dbClient, err = database.NewClient(csConfig.DbConfig)
db, err := database.NewClient(cfg.DbConfig)
if err != nil {
return fmt.Errorf("unable to create new database client: %s", err)
return fmt.Errorf("unable to create new database client: %w", err)
}
log.Info("Flushing alerts. !! This may take a long time !!")
err = dbClient.FlushAlerts(maxAge, maxItems)
err = db.FlushAlerts(maxAge, maxItems)
if err != nil {
return fmt.Errorf("unable to flush alerts: %s", err)
return fmt.Errorf("unable to flush alerts: %w", err)
}
log.Info("Alerts flushed")


@ -4,7 +4,8 @@ import (
"encoding/csv"
"encoding/json"
"fmt"
"io"
"os"
"slices"
"strings"
"time"
@ -12,7 +13,6 @@ import (
"github.com/fatih/color"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"slices"
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require"
middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1"
@ -20,53 +20,33 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/types"
)
func getBouncers(out io.Writer, dbClient *database.Client) error {
bouncers, err := dbClient.ListBouncers()
if err != nil {
return fmt.Errorf("unable to list bouncers: %s", err)
func askYesNo(message string, defaultAnswer bool) (bool, error) {
var answer bool
prompt := &survey.Confirm{
Message: message,
Default: defaultAnswer,
}
switch csConfig.Cscli.Output {
case "human":
getBouncersTable(out, bouncers)
case "json":
enc := json.NewEncoder(out)
enc.SetIndent("", " ")
if err := enc.Encode(bouncers); err != nil {
return fmt.Errorf("failed to unmarshal: %w", err)
}
return nil
case "raw":
csvwriter := csv.NewWriter(out)
err := csvwriter.Write([]string{"name", "ip", "revoked", "last_pull", "type", "version", "auth_type"})
if err != nil {
return fmt.Errorf("failed to write raw header: %w", err)
}
for _, b := range bouncers {
var revoked string
if !b.Revoked {
revoked = "validated"
} else {
revoked = "pending"
}
err := csvwriter.Write([]string{b.Name, b.IPAddress, revoked, b.LastPull.Format(time.RFC3339), b.Type, b.Version, b.AuthType})
if err != nil {
return fmt.Errorf("failed to write raw: %w", err)
}
}
csvwriter.Flush()
if err := survey.AskOne(prompt, &answer); err != nil {
return defaultAnswer, err
}
return nil
return answer, nil
}
type cliBouncers struct {}
func NewCLIBouncers() *cliBouncers {
return &cliBouncers{}
type cliBouncers struct {
db *database.Client
cfg configGetter
}
func (cli cliBouncers) NewCommand() *cobra.Command {
func NewCLIBouncers(cfg configGetter) *cliBouncers {
return &cliBouncers{
cfg: cfg,
}
}
func (cli *cliBouncers) NewCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "bouncers [action]",
Short: "Manage bouncers [requires local API]",
@ -76,94 +56,127 @@ Note: This command requires database direct access, so is intended to be run on
Args: cobra.MinimumNArgs(1),
Aliases: []string{"bouncer"},
DisableAutoGenTag: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
var err error
if err = require.LAPI(csConfig); err != nil {
if err = require.LAPI(cli.cfg()); err != nil {
return err
}
dbClient, err = database.NewClient(csConfig.DbConfig)
cli.db, err = database.NewClient(cli.cfg().DbConfig)
if err != nil {
return fmt.Errorf("unable to create new database client: %s", err)
return fmt.Errorf("can't connect to the database: %s", err)
}
return nil
},
}
cmd.AddCommand(cli.NewListCmd())
cmd.AddCommand(cli.NewAddCmd())
cmd.AddCommand(cli.NewDeleteCmd())
cmd.AddCommand(cli.NewPruneCmd())
cmd.AddCommand(cli.newListCmd())
cmd.AddCommand(cli.newAddCmd())
cmd.AddCommand(cli.newDeleteCmd())
cmd.AddCommand(cli.newPruneCmd())
return cmd
}
func (cli cliBouncers) NewListCmd() *cobra.Command {
func (cli *cliBouncers) list() error {
out := color.Output
bouncers, err := cli.db.ListBouncers()
if err != nil {
return fmt.Errorf("unable to list bouncers: %s", err)
}
switch cli.cfg().Cscli.Output {
case "human":
getBouncersTable(out, bouncers)
case "json":
enc := json.NewEncoder(out)
enc.SetIndent("", " ")
if err := enc.Encode(bouncers); err != nil {
return fmt.Errorf("failed to marshal: %w", err)
}
return nil
case "raw":
csvwriter := csv.NewWriter(out)
if err := csvwriter.Write([]string{"name", "ip", "revoked", "last_pull", "type", "version", "auth_type"}); err != nil {
return fmt.Errorf("failed to write raw header: %w", err)
}
for _, b := range bouncers {
valid := "validated"
if b.Revoked {
valid = "pending"
}
if err := csvwriter.Write([]string{b.Name, b.IPAddress, valid, b.LastPull.Format(time.RFC3339), b.Type, b.Version, b.AuthType}); err != nil {
return fmt.Errorf("failed to write raw: %w", err)
}
}
csvwriter.Flush()
}
return nil
}
func (cli *cliBouncers) newListCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "list",
Short: "list all bouncers within the database",
Example: `cscli bouncers list`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, arg []string) error {
err := getBouncers(color.Output, dbClient)
if err != nil {
return fmt.Errorf("unable to list bouncers: %s", err)
}
return nil
RunE: func(_ *cobra.Command, _ []string) error {
return cli.list()
},
}
return cmd
}
func (cli cliBouncers) add(cmd *cobra.Command, args []string) error {
func (cli *cliBouncers) add(bouncerName string, key string) error {
var err error
keyLength := 32
flags := cmd.Flags()
key, err := flags.GetString("key")
if err != nil {
return err
}
keyName := args[0]
var apiKey string
if keyName == "" {
return fmt.Errorf("please provide a name for the api key")
}
apiKey = key
if key == "" {
apiKey, err = middlewares.GenerateAPIKey(keyLength)
key, err = middlewares.GenerateAPIKey(keyLength)
if err != nil {
return fmt.Errorf("unable to generate api key: %s", err)
}
}
if err != nil {
return fmt.Errorf("unable to generate api key: %s", err)
}
_, err = dbClient.CreateBouncer(keyName, "", middlewares.HashSHA512(apiKey), types.ApiKeyAuthType)
_, err = cli.db.CreateBouncer(bouncerName, "", middlewares.HashSHA512(key), types.ApiKeyAuthType)
if err != nil {
return fmt.Errorf("unable to create bouncer: %s", err)
}
switch csConfig.Cscli.Output {
switch cli.cfg().Cscli.Output {
case "human":
fmt.Printf("API key for '%s':\n\n", keyName)
fmt.Printf(" %s\n\n", apiKey)
fmt.Printf("API key for '%s':\n\n", bouncerName)
fmt.Printf(" %s\n\n", key)
fmt.Print("Please keep this key since you will not be able to retrieve it!\n")
case "raw":
fmt.Printf("%s", apiKey)
fmt.Print(key)
case "json":
j, err := json.Marshal(apiKey)
j, err := json.Marshal(key)
if err != nil {
return fmt.Errorf("unable to marshal api key")
}
fmt.Printf("%s", string(j))
fmt.Print(string(j))
}
return nil
}
func (cli cliBouncers) NewAddCmd() *cobra.Command {
func (cli *cliBouncers) newAddCmd() *cobra.Command {
var key string
cmd := &cobra.Command{
Use: "add MyBouncerName",
Short: "add a single bouncer to the database",
@ -171,127 +184,133 @@ func (cli cliBouncers) NewAddCmd() *cobra.Command {
cscli bouncers add MyBouncerName --key <random-key>`,
Args: cobra.ExactArgs(1),
DisableAutoGenTag: true,
RunE: cli.add,
RunE: func(_ *cobra.Command, args []string) error {
return cli.add(args[0], key)
},
}
flags := cmd.Flags()
flags.StringP("length", "l", "", "length of the api key")
flags.MarkDeprecated("length", "use --key instead")
flags.StringP("key", "k", "", "api key for the bouncer")
flags.StringVarP(&key, "key", "k", "", "api key for the bouncer")
return cmd
}
func (cli cliBouncers) delete(cmd *cobra.Command, args []string) error {
for _, bouncerID := range args {
err := dbClient.DeleteBouncer(bouncerID)
func (cli *cliBouncers) deleteValid(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
bouncers, err := cli.db.ListBouncers()
if err != nil {
cobra.CompError("unable to list bouncers " + err.Error())
}
ret := []string{}
for _, bouncer := range bouncers {
if strings.Contains(bouncer.Name, toComplete) && !slices.Contains(args, bouncer.Name) {
ret = append(ret, bouncer.Name)
}
}
return ret, cobra.ShellCompDirectiveNoFileComp
}
func (cli *cliBouncers) delete(bouncers []string) error {
for _, bouncerID := range bouncers {
err := cli.db.DeleteBouncer(bouncerID)
if err != nil {
return fmt.Errorf("unable to delete bouncer '%s': %s", bouncerID, err)
}
log.Infof("bouncer '%s' deleted successfully", bouncerID)
}
return nil
}
func (cli cliBouncers) NewDeleteCmd() *cobra.Command {
func (cli *cliBouncers) newDeleteCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "delete MyBouncerName",
Short: "delete bouncer(s) from the database",
Args: cobra.MinimumNArgs(1),
Aliases: []string{"remove"},
DisableAutoGenTag: true,
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
var err error
dbClient, err = getDBClient()
if err != nil {
cobra.CompError("unable to create new database client: " + err.Error())
return nil, cobra.ShellCompDirectiveNoFileComp
}
bouncers, err := dbClient.ListBouncers()
if err != nil {
cobra.CompError("unable to list bouncers " + err.Error())
}
ret := make([]string, 0)
for _, bouncer := range bouncers {
if strings.Contains(bouncer.Name, toComplete) && !slices.Contains(args, bouncer.Name) {
ret = append(ret, bouncer.Name)
}
}
return ret, cobra.ShellCompDirectiveNoFileComp
ValidArgsFunction: cli.deleteValid,
RunE: func(_ *cobra.Command, args []string) error {
return cli.delete(args)
},
RunE: cli.delete,
}
return cmd
}
func (cli cliBouncers) NewPruneCmd() *cobra.Command {
var parsedDuration time.Duration
func (cli *cliBouncers) prune(duration time.Duration, force bool) error {
if duration < 2*time.Minute {
if yes, err := askYesNo(
"The duration you provided is less than 2 minutes. " +
"This may remove active bouncers. Continue?", false); err != nil {
return err
} else if !yes {
fmt.Println("User aborted prune. No changes were made.")
return nil
}
}
bouncers, err := cli.db.QueryBouncersLastPulltimeLT(time.Now().UTC().Add(-duration))
if err != nil {
return fmt.Errorf("unable to query bouncers: %w", err)
}
if len(bouncers) == 0 {
fmt.Println("No bouncers to prune.")
return nil
}
getBouncersTable(color.Output, bouncers)
if !force {
if yes, err := askYesNo(
"You are about to PERMANENTLY remove the above bouncers from the database. " +
"These will NOT be recoverable. Continue?", false); err != nil {
return err
} else if !yes {
fmt.Println("User aborted prune. No changes were made.")
return nil
}
}
deleted, err := cli.db.BulkDeleteBouncers(bouncers)
if err != nil {
return fmt.Errorf("unable to prune bouncers: %s", err)
}
fmt.Fprintf(os.Stderr, "Successfully deleted %d bouncers\n", deleted)
return nil
}
func (cli *cliBouncers) newPruneCmd() *cobra.Command {
var (
duration time.Duration
force bool
)
const defaultDuration = 60 * time.Minute
cmd := &cobra.Command{
Use: "prune",
Short: "prune multiple bouncers from the database",
Args: cobra.NoArgs,
DisableAutoGenTag: true,
Example: `cscli bouncers prune -d 60m
cscli bouncers prune -d 60m --force`,
PreRunE: func(cmd *cobra.Command, args []string) error {
dur, _ := cmd.Flags().GetString("duration")
var err error
parsedDuration, err = time.ParseDuration(fmt.Sprintf("-%s", dur))
if err != nil {
return fmt.Errorf("unable to parse duration '%s': %s", dur, err)
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
force, _ := cmd.Flags().GetBool("force")
if parsedDuration >= 0-2*time.Minute {
var answer bool
prompt := &survey.Confirm{
Message: "The duration you provided is less than or equal 2 minutes this may remove active bouncers continue ?",
Default: false,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask about prune check: %s", err)
}
if !answer {
fmt.Println("user aborted prune no changes were made")
return nil
}
}
bouncers, err := dbClient.QueryBouncersLastPulltimeLT(time.Now().UTC().Add(parsedDuration))
if err != nil {
return fmt.Errorf("unable to query bouncers: %s", err)
}
if len(bouncers) == 0 {
fmt.Println("no bouncers to prune")
return nil
}
getBouncersTable(color.Output, bouncers)
if !force {
var answer bool
prompt := &survey.Confirm{
Message: "You are about to PERMANENTLY remove the above bouncers from the database these will NOT be recoverable, continue ?",
Default: false,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask about prune check: %s", err)
}
if !answer {
fmt.Println("user aborted prune no changes were made")
return nil
}
}
nbDeleted, err := dbClient.BulkDeleteBouncers(bouncers)
if err != nil {
return fmt.Errorf("unable to prune bouncers: %s", err)
}
fmt.Printf("successfully delete %d bouncers\n", nbDeleted)
return nil
Example: `cscli bouncers prune -d 45m
cscli bouncers prune -d 45m --force`,
RunE: func(_ *cobra.Command, _ []string) error {
return cli.prune(duration, force)
},
}
cmd.Flags().StringP("duration", "d", "60m", "duration of time since last pull")
cmd.Flags().Bool("force", false, "force prune without asking for confirmation")
flags := cmd.Flags()
flags.DurationVarP(&duration, "duration", "d", defaultDuration, "duration of time since last pull")
flags.BoolVar(&force, "force", false, "force prune without asking for confirmation")
return cmd
}
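As an aside, a small standalone sketch (illustrative names only) of the flag pattern used by the new prune command: a time.Duration flag with a default value, and a cutoff computed by subtracting that duration from the current time.

package main

import (
	"fmt"
	"time"

	"github.com/spf13/cobra"
)

func main() {
	var duration time.Duration

	cmd := &cobra.Command{
		Use: "prune",
		RunE: func(_ *cobra.Command, _ []string) error {
			// Anything whose last pull is older than this cutoff is a prune candidate.
			cutoff := time.Now().UTC().Add(-duration)
			fmt.Printf("pruning entries not seen since %s\n", cutoff.Format(time.RFC3339))
			return nil
		},
	}

	// cobra/pflag parses values such as "45m" or "2h30m" directly into a time.Duration.
	cmd.Flags().DurationVarP(&duration, "duration", "d", 60*time.Minute, "duration of time since last pull")
	_ = cmd.Execute()
}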

View file

@ -146,7 +146,12 @@ func restoreConfigFromDirectory(dirPath string, oldBackup bool) error {
// Now we have config.yaml, we should regenerate config struct to have the right paths etc
ConfigFilePath = fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir)
initConfig()
log.Debug("Reloading configuration")
csConfig, _, err = loadConfigFor("config")
if err != nil {
return fmt.Errorf("failed to reload configuration: %s", err)
}
backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath)
if _, err = os.Stat(backupCAPICreds); err == nil {
@ -227,7 +232,7 @@ func restoreConfigFromDirectory(dirPath string, oldBackup bool) error {
}
}
// if there is files in the acquis backup dir, restore them
// if there are files in the acquis backup dir, restore them
acquisBackupDir := filepath.Join(dirPath, "acquis", "*.yaml")
if acquisFiles, err := filepath.Glob(acquisBackupDir); err == nil {
for _, acquisFile := range acquisFiles {

View file

@ -25,32 +25,53 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/types"
)
func NewConsoleCmd() *cobra.Command {
var cmdConsole = &cobra.Command{
type cliConsole struct {
cfg configGetter
}
func NewCLIConsole(cfg configGetter) *cliConsole {
return &cliConsole{
cfg: cfg,
}
}
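configGetter is defined elsewhere in this changeset; it is presumably a small accessor along the lines of type configGetter func() *csconfig.Config. A minimal sketch of the pattern with hypothetical names (Config stands in for csconfig.Config so the snippet is self-contained):

package main

import "fmt"

// Config stands in for csconfig.Config; configGetter mirrors the accessor
// type assumed to be declared elsewhere in this changeset.
type Config struct{ Output string }

type configGetter func() *Config

type cliThing struct {
	cfg configGetter
}

func NewCLIThing(cfg configGetter) *cliThing {
	return &cliThing{cfg: cfg}
}

func (cli *cliThing) run() {
	// The configuration is resolved lazily at run time instead of being read
	// from a package-level variable such as csConfig.
	fmt.Println("output format:", cli.cfg().Output)
}

func main() {
	loaded := &Config{Output: "human"}
	NewCLIThing(func() *Config { return loaded }).run()
}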
func (cli *cliConsole) NewCommand() *cobra.Command {
var cmd = &cobra.Command{
Use: "console [action]",
Short: "Manage interaction with Crowdsec console (https://app.crowdsec.net)",
Args: cobra.MinimumNArgs(1),
DisableAutoGenTag: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := require.LAPI(csConfig); err != nil {
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
cfg := cli.cfg()
if err := require.LAPI(cfg); err != nil {
return err
}
if err := require.CAPI(csConfig); err != nil {
if err := require.CAPI(cfg); err != nil {
return err
}
if err := require.CAPIRegistered(csConfig); err != nil {
if err := require.CAPIRegistered(cfg); err != nil {
return err
}
return nil
},
}
cmd.AddCommand(cli.newEnrollCmd())
cmd.AddCommand(cli.newEnableCmd())
cmd.AddCommand(cli.newDisableCmd())
cmd.AddCommand(cli.newStatusCmd())
return cmd
}
func (cli *cliConsole) newEnrollCmd() *cobra.Command {
name := ""
overwrite := false
tags := []string{}
opts := []string{}
cmdEnroll := &cobra.Command{
cmd := &cobra.Command{
Use: "enroll [enroll-key]",
Short: "Enroll this instance to https://app.crowdsec.net [requires local API]",
Long: `
@ -66,96 +87,107 @@ After running this command your will need to validate the enrollment in the weba
valid options are : %s,all (see 'cscli console status' for details)`, strings.Join(csconfig.CONSOLE_CONFIGS, ",")),
Args: cobra.ExactArgs(1),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
password := strfmt.Password(csConfig.API.Server.OnlineClient.Credentials.Password)
apiURL, err := url.Parse(csConfig.API.Server.OnlineClient.Credentials.URL)
RunE: func(_ *cobra.Command, args []string) error {
cfg := cli.cfg()
password := strfmt.Password(cfg.API.Server.OnlineClient.Credentials.Password)
apiURL, err := url.Parse(cfg.API.Server.OnlineClient.Credentials.URL)
if err != nil {
return fmt.Errorf("could not parse CAPI URL: %s", err)
return fmt.Errorf("could not parse CAPI URL: %w", err)
}
hub, err := require.Hub(csConfig, nil, nil)
hub, err := require.Hub(cfg, nil, nil)
if err != nil {
return err
}
scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS)
if err != nil {
return fmt.Errorf("failed to get installed scenarios: %s", err)
return fmt.Errorf("failed to get installed scenarios: %w", err)
}
if len(scenarios) == 0 {
scenarios = make([]string, 0)
}
enable_opts := []string{csconfig.SEND_MANUAL_SCENARIOS, csconfig.SEND_TAINTED_SCENARIOS}
enableOpts := []string{csconfig.SEND_MANUAL_SCENARIOS, csconfig.SEND_TAINTED_SCENARIOS}
if len(opts) != 0 {
for _, opt := range opts {
valid := false
if opt == "all" {
enable_opts = csconfig.CONSOLE_CONFIGS
enableOpts = csconfig.CONSOLE_CONFIGS
break
}
for _, available_opt := range csconfig.CONSOLE_CONFIGS {
if opt == available_opt {
for _, availableOpt := range csconfig.CONSOLE_CONFIGS {
if opt == availableOpt {
valid = true
enable := true
for _, enabled_opt := range enable_opts {
if opt == enabled_opt {
for _, enabledOpt := range enableOpts {
if opt == enabledOpt {
enable = false
continue
}
}
if enable {
enable_opts = append(enable_opts, opt)
enableOpts = append(enableOpts, opt)
}
break
}
}
if !valid {
return fmt.Errorf("option %s doesn't exist", opt)
}
}
}
c, _ := apiclient.NewClient(&apiclient.Config{
MachineID: csConfig.API.Server.OnlineClient.Credentials.Login,
MachineID: cli.cfg().API.Server.OnlineClient.Credentials.Login,
Password: password,
Scenarios: scenarios,
UserAgent: fmt.Sprintf("crowdsec/%s", version.String()),
URL: apiURL,
VersionPrefix: "v3",
})
resp, err := c.Auth.EnrollWatcher(context.Background(), args[0], name, tags, overwrite)
if err != nil {
return fmt.Errorf("could not enroll instance: %s", err)
return fmt.Errorf("could not enroll instance: %w", err)
}
if resp.Response.StatusCode == 200 && !overwrite {
log.Warning("Instance already enrolled. You can use '--overwrite' to force enroll")
return nil
}
if err := SetConsoleOpts(enable_opts, true); err != nil {
if err := cli.setConsoleOpts(enableOpts, true); err != nil {
return err
}
for _, opt := range enable_opts {
for _, opt := range enableOpts {
log.Infof("Enabled %s : %s", opt, csconfig.CONSOLE_CONFIGS_HELP[opt])
}
log.Info("Watcher successfully enrolled. Visit https://app.crowdsec.net to accept it.")
log.Info("Please restart crowdsec after accepting the enrollment.")
return nil
},
}
cmdEnroll.Flags().StringVarP(&name, "name", "n", "", "Name to display in the console")
cmdEnroll.Flags().BoolVarP(&overwrite, "overwrite", "", false, "Force enroll the instance")
cmdEnroll.Flags().StringSliceVarP(&tags, "tags", "t", tags, "Tags to display in the console")
cmdEnroll.Flags().StringSliceVarP(&opts, "enable", "e", opts, "Enable console options")
cmdConsole.AddCommand(cmdEnroll)
var enableAll, disableAll bool
flags := cmd.Flags()
flags.StringVarP(&name, "name", "n", "", "Name to display in the console")
flags.BoolVarP(&overwrite, "overwrite", "", false, "Force enroll the instance")
flags.StringSliceVarP(&tags, "tags", "t", tags, "Tags to display in the console")
flags.StringSliceVarP(&opts, "enable", "e", opts, "Enable console options")
cmdEnable := &cobra.Command{
return cmd
}
func (cli *cliConsole) newEnableCmd() *cobra.Command {
var enableAll bool
cmd := &cobra.Command{
Use: "enable [option]",
Short: "Enable a console option",
Example: "sudo cscli console enable tainted",
@ -163,9 +195,9 @@ After running this command your will need to validate the enrollment in the weba
Enable given information push to the central API. Allows to empower the console`,
ValidArgs: csconfig.CONSOLE_CONFIGS,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(_ *cobra.Command, args []string) error {
if enableAll {
if err := SetConsoleOpts(csconfig.CONSOLE_CONFIGS, true); err != nil {
if err := cli.setConsoleOpts(csconfig.CONSOLE_CONFIGS, true); err != nil {
return err
}
log.Infof("All features have been enabled successfully")
@ -173,19 +205,26 @@ Enable given information push to the central API. Allows to empower the console`
if len(args) == 0 {
return fmt.Errorf("you must specify at least one feature to enable")
}
if err := SetConsoleOpts(args, true); err != nil {
if err := cli.setConsoleOpts(args, true); err != nil {
return err
}
log.Infof("%v have been enabled", args)
}
log.Infof(ReloadMessage())
return nil
},
}
cmdEnable.Flags().BoolVarP(&enableAll, "all", "a", false, "Enable all console options")
cmdConsole.AddCommand(cmdEnable)
cmd.Flags().BoolVarP(&enableAll, "all", "a", false, "Enable all console options")
cmdDisable := &cobra.Command{
return cmd
}
func (cli *cliConsole) newDisableCmd() *cobra.Command {
var disableAll bool
cmd := &cobra.Command{
Use: "disable [option]",
Short: "Disable a console option",
Example: "sudo cscli console disable tainted",
@ -193,47 +232,52 @@ Enable given information push to the central API. Allows to empower the console`
Disable given information push to the central API.`,
ValidArgs: csconfig.CONSOLE_CONFIGS,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(_ *cobra.Command, args []string) error {
if disableAll {
if err := SetConsoleOpts(csconfig.CONSOLE_CONFIGS, false); err != nil {
if err := cli.setConsoleOpts(csconfig.CONSOLE_CONFIGS, false); err != nil {
return err
}
log.Infof("All features have been disabled")
} else {
if err := SetConsoleOpts(args, false); err != nil {
if err := cli.setConsoleOpts(args, false); err != nil {
return err
}
log.Infof("%v have been disabled", args)
}
log.Infof(ReloadMessage())
return nil
},
}
cmdDisable.Flags().BoolVarP(&disableAll, "all", "a", false, "Disable all console options")
cmdConsole.AddCommand(cmdDisable)
cmd.Flags().BoolVarP(&disableAll, "all", "a", false, "Disable all console options")
cmdConsoleStatus := &cobra.Command{
return cmd
}
func (cli *cliConsole) newStatusCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "status",
Short: "Shows status of the console options",
Example: `sudo cscli console status`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
switch csConfig.Cscli.Output {
RunE: func(_ *cobra.Command, _ []string) error {
cfg := cli.cfg()
consoleCfg := cfg.API.Server.ConsoleConfig
switch cfg.Cscli.Output {
case "human":
cmdConsoleStatusTable(color.Output, *csConfig)
cmdConsoleStatusTable(color.Output, *consoleCfg)
case "json":
c := csConfig.API.Server.ConsoleConfig
out := map[string](*bool){
csconfig.SEND_MANUAL_SCENARIOS: c.ShareManualDecisions,
csconfig.SEND_CUSTOM_SCENARIOS: c.ShareCustomScenarios,
csconfig.SEND_TAINTED_SCENARIOS: c.ShareTaintedScenarios,
csconfig.SEND_CONTEXT: c.ShareContext,
csconfig.CONSOLE_MANAGEMENT: c.ConsoleManagement,
csconfig.SEND_MANUAL_SCENARIOS: consoleCfg.ShareManualDecisions,
csconfig.SEND_CUSTOM_SCENARIOS: consoleCfg.ShareCustomScenarios,
csconfig.SEND_TAINTED_SCENARIOS: consoleCfg.ShareTaintedScenarios,
csconfig.SEND_CONTEXT: consoleCfg.ShareContext,
csconfig.CONSOLE_MANAGEMENT: consoleCfg.ConsoleManagement,
}
data, err := json.MarshalIndent(out, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal configuration: %s", err)
return fmt.Errorf("failed to marshal configuration: %w", err)
}
fmt.Println(string(data))
case "raw":
@ -244,11 +288,11 @@ Disable given information push to the central API.`,
}
rows := [][]string{
{csconfig.SEND_MANUAL_SCENARIOS, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareManualDecisions)},
{csconfig.SEND_CUSTOM_SCENARIOS, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios)},
{csconfig.SEND_TAINTED_SCENARIOS, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios)},
{csconfig.SEND_CONTEXT, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareContext)},
{csconfig.CONSOLE_MANAGEMENT, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ConsoleManagement)},
{csconfig.SEND_MANUAL_SCENARIOS, fmt.Sprintf("%t", *consoleCfg.ShareManualDecisions)},
{csconfig.SEND_CUSTOM_SCENARIOS, fmt.Sprintf("%t", *consoleCfg.ShareCustomScenarios)},
{csconfig.SEND_TAINTED_SCENARIOS, fmt.Sprintf("%t", *consoleCfg.ShareTaintedScenarios)},
{csconfig.SEND_CONTEXT, fmt.Sprintf("%t", *consoleCfg.ShareContext)},
{csconfig.CONSOLE_MANAGEMENT, fmt.Sprintf("%t", *consoleCfg.ConsoleManagement)},
}
for _, row := range rows {
err = csvwriter.Write(row)
@ -258,132 +302,137 @@ Disable given information push to the central API.`,
}
csvwriter.Flush()
}
return nil
},
}
cmdConsole.AddCommand(cmdConsoleStatus)
return cmdConsole
return cmd
}
func dumpConsoleConfig(c *csconfig.LocalApiServerCfg) error {
out, err := yaml.Marshal(c.ConsoleConfig)
func (cli *cliConsole) dumpConfig() error {
serverCfg := cli.cfg().API.Server
out, err := yaml.Marshal(serverCfg.ConsoleConfig)
if err != nil {
return fmt.Errorf("while marshaling ConsoleConfig (for %s): %w", c.ConsoleConfigPath, err)
return fmt.Errorf("while marshaling ConsoleConfig (for %s): %w", serverCfg.ConsoleConfigPath, err)
}
if c.ConsoleConfigPath == "" {
c.ConsoleConfigPath = csconfig.DefaultConsoleConfigFilePath
log.Debugf("Empty console_path, defaulting to %s", c.ConsoleConfigPath)
if serverCfg.ConsoleConfigPath == "" {
serverCfg.ConsoleConfigPath = csconfig.DefaultConsoleConfigFilePath
log.Debugf("Empty console_path, defaulting to %s", serverCfg.ConsoleConfigPath)
}
if err := os.WriteFile(c.ConsoleConfigPath, out, 0o600); err != nil {
return fmt.Errorf("while dumping console config to %s: %w", c.ConsoleConfigPath, err)
if err := os.WriteFile(serverCfg.ConsoleConfigPath, out, 0o600); err != nil {
return fmt.Errorf("while dumping console config to %s: %w", serverCfg.ConsoleConfigPath, err)
}
return nil
}
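The dump itself is just a YAML marshal written with owner-only permissions. A generic standalone sketch of that pattern follows; the struct, its yaml tags and the path are illustrative, not the real csconfig types.

package main

import (
	"log"
	"os"

	"gopkg.in/yaml.v3"
)

// consoleOpts is a stand-in for the console configuration being dumped.
type consoleOpts struct {
	ShareManualDecisions bool `yaml:"share_manual_decisions"`
	ShareContext         bool `yaml:"share_context"`
}

func main() {
	out, err := yaml.Marshal(consoleOpts{ShareManualDecisions: true})
	if err != nil {
		log.Fatalf("marshal: %s", err)
	}

	// 0o600 keeps the file owner-only, matching dumpConfig above.
	if err := os.WriteFile("/tmp/console-example.yaml", out, 0o600); err != nil {
		log.Fatalf("write: %s", err)
	}
}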
func SetConsoleOpts(args []string, wanted bool) error {
func (cli *cliConsole) setConsoleOpts(args []string, wanted bool) error {
cfg := cli.cfg()
consoleCfg := cfg.API.Server.ConsoleConfig
for _, arg := range args {
switch arg {
case csconfig.CONSOLE_MANAGEMENT:
/*for each flag check if it's already set before setting it*/
if csConfig.API.Server.ConsoleConfig.ConsoleManagement != nil {
if *csConfig.API.Server.ConsoleConfig.ConsoleManagement == wanted {
if consoleCfg.ConsoleManagement != nil {
if *consoleCfg.ConsoleManagement == wanted {
log.Debugf("%s already set to %t", csconfig.CONSOLE_MANAGEMENT, wanted)
} else {
log.Infof("%s set to %t", csconfig.CONSOLE_MANAGEMENT, wanted)
*csConfig.API.Server.ConsoleConfig.ConsoleManagement = wanted
*consoleCfg.ConsoleManagement = wanted
}
} else {
log.Infof("%s set to %t", csconfig.CONSOLE_MANAGEMENT, wanted)
csConfig.API.Server.ConsoleConfig.ConsoleManagement = ptr.Of(wanted)
consoleCfg.ConsoleManagement = ptr.Of(wanted)
}
if csConfig.API.Server.OnlineClient.Credentials != nil {
if cfg.API.Server.OnlineClient.Credentials != nil {
changed := false
if wanted && csConfig.API.Server.OnlineClient.Credentials.PapiURL == "" {
if wanted && cfg.API.Server.OnlineClient.Credentials.PapiURL == "" {
changed = true
csConfig.API.Server.OnlineClient.Credentials.PapiURL = types.PAPIBaseURL
} else if !wanted && csConfig.API.Server.OnlineClient.Credentials.PapiURL != "" {
cfg.API.Server.OnlineClient.Credentials.PapiURL = types.PAPIBaseURL
} else if !wanted && cfg.API.Server.OnlineClient.Credentials.PapiURL != "" {
changed = true
csConfig.API.Server.OnlineClient.Credentials.PapiURL = ""
cfg.API.Server.OnlineClient.Credentials.PapiURL = ""
}
if changed {
fileContent, err := yaml.Marshal(csConfig.API.Server.OnlineClient.Credentials)
fileContent, err := yaml.Marshal(cfg.API.Server.OnlineClient.Credentials)
if err != nil {
return fmt.Errorf("cannot marshal credentials: %s", err)
return fmt.Errorf("cannot marshal credentials: %w", err)
}
log.Infof("Updating credentials file: %s", csConfig.API.Server.OnlineClient.CredentialsFilePath)
log.Infof("Updating credentials file: %s", cfg.API.Server.OnlineClient.CredentialsFilePath)
err = os.WriteFile(csConfig.API.Server.OnlineClient.CredentialsFilePath, fileContent, 0o600)
err = os.WriteFile(cfg.API.Server.OnlineClient.CredentialsFilePath, fileContent, 0o600)
if err != nil {
return fmt.Errorf("cannot write credentials file: %s", err)
return fmt.Errorf("cannot write credentials file: %w", err)
}
}
}
case csconfig.SEND_CUSTOM_SCENARIOS:
/*for each flag check if it's already set before setting it*/
if csConfig.API.Server.ConsoleConfig.ShareCustomScenarios != nil {
if *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios == wanted {
if consoleCfg.ShareCustomScenarios != nil {
if *consoleCfg.ShareCustomScenarios == wanted {
log.Debugf("%s already set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted)
} else {
log.Infof("%s set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted)
*csConfig.API.Server.ConsoleConfig.ShareCustomScenarios = wanted
*consoleCfg.ShareCustomScenarios = wanted
}
} else {
log.Infof("%s set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted)
csConfig.API.Server.ConsoleConfig.ShareCustomScenarios = ptr.Of(wanted)
consoleCfg.ShareCustomScenarios = ptr.Of(wanted)
}
case csconfig.SEND_TAINTED_SCENARIOS:
/*for each flag check if it's already set before setting it*/
if csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios != nil {
if *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios == wanted {
if consoleCfg.ShareTaintedScenarios != nil {
if *consoleCfg.ShareTaintedScenarios == wanted {
log.Debugf("%s already set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted)
} else {
log.Infof("%s set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted)
*csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios = wanted
*consoleCfg.ShareTaintedScenarios = wanted
}
} else {
log.Infof("%s set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted)
csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios = ptr.Of(wanted)
consoleCfg.ShareTaintedScenarios = ptr.Of(wanted)
}
case csconfig.SEND_MANUAL_SCENARIOS:
/*for each flag check if it's already set before setting it*/
if csConfig.API.Server.ConsoleConfig.ShareManualDecisions != nil {
if *csConfig.API.Server.ConsoleConfig.ShareManualDecisions == wanted {
if consoleCfg.ShareManualDecisions != nil {
if *consoleCfg.ShareManualDecisions == wanted {
log.Debugf("%s already set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted)
} else {
log.Infof("%s set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted)
*csConfig.API.Server.ConsoleConfig.ShareManualDecisions = wanted
*consoleCfg.ShareManualDecisions = wanted
}
} else {
log.Infof("%s set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted)
csConfig.API.Server.ConsoleConfig.ShareManualDecisions = ptr.Of(wanted)
consoleCfg.ShareManualDecisions = ptr.Of(wanted)
}
case csconfig.SEND_CONTEXT:
/*for each flag check if it's already set before setting it*/
if csConfig.API.Server.ConsoleConfig.ShareContext != nil {
if *csConfig.API.Server.ConsoleConfig.ShareContext == wanted {
if consoleCfg.ShareContext != nil {
if *consoleCfg.ShareContext == wanted {
log.Debugf("%s already set to %t", csconfig.SEND_CONTEXT, wanted)
} else {
log.Infof("%s set to %t", csconfig.SEND_CONTEXT, wanted)
*csConfig.API.Server.ConsoleConfig.ShareContext = wanted
*consoleCfg.ShareContext = wanted
}
} else {
log.Infof("%s set to %t", csconfig.SEND_CONTEXT, wanted)
csConfig.API.Server.ConsoleConfig.ShareContext = ptr.Of(wanted)
consoleCfg.ShareContext = ptr.Of(wanted)
}
default:
return fmt.Errorf("unknown flag %s", arg)
}
}
if err := dumpConsoleConfig(csConfig.API.Server); err != nil {
return fmt.Errorf("failed writing console config: %s", err)
if err := cli.dumpConfig(); err != nil {
return fmt.Errorf("failed writing console config: %w", err)
}
return nil

View file

@ -9,7 +9,7 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
)
func cmdConsoleStatusTable(out io.Writer, csConfig csconfig.Config) {
func cmdConsoleStatusTable(out io.Writer, consoleCfg csconfig.ConsoleConfig) {
t := newTable(out)
t.SetRowLines(false)
@ -18,28 +18,30 @@ func cmdConsoleStatusTable(out io.Writer, csConfig csconfig.Config) {
for _, option := range csconfig.CONSOLE_CONFIGS {
activated := string(emoji.CrossMark)
switch option {
case csconfig.SEND_CUSTOM_SCENARIOS:
if *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios {
if *consoleCfg.ShareCustomScenarios {
activated = string(emoji.CheckMarkButton)
}
case csconfig.SEND_MANUAL_SCENARIOS:
if *csConfig.API.Server.ConsoleConfig.ShareManualDecisions {
if *consoleCfg.ShareManualDecisions {
activated = string(emoji.CheckMarkButton)
}
case csconfig.SEND_TAINTED_SCENARIOS:
if *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios {
if *consoleCfg.ShareTaintedScenarios {
activated = string(emoji.CheckMarkButton)
}
case csconfig.SEND_CONTEXT:
if *csConfig.API.Server.ConsoleConfig.ShareContext {
if *consoleCfg.ShareContext {
activated = string(emoji.CheckMarkButton)
}
case csconfig.CONSOLE_MANAGEMENT:
if *csConfig.API.Server.ConsoleConfig.ConsoleManagement {
if *consoleCfg.ConsoleManagement {
activated = string(emoji.CheckMarkButton)
}
}
t.AddRow(option, activated, csconfig.CONSOLE_CONFIGS_HELP[option])
}

View file

@ -41,7 +41,7 @@ func copyFileContents(src, dst string) (err error) {
}
/*copy the file, ioutil doesn't offer the feature*/
func CopyFile(sourceSymLink, destinationFile string) (err error) {
func CopyFile(sourceSymLink, destinationFile string) error {
sourceFile, err := filepath.EvalSymlinks(sourceSymLink)
if err != nil {
log.Infof("Not a symlink : %s", err)
@ -51,7 +51,7 @@ func CopyFile(sourceSymLink, destinationFile string) (err error) {
sourceFileStat, err := os.Stat(sourceFile)
if err != nil {
return
return err
}
if !sourceFileStat.Mode().IsRegular() {
@ -63,14 +63,14 @@ func CopyFile(sourceSymLink, destinationFile string) (err error) {
destinationFileStat, err := os.Stat(destinationFile)
if err != nil {
if !os.IsNotExist(err) {
return
return err
}
} else {
if !(destinationFileStat.Mode().IsRegular()) {
return fmt.Errorf("copyFile: non-regular destination file %s (%q)", destinationFileStat.Name(), destinationFileStat.Mode().String())
}
if os.SameFile(sourceFileStat, destinationFileStat) {
return
return err
}
}
@ -78,6 +78,6 @@ func CopyFile(sourceSymLink, destinationFile string) (err error) {
err = copyFileContents(sourceFile, destinationFile)
}
return
return err
}
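A short usage sketch for the reworked CopyFile, assuming the caller sits in the same package (from elsewhere it would be called through its package qualifier); the paths are placeholders.

package main

import "log"

func main() {
	// CopyFile (above) resolves symlinks on the source, refuses non-regular
	// files, and now returns an explicit error instead of a named return.
	if err := CopyFile("/tmp/source.txt", "/tmp/destination.txt"); err != nil {
		log.Fatalf("copy failed: %s", err)
	}
}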

View file

@ -19,15 +19,14 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/crowdsecurity/crowdsec/pkg/metabase"
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require"
"github.com/crowdsecurity/crowdsec/pkg/metabase"
)
var (
metabaseUser = "crowdsec@crowdsec.net"
metabasePassword string
metabaseDbPath string
metabaseDBPath string
metabaseConfigPath string
metabaseConfigFolder = "metabase/"
metabaseConfigFile = "metabase.yaml"
@ -43,14 +42,17 @@ var (
// information needed to set up a random password on user's behalf
)
type cliDashboard struct{}
func NewCLIDashboard() *cliDashboard {
return &cliDashboard{}
type cliDashboard struct {
cfg configGetter
}
func (cli cliDashboard) NewCommand() *cobra.Command {
/* ---- UPDATE COMMAND */
func NewCLIDashboard(cfg configGetter) *cliDashboard {
return &cliDashboard{
cfg: cfg,
}
}
func (cli *cliDashboard) NewCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "dashboard [command]",
Short: "Manage your metabase dashboard container [requires local API]",
@ -65,8 +67,9 @@ cscli dashboard start
cscli dashboard stop
cscli dashboard remove
`,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := require.LAPI(csConfig); err != nil {
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
cfg := cli.cfg()
if err := require.LAPI(cfg); err != nil {
return err
}
@ -74,13 +77,13 @@ cscli dashboard remove
return err
}
metabaseConfigFolderPath := filepath.Join(csConfig.ConfigPaths.ConfigDir, metabaseConfigFolder)
metabaseConfigFolderPath := filepath.Join(cfg.ConfigPaths.ConfigDir, metabaseConfigFolder)
metabaseConfigPath = filepath.Join(metabaseConfigFolderPath, metabaseConfigFile)
if err := os.MkdirAll(metabaseConfigFolderPath, os.ModePerm); err != nil {
return err
}
if err := require.DB(csConfig); err != nil {
if err := require.DB(cfg); err != nil {
return err
}
@ -95,20 +98,21 @@ cscli dashboard remove
metabaseContainerID = oldContainerID
}
}
return nil
},
}
cmd.AddCommand(cli.NewSetupCmd())
cmd.AddCommand(cli.NewStartCmd())
cmd.AddCommand(cli.NewStopCmd())
cmd.AddCommand(cli.NewShowPasswordCmd())
cmd.AddCommand(cli.NewRemoveCmd())
cmd.AddCommand(cli.newSetupCmd())
cmd.AddCommand(cli.newStartCmd())
cmd.AddCommand(cli.newStopCmd())
cmd.AddCommand(cli.newShowPasswordCmd())
cmd.AddCommand(cli.newRemoveCmd())
return cmd
}
func (cli cliDashboard) NewSetupCmd() *cobra.Command {
func (cli *cliDashboard) newSetupCmd() *cobra.Command {
var force bool
cmd := &cobra.Command{
@ -122,9 +126,9 @@ cscli dashboard setup
cscli dashboard setup --listen 0.0.0.0
cscli dashboard setup -l 0.0.0.0 -p 443 --password <password>
`,
RunE: func(cmd *cobra.Command, args []string) error {
if metabaseDbPath == "" {
metabaseDbPath = csConfig.ConfigPaths.DataDir
RunE: func(_ *cobra.Command, _ []string) error {
if metabaseDBPath == "" {
metabaseDBPath = cli.cfg().ConfigPaths.DataDir
}
if metabasePassword == "" {
@ -145,10 +149,10 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password <password>
if err != nil {
return err
}
if err = chownDatabase(dockerGroup.Gid); err != nil {
if err = cli.chownDatabase(dockerGroup.Gid); err != nil {
return err
}
mb, err := metabase.SetupMetabase(csConfig.API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDbPath, dockerGroup.Gid, metabaseContainerID, metabaseImage)
mb, err := metabase.SetupMetabase(cli.cfg().API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDBPath, dockerGroup.Gid, metabaseContainerID, metabaseImage)
if err != nil {
return err
}
@ -161,29 +165,32 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password <password>
fmt.Printf("\tURL : '%s'\n", mb.Config.ListenURL)
fmt.Printf("\tusername : '%s'\n", mb.Config.Username)
fmt.Printf("\tpassword : '%s'\n", mb.Config.Password)
return nil
},
}
cmd.Flags().BoolVarP(&force, "force", "f", false, "Force setup : override existing files")
cmd.Flags().StringVarP(&metabaseDbPath, "dir", "d", "", "Shared directory with metabase container")
cmd.Flags().StringVarP(&metabaseListenAddress, "listen", "l", metabaseListenAddress, "Listen address of container")
cmd.Flags().StringVar(&metabaseImage, "metabase-image", metabaseImage, "Metabase image to use")
cmd.Flags().StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container")
cmd.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
//cmd.Flags().StringVarP(&metabaseUser, "user", "u", "crowdsec@crowdsec.net", "metabase user")
cmd.Flags().StringVar(&metabasePassword, "password", "", "metabase password")
flags := cmd.Flags()
flags.BoolVarP(&force, "force", "f", false, "Force setup : override existing files")
flags.StringVarP(&metabaseDBPath, "dir", "d", "", "Shared directory with metabase container")
flags.StringVarP(&metabaseListenAddress, "listen", "l", metabaseListenAddress, "Listen address of container")
flags.StringVar(&metabaseImage, "metabase-image", metabaseImage, "Metabase image to use")
flags.StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container")
flags.BoolVarP(&forceYes, "yes", "y", false, "force yes")
// flags.StringVarP(&metabaseUser, "user", "u", "crowdsec@crowdsec.net", "metabase user")
flags.StringVar(&metabasePassword, "password", "", "metabase password")
return cmd
}
func (cli cliDashboard) NewStartCmd() *cobra.Command {
func (cli *cliDashboard) newStartCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "start",
Short: "Start the metabase container.",
Long: `Starts the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(_ *cobra.Command, _ []string) error {
mb, err := metabase.NewMetabase(metabaseConfigPath, metabaseContainerID)
if err != nil {
return err
@ -197,22 +204,24 @@ func (cli cliDashboard) NewStartCmd() *cobra.Command {
}
log.Infof("Started metabase")
log.Infof("url : http://%s:%s", mb.Config.ListenAddr, mb.Config.ListenPort)
return nil
},
}
cmd.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
return cmd
}
func (cli cliDashboard) NewStopCmd() *cobra.Command {
func (cli *cliDashboard) newStopCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "stop",
Short: "Stops the metabase container.",
Long: `Stops the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(_ *cobra.Command, _ []string) error {
if err := metabase.StopContainer(metabaseContainerID); err != nil {
return fmt.Errorf("unable to stop container '%s': %s", metabaseContainerID, err)
}
@ -223,17 +232,18 @@ func (cli cliDashboard) NewStopCmd() *cobra.Command {
return cmd
}
func (cli cliDashboard) NewShowPasswordCmd() *cobra.Command {
func (cli *cliDashboard) newShowPasswordCmd() *cobra.Command {
cmd := &cobra.Command{Use: "show-password",
Short: "displays password of metabase.",
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(_ *cobra.Command, _ []string) error {
m := metabase.Metabase{}
if err := m.LoadConfig(metabaseConfigPath); err != nil {
return err
}
log.Printf("'%s'", m.Config.Password)
return nil
},
}
@ -241,7 +251,7 @@ func (cli cliDashboard) NewShowPasswordCmd() *cobra.Command {
return cmd
}
func (cli cliDashboard) NewRemoveCmd() *cobra.Command {
func (cli *cliDashboard) newRemoveCmd() *cobra.Command {
var force bool
cmd := &cobra.Command{
@ -254,7 +264,7 @@ func (cli cliDashboard) NewRemoveCmd() *cobra.Command {
cscli dashboard remove
cscli dashboard remove --force
`,
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(_ *cobra.Command, _ []string) error {
if !forceYes {
var answer bool
prompt := &survey.Confirm{
@ -291,8 +301,8 @@ cscli dashboard remove --force
}
log.Infof("container %s stopped & removed", metabaseContainerID)
}
log.Debugf("Removing metabase db %s", csConfig.ConfigPaths.DataDir)
if err := metabase.RemoveDatabase(csConfig.ConfigPaths.DataDir); err != nil {
log.Debugf("Removing metabase db %s", cli.cfg().ConfigPaths.DataDir)
if err := metabase.RemoveDatabase(cli.cfg().ConfigPaths.DataDir); err != nil {
log.Warnf("failed to remove metabase internal db : %s", err)
}
if force {
@ -306,11 +316,14 @@ cscli dashboard remove --force
}
}
}
return nil
},
}
cmd.Flags().BoolVarP(&force, "force", "f", false, "Remove also the metabase image")
cmd.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
flags := cmd.Flags()
flags.BoolVarP(&force, "force", "f", false, "Remove also the metabase image")
flags.BoolVarP(&forceYes, "yes", "y", false, "force yes")
return cmd
}
@ -431,22 +444,24 @@ func checkGroups(forceYes *bool) (*user.Group, error) {
return user.LookupGroup(crowdsecGroup)
}
func chownDatabase(gid string) error {
func (cli *cliDashboard) chownDatabase(gid string) error {
cfg := cli.cfg()
intID, err := strconv.Atoi(gid)
if err != nil {
return fmt.Errorf("unable to convert group ID to int: %s", err)
}
if stat, err := os.Stat(csConfig.DbConfig.DbPath); !os.IsNotExist(err) {
if stat, err := os.Stat(cfg.DbConfig.DbPath); !os.IsNotExist(err) {
info := stat.Sys()
if err := os.Chown(csConfig.DbConfig.DbPath, int(info.(*syscall.Stat_t).Uid), intID); err != nil {
return fmt.Errorf("unable to chown sqlite db file '%s': %s", csConfig.DbConfig.DbPath, err)
if err := os.Chown(cfg.DbConfig.DbPath, int(info.(*syscall.Stat_t).Uid), intID); err != nil {
return fmt.Errorf("unable to chown sqlite db file '%s': %s", cfg.DbConfig.DbPath, err)
}
}
if csConfig.DbConfig.Type == "sqlite" && csConfig.DbConfig.UseWal != nil && *csConfig.DbConfig.UseWal {
if cfg.DbConfig.Type == "sqlite" && cfg.DbConfig.UseWal != nil && *cfg.DbConfig.UseWal {
for _, ext := range []string{"-wal", "-shm"} {
file := csConfig.DbConfig.DbPath + ext
file := cfg.DbConfig.DbPath + ext
if stat, err := os.Stat(file); !os.IsNotExist(err) {
info := stat.Sys()
if err := os.Chown(file, int(info.(*syscall.Stat_t).Uid), intID); err != nil {

View file

@ -9,17 +9,21 @@ import (
"github.com/spf13/cobra"
)
type cliDashboard struct{}
type cliDashboard struct {
cfg configGetter
}
func NewCLIDashboard() *cliDashboard {
return &cliDashboard{}
func NewCLIDashboard(cfg configGetter) *cliDashboard {
return &cliDashboard{
cfg: cfg,
}
}
func (cli cliDashboard) NewCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "dashboard",
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
Run: func(_ *cobra.Command, _ []string) {
log.Infof("Dashboard command is disabled on %s", runtime.GOOS)
},
}

View file

@ -25,7 +25,7 @@ import (
var Client *apiclient.ApiClient
func DecisionsToTable(alerts *models.GetAlertsResponse, printMachine bool) error {
func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, printMachine bool) error {
/*here we cheat a bit : to make it more readable for the user, we dedup some entries*/
spamLimit := make(map[string]bool)
skipped := 0
@ -49,7 +49,8 @@ func DecisionsToTable(alerts *models.GetAlertsResponse, printMachine bool) error
alertItem.Decisions = newDecisions
}
if csConfig.Cscli.Output == "raw" {
switch cli.cfg().Cscli.Output {
case "raw":
csvwriter := csv.NewWriter(os.Stdout)
header := []string{"id", "source", "ip", "reason", "action", "country", "as", "events_count", "expiration", "simulated", "alert_id"}
@ -89,21 +90,24 @@ func DecisionsToTable(alerts *models.GetAlertsResponse, printMachine bool) error
}
csvwriter.Flush()
} else if csConfig.Cscli.Output == "json" {
case "json":
if *alerts == nil {
// avoid returning "null" in JSON output
// could be cleaner if we used slice of alerts directly
fmt.Println("[]")
return nil
}
x, _ := json.MarshalIndent(alerts, "", " ")
fmt.Printf("%s", string(x))
} else if csConfig.Cscli.Output == "human" {
case "human":
if len(*alerts) == 0 {
fmt.Println("No active decisions")
return nil
}
decisionsTable(color.Output, alerts, printMachine)
cli.decisionsTable(color.Output, alerts, printMachine)
if skipped > 0 {
fmt.Printf("%d duplicated entries skipped\n", skipped)
}
@ -112,14 +116,17 @@ func DecisionsToTable(alerts *models.GetAlertsResponse, printMachine bool) error
return nil
}
type cliDecisions struct {}
func NewCLIDecisions() *cliDecisions {
return &cliDecisions{}
type cliDecisions struct {
cfg configGetter
}
func (cli cliDecisions) NewCommand() *cobra.Command {
func NewCLIDecisions(cfg configGetter) *cliDecisions {
return &cliDecisions{
cfg: cfg,
}
}
func (cli *cliDecisions) NewCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "decisions [action]",
Short: "Manage decisions",
@ -130,16 +137,17 @@ func (cli cliDecisions) NewCommand() *cobra.Command {
Args: cobra.MinimumNArgs(1),
DisableAutoGenTag: true,
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
if err := csConfig.LoadAPIClient(); err != nil {
cfg := cli.cfg()
if err := cfg.LoadAPIClient(); err != nil {
return fmt.Errorf("loading api client: %w", err)
}
password := strfmt.Password(csConfig.API.Client.Credentials.Password)
apiurl, err := url.Parse(csConfig.API.Client.Credentials.URL)
password := strfmt.Password(cfg.API.Client.Credentials.Password)
apiurl, err := url.Parse(cfg.API.Client.Credentials.URL)
if err != nil {
return fmt.Errorf("parsing api url %s: %w", csConfig.API.Client.Credentials.URL, err)
return fmt.Errorf("parsing api url %s: %w", cfg.API.Client.Credentials.URL, err)
}
Client, err = apiclient.NewClient(&apiclient.Config{
MachineID: csConfig.API.Client.Credentials.Login,
MachineID: cfg.API.Client.Credentials.Login,
Password: password,
UserAgent: fmt.Sprintf("crowdsec/%s", version.String()),
URL: apiurl,
@ -148,19 +156,20 @@ func (cli cliDecisions) NewCommand() *cobra.Command {
if err != nil {
return fmt.Errorf("creating api client: %w", err)
}
return nil
},
}
cmd.AddCommand(cli.NewListCmd())
cmd.AddCommand(cli.NewAddCmd())
cmd.AddCommand(cli.NewDeleteCmd())
cmd.AddCommand(cli.NewImportCmd())
cmd.AddCommand(cli.newListCmd())
cmd.AddCommand(cli.newAddCmd())
cmd.AddCommand(cli.newDeleteCmd())
cmd.AddCommand(cli.newImportCmd())
return cmd
}
func (cli cliDecisions) NewListCmd() *cobra.Command {
func (cli *cliDecisions) newListCmd() *cobra.Command {
var filter = apiclient.AlertsListOpts{
ValueEquals: new(string),
ScopeEquals: new(string),
@ -262,7 +271,7 @@ cscli decisions list -t ban
return fmt.Errorf("unable to retrieve decisions: %w", err)
}
err = DecisionsToTable(alerts, printMachine)
err = cli.decisionsToTable(alerts, printMachine)
if err != nil {
return fmt.Errorf("unable to print decisions: %w", err)
}
@ -289,7 +298,7 @@ cscli decisions list -t ban
return cmd
}
func (cli cliDecisions) NewAddCmd() *cobra.Command {
func (cli *cliDecisions) newAddCmd() *cobra.Command {
var (
addIP string
addRange string
@ -325,7 +334,7 @@ cscli decisions add --scope username --value foobar
createdAt := time.Now().UTC().Format(time.RFC3339)
/*take care of shorthand options*/
if err := manageCliDecisionAlerts(&addIP, &addRange, &addScope, &addValue); err != nil {
if err = manageCliDecisionAlerts(&addIP, &addRange, &addScope, &addValue); err != nil {
return err
}
@ -341,7 +350,7 @@ cscli decisions add --scope username --value foobar
}
if addReason == "" {
addReason = fmt.Sprintf("manual '%s' from '%s'", addType, csConfig.API.Client.Credentials.Login)
addReason = fmt.Sprintf("manual '%s' from '%s'", addType, cli.cfg().API.Client.Credentials.Login)
}
decision := models.Decision{
Duration: &addDuration,
@ -384,6 +393,7 @@ cscli decisions add --scope username --value foobar
}
log.Info("Decision successfully added")
return nil
},
}
@ -400,7 +410,7 @@ cscli decisions add --scope username --value foobar
return cmd
}
func (cli cliDecisions) NewDeleteCmd() *cobra.Command {
func (cli *cliDecisions) newDeleteCmd() *cobra.Command {
var delFilter = apiclient.DecisionsDeleteOpts{
ScopeEquals: new(string),
ValueEquals: new(string),
@ -490,6 +500,7 @@ cscli decisions delete --type captcha
}
}
log.Infof("%s decision(s) deleted", decisions.NbDeleted)
return nil
},
}

View file

@ -67,7 +67,7 @@ func parseDecisionList(content []byte, format string) ([]decisionRaw, error) {
}
func (cli cliDecisions) runImport(cmd *cobra.Command, args []string) error {
func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error {
flags := cmd.Flags()
input, err := flags.GetString("input")
@ -236,13 +236,14 @@ func (cli cliDecisions) runImport(cmd *cobra.Command, args []string) error {
}
func (cli cliDecisions) NewImportCmd() *cobra.Command {
func (cli *cliDecisions) newImportCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "import [options]",
Short: "Import decisions from a file or pipe",
Long: "expected format:\n" +
"csv : any of duration,reason,scope,type,value, with a header line\n" +
"json :" + "`{" + `"duration" : "24h", "reason" : "my_scenario", "scope" : "ip", "type" : "ban", "value" : "x.y.z.z"` + "}`",
Args: cobra.NoArgs,
DisableAutoGenTag: true,
Example: `decisions.csv:
duration,scope,value

View file

@ -8,13 +8,15 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/models"
)
func decisionsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachine bool) {
func (cli *cliDecisions) decisionsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachine bool) {
t := newTable(out)
t.SetRowLines(false)
header := []string{"ID", "Source", "Scope:Value", "Reason", "Action", "Country", "AS", "Events", "expiration", "Alert ID"}
if printMachine {
header = append(header, "Machine")
}
t.SetHeaders(header...)
for _, alertItem := range *alerts {
@ -22,6 +24,7 @@ func decisionsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachin
if *alertItem.Simulated {
*decisionItem.Type = fmt.Sprintf("(simul)%s", *decisionItem.Type)
}
row := []string{
strconv.Itoa(int(decisionItem.ID)),
*decisionItem.Origin,
@ -42,5 +45,6 @@ func decisionsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachin
t.AddRow(row...)
}
}
t.Render()
}

View file

@ -16,33 +16,53 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/hubtest"
)
func GetLineCountForFile(filepath string) (int, error) {
func getLineCountForFile(filepath string) (int, error) {
f, err := os.Open(filepath)
if err != nil {
return 0, err
}
defer f.Close()
lc := 0
fs := bufio.NewReader(f)
for {
input, err := fs.ReadBytes('\n')
if len(input) > 1 {
lc++
}
if err != nil && err == io.EOF {
break
}
}
return lc, nil
}
type cliExplain struct{}
func NewCLIExplain() *cliExplain {
return &cliExplain{}
type cliExplain struct {
cfg configGetter
flags struct {
logFile string
dsn string
logLine string
logType string
details bool
skipOk bool
onlySuccessfulParsers bool
noClean bool
crowdsec string
labels string
}
}
func (cli cliExplain) NewCommand() *cobra.Command {
func NewCLIExplain(cfg configGetter) *cliExplain {
return &cliExplain{
cfg: cfg,
}
}
func (cli *cliExplain) NewCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "explain",
Short: "Explain log pipeline",
@ -57,118 +77,50 @@ tail -n 5 myfile.log | cscli explain --type nginx -f -
`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: cli.run,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
flags := cmd.Flags()
logFile, err := flags.GetString("file")
if err != nil {
return err
}
dsn, err := flags.GetString("dsn")
if err != nil {
return err
}
logLine, err := flags.GetString("log")
if err != nil {
return err
}
logType, err := flags.GetString("type")
if err != nil {
return err
}
if logLine == "" && logFile == "" && dsn == "" {
printHelp(cmd)
fmt.Println()
return fmt.Errorf("please provide --log, --file or --dsn flag")
}
if logType == "" {
printHelp(cmd)
fmt.Println()
return fmt.Errorf("please provide --type flag")
}
RunE: func(_ *cobra.Command, _ []string) error {
return cli.run()
},
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
fileInfo, _ := os.Stdin.Stat()
if logFile == "-" && ((fileInfo.Mode() & os.ModeCharDevice) == os.ModeCharDevice) {
if cli.flags.logFile == "-" && ((fileInfo.Mode() & os.ModeCharDevice) == os.ModeCharDevice) {
return fmt.Errorf("the option -f - is intended to work with pipes")
}
return nil
},
}
flags := cmd.Flags()
flags.StringP("file", "f", "", "Log file to test")
flags.StringP("dsn", "d", "", "DSN to test")
flags.StringP("log", "l", "", "Log line to test")
flags.StringP("type", "t", "", "Type of the acquisition to test")
flags.String("labels", "", "Additional labels to add to the acquisition format (key:value,key2:value2)")
flags.BoolP("verbose", "v", false, "Display individual changes")
flags.Bool("failures", false, "Only show failed lines")
flags.Bool("only-successful-parsers", false, "Only show successful parsers")
flags.String("crowdsec", "crowdsec", "Path to crowdsec")
flags.Bool("no-clean", false, "Don't clean runtime environment after tests")
flags.StringVarP(&cli.flags.logFile, "file", "f", "", "Log file to test")
flags.StringVarP(&cli.flags.dsn, "dsn", "d", "", "DSN to test")
flags.StringVarP(&cli.flags.logLine, "log", "l", "", "Log line to test")
flags.StringVarP(&cli.flags.logType, "type", "t", "", "Type of the acquisition to test")
flags.StringVar(&cli.flags.labels, "labels", "", "Additional labels to add to the acquisition format (key:value,key2:value2)")
flags.BoolVarP(&cli.flags.details, "verbose", "v", false, "Display individual changes")
flags.BoolVar(&cli.flags.skipOk, "failures", false, "Only show failed lines")
flags.BoolVar(&cli.flags.onlySuccessfulParsers, "only-successful-parsers", false, "Only show successful parsers")
flags.StringVar(&cli.flags.crowdsec, "crowdsec", "crowdsec", "Path to crowdsec")
flags.BoolVar(&cli.flags.noClean, "no-clean", false, "Don't clean runtime environment after tests")
cmd.MarkFlagRequired("type")
cmd.MarkFlagsOneRequired("log", "file", "dsn")
return cmd
}
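A trimmed, self-contained sketch of the refactor applied to cscli explain: flag values are bound once into a struct held by the cli object, instead of being re-read with flags.GetString inside RunE. Names are illustrative.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

type cliDemo struct {
	flags struct {
		logType string
		verbose bool
	}
}

func (cli *cliDemo) run() error {
	// By the time RunE fires, cobra/pflag has already populated the struct.
	fmt.Printf("type=%s verbose=%t\n", cli.flags.logType, cli.flags.verbose)
	return nil
}

func (cli *cliDemo) newCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(_ *cobra.Command, _ []string) error {
			return cli.run()
		},
	}

	flags := cmd.Flags()
	flags.StringVarP(&cli.flags.logType, "type", "t", "", "type of the acquisition to test")
	flags.BoolVarP(&cli.flags.verbose, "verbose", "v", false, "display individual changes")
	_ = cmd.MarkFlagRequired("type")

	return cmd
}

func main() {
	_ = (&cliDemo{}).newCommand().Execute()
}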
func (cli cliExplain) run(cmd *cobra.Command, args []string) error {
flags := cmd.Flags()
func (cli *cliExplain) run() error {
logFile := cli.flags.logFile
logLine := cli.flags.logLine
logType := cli.flags.logType
dsn := cli.flags.dsn
labels := cli.flags.labels
crowdsec := cli.flags.crowdsec
logFile, err := flags.GetString("file")
if err != nil {
return err
}
dsn, err := flags.GetString("dsn")
if err != nil {
return err
}
logLine, err := flags.GetString("log")
if err != nil {
return err
}
logType, err := flags.GetString("type")
if err != nil {
return err
}
opts := dumps.DumpOpts{}
opts.Details, err = flags.GetBool("verbose")
if err != nil {
return err
}
no_clean, err := flags.GetBool("no-clean")
if err != nil {
return err
}
opts.SkipOk, err = flags.GetBool("failures")
if err != nil {
return err
}
opts.ShowNotOkParsers, err = flags.GetBool("only-successful-parsers")
opts.ShowNotOkParsers = !opts.ShowNotOkParsers
if err != nil {
return err
}
crowdsec, err := flags.GetString("crowdsec")
if err != nil {
return err
}
labels, err := flags.GetString("labels")
if err != nil {
return err
opts := dumps.DumpOpts{
Details: cli.flags.details,
SkipOk: cli.flags.skipOk,
ShowNotOkParsers: !cli.flags.onlySuccessfulParsers,
}
var f *os.File
@ -176,22 +128,25 @@ func (cli cliExplain) run(cmd *cobra.Command, args []string) error {
// using empty string fallback to /tmp
dir, err := os.MkdirTemp("", "cscli_explain")
if err != nil {
return fmt.Errorf("couldn't create a temporary directory to store cscli explain result: %s", err)
return fmt.Errorf("couldn't create a temporary directory to store cscli explain result: %w", err)
}
defer func() {
if no_clean {
if cli.flags.noClean {
return
}
if _, err := os.Stat(dir); !os.IsNotExist(err) {
if err := os.RemoveAll(dir); err != nil {
log.Errorf("unable to delete temporary directory '%s': %s", dir, err)
}
}
}()
tmpFile := ""
// we create a temporary log file if a log line/stdin has been provided
if logLine != "" || logFile == "-" {
tmpFile = filepath.Join(dir, "cscli_test_tmp.log")
tmpFile := filepath.Join(dir, "cscli_test_tmp.log")
f, err = os.Create(tmpFile)
if err != nil {
return err
@ -221,6 +176,7 @@ func (cli cliExplain) run(cmd *cobra.Command, args []string) error {
log.Warnf("Failed to write %d lines to %s", errCount, tmpFile)
}
}
f.Close()
// this is the file that was going to be read by crowdsec anyway
logFile = tmpFile
@ -231,15 +187,20 @@ func (cli cliExplain) run(cmd *cobra.Command, args []string) error {
if err != nil {
return fmt.Errorf("unable to get absolute path of '%s', exiting", logFile)
}
dsn = fmt.Sprintf("file://%s", absolutePath)
lineCount, err := GetLineCountForFile(absolutePath)
lineCount, err := getLineCountForFile(absolutePath)
if err != nil {
return err
}
log.Debugf("file %s has %d lines", absolutePath, lineCount)
if lineCount == 0 {
return fmt.Errorf("the log file is empty: %s", absolutePath)
}
if lineCount > 100 {
log.Warnf("%s contains %d lines. This may take a lot of resources.", absolutePath, lineCount)
}
@ -250,15 +211,19 @@ func (cli cliExplain) run(cmd *cobra.Command, args []string) error {
}
cmdArgs := []string{"-c", ConfigFilePath, "-type", logType, "-dsn", dsn, "-dump-data", dir, "-no-api"}
if labels != "" {
log.Debugf("adding labels %s", labels)
cmdArgs = append(cmdArgs, "-label", labels)
}
crowdsecCmd := exec.Command(crowdsec, cmdArgs...)
output, err := crowdsecCmd.CombinedOutput()
if err != nil {
fmt.Println(string(output))
return fmt.Errorf("fail to run crowdsec for test: %v", err)
return fmt.Errorf("fail to run crowdsec for test: %w", err)
}
parserDumpFile := filepath.Join(dir, hubtest.ParserResultFileName)
@ -266,12 +231,12 @@ func (cli cliExplain) run(cmd *cobra.Command, args []string) error {
parserDump, err := dumps.LoadParserDump(parserDumpFile)
if err != nil {
return fmt.Errorf("unable to load parser dump result: %s", err)
return fmt.Errorf("unable to load parser dump result: %w", err)
}
bucketStateDump, err := dumps.LoadBucketPourDump(bucketStateDumpFile)
if err != nil {
return fmt.Errorf("unable to load bucket dump result: %s", err)
return fmt.Errorf("unable to load bucket dump result: %w", err)
}
dumps.DumpTree(*parserDump, *bucketStateDump, opts)

cmd/crowdsec-cli/flag.go (new file)
View file

@ -0,0 +1,29 @@
package main
// Custom types for flag validation and conversion.
import (
"errors"
)
type MachinePassword string
func (p *MachinePassword) String() string {
return string(*p)
}
func (p *MachinePassword) Set(v string) error {
// a password can't be more than 72 characters
// due to bcrypt limitations
if len(v) > 72 {
return errors.New("password too long (max 72 characters)")
}
*p = MachinePassword(v)
return nil
}
func (p *MachinePassword) Type() string {
return "string"
}
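MachinePassword implements pflag's Value interface (String, Set, Type), so it can be bound with Flags().VarP and the 72-character check runs during flag parsing, before RunE. A minimal sketch; the command and flag names are illustrative.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var password MachinePassword

	cmd := &cobra.Command{
		Use: "add",
		RunE: func(_ *cobra.Command, _ []string) error {
			fmt.Printf("password length: %d\n", len(password.String()))
			return nil
		},
	}

	// Set() rejects anything longer than 72 characters (the bcrypt limit)
	// before the command body ever runs.
	cmd.Flags().VarP(&password, "password", "p", "machine password (max 72 characters)")

	_ = cmd.Execute()
}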

View file

@ -13,13 +13,17 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
)
type cliHub struct{}
func NewCLIHub() *cliHub {
return &cliHub{}
type cliHub struct {
cfg configGetter
}
func (cli cliHub) NewCommand() *cobra.Command {
func NewCLIHub(cfg configGetter) *cliHub {
return &cliHub{
cfg: cfg,
}
}
func (cli *cliHub) NewCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "hub [action]",
Short: "Manage hub index",
@ -34,23 +38,16 @@ cscli hub upgrade`,
DisableAutoGenTag: true,
}
cmd.AddCommand(cli.NewListCmd())
cmd.AddCommand(cli.NewUpdateCmd())
cmd.AddCommand(cli.NewUpgradeCmd())
cmd.AddCommand(cli.NewTypesCmd())
cmd.AddCommand(cli.newListCmd())
cmd.AddCommand(cli.newUpdateCmd())
cmd.AddCommand(cli.newUpgradeCmd())
cmd.AddCommand(cli.newTypesCmd())
return cmd
}
func (cli cliHub) list(cmd *cobra.Command, args []string) error {
flags := cmd.Flags()
all, err := flags.GetBool("all")
if err != nil {
return err
}
hub, err := require.Hub(csConfig, nil, log.StandardLogger())
func (cli *cliHub) list(all bool) error {
hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger())
if err != nil {
return err
}
@ -80,24 +77,28 @@ func (cli cliHub) list(cmd *cobra.Command, args []string) error {
return nil
}
func (cli cliHub) NewListCmd() *cobra.Command {
func (cli *cliHub) newListCmd() *cobra.Command {
var all bool
cmd := &cobra.Command{
Use: "list [-a]",
Short: "List all installed configurations",
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: cli.list,
RunE: func(_ *cobra.Command, _ []string) error {
return cli.list(all)
},
}
flags := cmd.Flags()
flags.BoolP("all", "a", false, "List disabled items as well")
flags.BoolVarP(&all, "all", "a", false, "List disabled items as well")
return cmd
}
func (cli cliHub) update(cmd *cobra.Command, args []string) error {
local := csConfig.Hub
remote := require.RemoteHub(csConfig)
func (cli *cliHub) update() error {
local := cli.cfg().Hub
remote := require.RemoteHub(cli.cfg())
// don't use require.Hub because if there is no index file, it would fail
hub, err := cwhub.NewHub(local, remote, true, log.StandardLogger())
@ -112,7 +113,7 @@ func (cli cliHub) update(cmd *cobra.Command, args []string) error {
return nil
}
func (cli cliHub) NewUpdateCmd() *cobra.Command {
func (cli *cliHub) newUpdateCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "update",
Short: "Download the latest index (catalog of available configurations)",
@ -121,21 +122,16 @@ Fetches the .index.json file from the hub, containing the list of available conf
`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: cli.update,
RunE: func(_ *cobra.Command, _ []string) error {
return cli.update()
},
}
return cmd
}
func (cli cliHub) upgrade(cmd *cobra.Command, args []string) error {
flags := cmd.Flags()
force, err := flags.GetBool("force")
if err != nil {
return err
}
hub, err := require.Hub(csConfig, require.RemoteHub(csConfig), log.StandardLogger())
func (cli *cliHub) upgrade(force bool) error {
hub, err := require.Hub(cli.cfg(), require.RemoteHub(cli.cfg()), log.StandardLogger())
if err != nil {
return err
}
@ -167,7 +163,9 @@ func (cli cliHub) upgrade(cmd *cobra.Command, args []string) error {
return nil
}
func (cli cliHub) NewUpgradeCmd() *cobra.Command {
func (cli *cliHub) newUpgradeCmd() *cobra.Command {
var force bool
cmd := &cobra.Command{
Use: "upgrade",
Short: "Upgrade all configurations to their latest version",
@ -176,17 +174,19 @@ Upgrade all configs installed from Crowdsec Hub. Run 'sudo cscli hub update' if
`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: cli.upgrade,
RunE: func(_ *cobra.Command, _ []string) error {
return cli.upgrade(force)
},
}
flags := cmd.Flags()
flags.Bool("force", false, "Force upgrade: overwrite tainted and outdated files")
flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated files")
return cmd
}
func (cli cliHub) types(cmd *cobra.Command, args []string) error {
switch csConfig.Cscli.Output {
func (cli *cliHub) types() error {
switch cli.cfg().Cscli.Output {
case "human":
s, err := yaml.Marshal(cwhub.ItemTypes)
if err != nil {
@ -210,7 +210,7 @@ func (cli cliHub) types(cmd *cobra.Command, args []string) error {
return nil
}
func (cli cliHub) NewTypesCmd() *cobra.Command {
func (cli *cliHub) newTypesCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "types",
Short: "List supported item types",
@ -219,7 +219,9 @@ List the types of supported hub items.
`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: cli.types,
RunE: func(_ *cobra.Command, _ []string) error {
return cli.types()
},
}
return cmd
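
A minimal stand-alone sketch of the flag-binding pattern this file now applies everywhere: the flag is bound to a local variable and captured by the RunE closure, so the handler receives plain arguments instead of re-reading cmd.Flags() and handling a lookup error per flag. Command and flag names below are illustrative, not taken from cscli.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// newExampleListCmd binds --all to a local variable; the RunE closure sees the
// parsed value directly, so no GetBool call or error check is needed.
func newExampleListCmd() *cobra.Command {
	var all bool

	cmd := &cobra.Command{
		Use:               "list [-a]",
		Short:             "list things, optionally including disabled ones",
		Args:              cobra.NoArgs,
		DisableAutoGenTag: true,
		RunE: func(_ *cobra.Command, _ []string) error {
			fmt.Printf("all=%v\n", all)
			return nil
		},
	}

	cmd.Flags().BoolVarP(&all, "all", "a", false, "list disabled items as well")

	return cmd
}

func main() {
	if err := newExampleListCmd().Execute(); err != nil {
		fmt.Println(err)
	}
}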

View file

@ -51,33 +51,16 @@ func (cli cliItem) NewCommand() *cobra.Command {
DisableAutoGenTag: true,
}
cmd.AddCommand(cli.NewInstallCmd())
cmd.AddCommand(cli.NewRemoveCmd())
cmd.AddCommand(cli.NewUpgradeCmd())
cmd.AddCommand(cli.NewInspectCmd())
cmd.AddCommand(cli.NewListCmd())
cmd.AddCommand(cli.newInstallCmd())
cmd.AddCommand(cli.newRemoveCmd())
cmd.AddCommand(cli.newUpgradeCmd())
cmd.AddCommand(cli.newInspectCmd())
cmd.AddCommand(cli.newListCmd())
return cmd
}
func (cli cliItem) Install(cmd *cobra.Command, args []string) error {
flags := cmd.Flags()
downloadOnly, err := flags.GetBool("download-only")
if err != nil {
return err
}
force, err := flags.GetBool("force")
if err != nil {
return err
}
ignoreError, err := flags.GetBool("ignore")
if err != nil {
return err
}
func (cli cliItem) install(args []string, downloadOnly bool, force bool, ignoreError bool) error {
hub, err := require.Hub(csConfig, require.RemoteHub(csConfig), log.StandardLogger())
if err != nil {
return err
@ -110,7 +93,13 @@ func (cli cliItem) Install(cmd *cobra.Command, args []string) error {
return nil
}
func (cli cliItem) NewInstallCmd() *cobra.Command {
func (cli cliItem) newInstallCmd() *cobra.Command {
var (
downloadOnly bool
force bool
ignoreError bool
)
cmd := &cobra.Command{
Use: coalesce.String(cli.installHelp.use, "install [item]..."),
Short: coalesce.String(cli.installHelp.short, fmt.Sprintf("Install given %s", cli.oneOrMore)),
@ -121,13 +110,15 @@ func (cli cliItem) NewInstallCmd() *cobra.Command {
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return compAllItems(cli.name, args, toComplete)
},
RunE: cli.Install,
RunE: func(cmd *cobra.Command, args []string) error {
return cli.install(args, downloadOnly, force, ignoreError)
},
}
flags := cmd.Flags()
flags.BoolP("download-only", "d", false, "Only download packages, don't enable")
flags.Bool("force", false, "Force install: overwrite tainted and outdated files")
flags.Bool("ignore", false, fmt.Sprintf("Ignore errors when installing multiple %s", cli.name))
flags.BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable")
flags.BoolVar(&force, "force", false, "Force install: overwrite tainted and outdated files")
flags.BoolVar(&ignoreError, "ignore", false, fmt.Sprintf("Ignore errors when installing multiple %s", cli.name))
return cmd
}
@ -145,24 +136,7 @@ func istalledParentNames(item *cwhub.Item) []string {
return ret
}
func (cli cliItem) Remove(cmd *cobra.Command, args []string) error {
flags := cmd.Flags()
purge, err := flags.GetBool("purge")
if err != nil {
return err
}
force, err := flags.GetBool("force")
if err != nil {
return err
}
all, err := flags.GetBool("all")
if err != nil {
return err
}
func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error {
hub, err := require.Hub(csConfig, nil, log.StandardLogger())
if err != nil {
return err
@ -243,7 +217,13 @@ func (cli cliItem) Remove(cmd *cobra.Command, args []string) error {
return nil
}
func (cli cliItem) NewRemoveCmd() *cobra.Command {
func (cli cliItem) newRemoveCmd() *cobra.Command {
var (
purge bool
force bool
all bool
)
cmd := &cobra.Command{
Use: coalesce.String(cli.removeHelp.use, "remove [item]..."),
Short: coalesce.String(cli.removeHelp.short, fmt.Sprintf("Remove given %s", cli.oneOrMore)),
@ -254,30 +234,20 @@ func (cli cliItem) NewRemoveCmd() *cobra.Command {
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return compInstalledItems(cli.name, args, toComplete)
},
RunE: cli.Remove,
RunE: func(cmd *cobra.Command, args []string) error {
return cli.remove(args, purge, force, all)
},
}
flags := cmd.Flags()
flags.Bool("purge", false, "Delete source file too")
flags.Bool("force", false, "Force remove: remove tainted and outdated files")
flags.Bool("all", false, fmt.Sprintf("Remove all the %s", cli.name))
flags.BoolVar(&purge, "purge", false, "Delete source file too")
flags.BoolVar(&force, "force", false, "Force remove: remove tainted and outdated files")
flags.BoolVar(&all, "all", false, fmt.Sprintf("Remove all the %s", cli.name))
return cmd
}
func (cli cliItem) Upgrade(cmd *cobra.Command, args []string) error {
flags := cmd.Flags()
force, err := flags.GetBool("force")
if err != nil {
return err
}
all, err := flags.GetBool("all")
if err != nil {
return err
}
func (cli cliItem) upgrade(args []string, force bool, all bool) error {
hub, err := require.Hub(csConfig, require.RemoteHub(csConfig), log.StandardLogger())
if err != nil {
return err
@ -341,7 +311,12 @@ func (cli cliItem) Upgrade(cmd *cobra.Command, args []string) error {
return nil
}
func (cli cliItem) NewUpgradeCmd() *cobra.Command {
func (cli cliItem) newUpgradeCmd() *cobra.Command {
var (
all bool
force bool
)
cmd := &cobra.Command{
Use: coalesce.String(cli.upgradeHelp.use, "upgrade [item]..."),
Short: coalesce.String(cli.upgradeHelp.short, fmt.Sprintf("Upgrade given %s", cli.oneOrMore)),
@ -351,43 +326,27 @@ func (cli cliItem) NewUpgradeCmd() *cobra.Command {
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return compInstalledItems(cli.name, args, toComplete)
},
RunE: cli.Upgrade,
RunE: func(cmd *cobra.Command, args []string) error {
return cli.upgrade(args, force, all)
},
}
flags := cmd.Flags()
flags.BoolP("all", "a", false, fmt.Sprintf("Upgrade all the %s", cli.name))
flags.Bool("force", false, "Force upgrade: overwrite tainted and outdated files")
flags.BoolVarP(&all, "all", "a", false, fmt.Sprintf("Upgrade all the %s", cli.name))
flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated files")
return cmd
}
func (cli cliItem) Inspect(cmd *cobra.Command, args []string) error {
flags := cmd.Flags()
url, err := flags.GetString("url")
if err != nil {
return err
func (cli cliItem) inspect(args []string, url string, diff bool, rev bool, noMetrics bool) error {
if rev && !diff {
return fmt.Errorf("--rev can only be used with --diff")
}
if url != "" {
csConfig.Cscli.PrometheusUrl = url
}
diff, err := flags.GetBool("diff")
if err != nil {
return err
}
rev, err := flags.GetBool("rev")
if err != nil {
return err
}
noMetrics, err := flags.GetBool("no-metrics")
if err != nil {
return err
}
remote := (*cwhub.RemoteHubCfg)(nil)
if diff {
@ -411,7 +370,7 @@ func (cli cliItem) Inspect(cmd *cobra.Command, args []string) error {
continue
}
if err = InspectItem(item, !noMetrics); err != nil {
if err = inspectItem(item, !noMetrics); err != nil {
return err
}
@ -425,7 +384,14 @@ func (cli cliItem) Inspect(cmd *cobra.Command, args []string) error {
return nil
}
func (cli cliItem) NewInspectCmd() *cobra.Command {
func (cli cliItem) newInspectCmd() *cobra.Command {
var (
url string
diff bool
rev bool
noMetrics bool
)
cmd := &cobra.Command{
Use: coalesce.String(cli.inspectHelp.use, "inspect [item]..."),
Short: coalesce.String(cli.inspectHelp.short, fmt.Sprintf("Inspect given %s", cli.oneOrMore)),
@ -436,45 +402,21 @@ func (cli cliItem) NewInspectCmd() *cobra.Command {
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return compInstalledItems(cli.name, args, toComplete)
},
PreRunE: func(cmd *cobra.Command, _ []string) error {
flags := cmd.Flags()
diff, err := flags.GetBool("diff")
if err != nil {
return err
}
rev, err := flags.GetBool("rev")
if err != nil {
return err
}
if rev && !diff {
return fmt.Errorf("--rev can only be used with --diff")
}
return nil
RunE: func(cmd *cobra.Command, args []string) error {
return cli.inspect(args, url, diff, rev, noMetrics)
},
RunE: cli.Inspect,
}
flags := cmd.Flags()
flags.StringP("url", "u", "", "Prometheus url")
flags.Bool("diff", false, "Show diff with latest version (for tainted items)")
flags.Bool("rev", false, "Reverse diff output")
flags.Bool("no-metrics", false, "Don't show metrics (when cscli.output=human)")
flags.StringVarP(&url, "url", "u", "", "Prometheus url")
flags.BoolVar(&diff, "diff", false, "Show diff with latest version (for tainted items)")
flags.BoolVar(&rev, "rev", false, "Reverse diff output")
flags.BoolVar(&noMetrics, "no-metrics", false, "Don't show metrics (when cscli.output=human)")
return cmd
}
func (cli cliItem) List(cmd *cobra.Command, args []string) error {
flags := cmd.Flags()
all, err := flags.GetBool("all")
if err != nil {
return err
}
func (cli cliItem) list(args []string, all bool) error {
hub, err := require.Hub(csConfig, nil, log.StandardLogger())
if err != nil {
return err
@ -494,18 +436,22 @@ func (cli cliItem) List(cmd *cobra.Command, args []string) error {
return nil
}
func (cli cliItem) NewListCmd() *cobra.Command {
func (cli cliItem) newListCmd() *cobra.Command {
var all bool
cmd := &cobra.Command{
Use: coalesce.String(cli.listHelp.use, "list [item... | -a]"),
Short: coalesce.String(cli.listHelp.short, fmt.Sprintf("List %s", cli.oneOrMore)),
Long: coalesce.String(cli.listHelp.long, fmt.Sprintf("List of installed/available/specified %s", cli.name)),
Example: cli.listHelp.example,
DisableAutoGenTag: true,
RunE: cli.List,
RunE: func(_ *cobra.Command, args []string) error {
return cli.list(args, all)
},
}
flags := cmd.Flags()
flags.BoolP("all", "a", false, "List disabled items as well")
flags.BoolVarP(&all, "all", "a", false, "List disabled items as well")
return cmd
}
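
The inspect refactor above also folds the old PreRunE flag check into the handler, since the bound values now arrive as plain parameters. A sketch only, with illustrative names:

package main

import (
	"errors"
	"fmt"
)

// Sketch: the --rev/--diff combination is validated where the values are used,
// instead of in a separate PreRunE that re-reads cmd.Flags().
func inspectSketch(args []string, diff, rev, noMetrics bool) error {
	if rev && !diff {
		return errors.New("--rev can only be used with --diff")
	}

	for _, name := range args {
		fmt.Printf("would inspect %s (diff=%v rev=%v metrics=%v)\n", name, diff, rev, !noMetrics)
	}

	return nil
}

func main() {
	if err := inspectSketch([]string{"crowdsecurity/sshd-logs"}, false, true, false); err != nil {
		fmt.Println(err) // --rev can only be used with --diff
	}
}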

View file

@ -138,14 +138,12 @@ func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item
}
csvwriter.Flush()
default:
return fmt.Errorf("unknown output format '%s'", csConfig.Cscli.Output)
}
return nil
}
func InspectItem(item *cwhub.Item, showMetrics bool) error {
func inspectItem(item *cwhub.Item, showMetrics bool) error {
switch csConfig.Cscli.Output {
case "human", "raw":
enc := yaml.NewEncoder(os.Stdout)

View file

@ -6,6 +6,7 @@ import (
"fmt"
"net/url"
"os"
"slices"
"sort"
"strings"
@ -13,7 +14,6 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"slices"
"github.com/crowdsecurity/go-cs-lib/version"
@ -29,15 +29,27 @@ import (
const LAPIURLPrefix = "v1"
func runLapiStatus(cmd *cobra.Command, args []string) error {
password := strfmt.Password(csConfig.API.Client.Credentials.Password)
apiurl, err := url.Parse(csConfig.API.Client.Credentials.URL)
login := csConfig.API.Client.Credentials.Login
type cliLapi struct {
cfg configGetter
}
func NewCLILapi(cfg configGetter) *cliLapi {
return &cliLapi{
cfg: cfg,
}
}
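
The new cliLapi type keeps a configGetter instead of reading the csConfig global (the getter type itself is declared further down this diff, in the root command file). A self-contained sketch of that wiring, using stand-in types rather than the real csconfig package:

package main

import "fmt"

// Stand-in types: the subcommand stores the getter and only dereferences it
// when it actually runs, after the root command has loaded the configuration.

type exampleConfig struct {
	Output string
}

type exampleGetter func() *exampleConfig

type cliExample struct {
	cfg exampleGetter
}

func newCLIExample(cfg exampleGetter) *cliExample {
	return &cliExample{cfg: cfg}
}

func (cli *cliExample) status() error {
	fmt.Println("output format:", cli.cfg().Output)
	return nil
}

func main() {
	var loaded *exampleConfig

	cli := newCLIExample(func() *exampleConfig { return loaded })

	// loading happens after the constructor, before the command runs
	loaded = &exampleConfig{Output: "human"}

	if err := cli.status(); err != nil {
		fmt.Println(err)
	}
}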
func (cli *cliLapi) status() error {
cfg := cli.cfg()
password := strfmt.Password(cfg.API.Client.Credentials.Password)
login := cfg.API.Client.Credentials.Login
apiurl, err := url.Parse(cfg.API.Client.Credentials.URL)
if err != nil {
return fmt.Errorf("parsing api url: %w", err)
}
hub, err := require.Hub(csConfig, nil, nil)
hub, err := require.Hub(cfg, nil, nil)
if err != nil {
return err
}
@ -54,13 +66,14 @@ func runLapiStatus(cmd *cobra.Command, args []string) error {
if err != nil {
return fmt.Errorf("init default client: %w", err)
}
t := models.WatcherAuthRequest{
MachineID: &login,
Password: &password,
Scenarios: scenarios,
}
log.Infof("Loaded credentials from %s", csConfig.API.Client.CredentialsFilePath)
log.Infof("Loaded credentials from %s", cfg.API.Client.CredentialsFilePath)
log.Infof("Trying to authenticate with username %s on %s", login, apiurl)
_, _, err = Client.Auth.AuthenticateWatcher(context.Background(), t)
@ -69,26 +82,15 @@ func runLapiStatus(cmd *cobra.Command, args []string) error {
}
log.Infof("You can successfully interact with Local API (LAPI)")
return nil
}
func runLapiRegister(cmd *cobra.Command, args []string) error {
flags := cmd.Flags()
func (cli *cliLapi) register(apiURL string, outputFile string, machine string) error {
var err error
apiURL, err := flags.GetString("url")
if err != nil {
return err
}
outputFile, err := flags.GetString("file")
if err != nil {
return err
}
lapiUser, err := flags.GetString("machine")
if err != nil {
return err
}
lapiUser := machine
cfg := cli.cfg()
if lapiUser == "" {
lapiUser, err = generateID("")
@ -96,12 +98,15 @@ func runLapiRegister(cmd *cobra.Command, args []string) error {
return fmt.Errorf("unable to generate machine id: %w", err)
}
}
password := strfmt.Password(generatePassword(passwordLength))
if apiURL == "" {
if csConfig.API.Client == nil || csConfig.API.Client.Credentials == nil || csConfig.API.Client.Credentials.URL == "" {
if cfg.API.Client == nil || cfg.API.Client.Credentials == nil || cfg.API.Client.Credentials.URL == "" {
return fmt.Errorf("no Local API URL. Please provide it in your configuration or with the -u parameter")
}
apiURL = csConfig.API.Client.Credentials.URL
apiURL = cfg.API.Client.Credentials.URL
}
/*URL needs to end with /, but user doesn't care*/
if !strings.HasSuffix(apiURL, "/") {
@ -111,10 +116,12 @@ func runLapiRegister(cmd *cobra.Command, args []string) error {
if !strings.HasPrefix(apiURL, "http://") && !strings.HasPrefix(apiURL, "https://") {
apiURL = "http://" + apiURL
}
apiurl, err := url.Parse(apiURL)
if err != nil {
return fmt.Errorf("parsing api url: %w", err)
}
_, err = apiclient.RegisterClient(&apiclient.Config{
MachineID: lapiUser,
Password: password,
@ -130,138 +137,142 @@ func runLapiRegister(cmd *cobra.Command, args []string) error {
log.Printf("Successfully registered to Local API (LAPI)")
var dumpFile string
if outputFile != "" {
dumpFile = outputFile
} else if csConfig.API.Client.CredentialsFilePath != "" {
dumpFile = csConfig.API.Client.CredentialsFilePath
} else if cfg.API.Client.CredentialsFilePath != "" {
dumpFile = cfg.API.Client.CredentialsFilePath
} else {
dumpFile = ""
}
apiCfg := csconfig.ApiCredentialsCfg{
Login: lapiUser,
Password: password.String(),
URL: apiURL,
}
apiConfigDump, err := yaml.Marshal(apiCfg)
if err != nil {
return fmt.Errorf("unable to marshal api credentials: %w", err)
}
if dumpFile != "" {
err = os.WriteFile(dumpFile, apiConfigDump, 0o600)
if err != nil {
return fmt.Errorf("write api credentials to '%s' failed: %w", dumpFile, err)
}
log.Printf("Local API credentials written to '%s'", dumpFile)
} else {
fmt.Printf("%s\n", string(apiConfigDump))
}
log.Warning(ReloadMessage())
return nil
}
func NewLapiStatusCmd() *cobra.Command {
func (cli *cliLapi) newStatusCmd() *cobra.Command {
cmdLapiStatus := &cobra.Command{
Use: "status",
Short: "Check authentication to Local API (LAPI)",
Args: cobra.MinimumNArgs(0),
DisableAutoGenTag: true,
RunE: runLapiStatus,
RunE: func(cmd *cobra.Command, args []string) error {
return cli.status()
},
}
return cmdLapiStatus
}
func NewLapiRegisterCmd() *cobra.Command {
cmdLapiRegister := &cobra.Command{
func (cli *cliLapi) newRegisterCmd() *cobra.Command {
var (
apiURL string
outputFile string
machine string
)
cmd := &cobra.Command{
Use: "register",
Short: "Register a machine to Local API (LAPI)",
Long: `Register your machine to the Local API (LAPI).
Keep in mind the machine needs to be validated by an administrator on LAPI side to be effective.`,
Args: cobra.MinimumNArgs(0),
DisableAutoGenTag: true,
RunE: runLapiRegister,
RunE: func(_ *cobra.Command, _ []string) error {
return cli.register(apiURL, outputFile, machine)
},
}
flags := cmdLapiRegister.Flags()
flags.StringP("url", "u", "", "URL of the API (ie. http://127.0.0.1)")
flags.StringP("file", "f", "", "output file destination")
flags.String("machine", "", "Name of the machine to register with")
flags := cmd.Flags()
flags.StringVarP(&apiURL, "url", "u", "", "URL of the API (ie. http://127.0.0.1)")
flags.StringVarP(&outputFile, "file", "f", "", "output file destination")
flags.StringVar(&machine, "machine", "", "Name of the machine to register with")
return cmdLapiRegister
return cmd
}
func NewLapiCmd() *cobra.Command {
cmdLapi := &cobra.Command{
func (cli *cliLapi) NewCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "lapi [action]",
Short: "Manage interaction with Local API (LAPI)",
Args: cobra.MinimumNArgs(1),
DisableAutoGenTag: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := csConfig.LoadAPIClient(); err != nil {
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
if err := cli.cfg().LoadAPIClient(); err != nil {
return fmt.Errorf("loading api client: %w", err)
}
return nil
},
}
cmdLapi.AddCommand(NewLapiRegisterCmd())
cmdLapi.AddCommand(NewLapiStatusCmd())
cmdLapi.AddCommand(NewLapiContextCmd())
cmd.AddCommand(cli.newRegisterCmd())
cmd.AddCommand(cli.newStatusCmd())
cmd.AddCommand(cli.newContextCmd())
return cmdLapi
return cmd
}
func AddContext(key string, values []string) error {
func (cli *cliLapi) addContext(key string, values []string) error {
cfg := cli.cfg()
if err := alertcontext.ValidateContextExpr(key, values); err != nil {
return fmt.Errorf("invalid context configuration :%s", err)
return fmt.Errorf("invalid context configuration: %w", err)
}
if _, ok := csConfig.Crowdsec.ContextToSend[key]; !ok {
csConfig.Crowdsec.ContextToSend[key] = make([]string, 0)
if _, ok := cfg.Crowdsec.ContextToSend[key]; !ok {
cfg.Crowdsec.ContextToSend[key] = make([]string, 0)
log.Infof("key '%s' added", key)
}
data := csConfig.Crowdsec.ContextToSend[key]
data := cfg.Crowdsec.ContextToSend[key]
for _, val := range values {
if !slices.Contains(data, val) {
log.Infof("value '%s' added to key '%s'", val, key)
data = append(data, val)
}
csConfig.Crowdsec.ContextToSend[key] = data
cfg.Crowdsec.ContextToSend[key] = data
}
if err := csConfig.Crowdsec.DumpContextConfigFile(); err != nil {
if err := cfg.Crowdsec.DumpContextConfigFile(); err != nil {
return err
}
return nil
}
func NewLapiContextCmd() *cobra.Command {
cmdContext := &cobra.Command{
Use: "context [command]",
Short: "Manage context to send with alerts",
DisableAutoGenTag: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := csConfig.LoadCrowdsec(); err != nil {
fileNotFoundMessage := fmt.Sprintf("failed to open context file: open %s: no such file or directory", csConfig.Crowdsec.ConsoleContextPath)
if err.Error() != fileNotFoundMessage {
return fmt.Errorf("unable to load CrowdSec agent configuration: %w", err)
}
}
if csConfig.DisableAgent {
return errors.New("agent is disabled and lapi context can only be used on the agent")
}
func (cli *cliLapi) newContextAddCmd() *cobra.Command {
var (
keyToAdd string
valuesToAdd []string
)
return nil
},
Run: func(cmd *cobra.Command, args []string) {
printHelp(cmd)
},
}
var keyToAdd string
var valuesToAdd []string
cmdContextAdd := &cobra.Command{
cmd := &cobra.Command{
Use: "add",
Short: "Add context to send with alerts. You must specify the output key with the expr value you want",
Example: `cscli lapi context add --key source_ip --value evt.Meta.source_ip
@ -269,18 +280,18 @@ cscli lapi context add --key file_source --value evt.Line.Src
cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
hub, err := require.Hub(csConfig, nil, nil)
RunE: func(_ *cobra.Command, _ []string) error {
hub, err := require.Hub(cli.cfg(), nil, nil)
if err != nil {
return err
}
if err = alertcontext.LoadConsoleContext(csConfig, hub); err != nil {
if err = alertcontext.LoadConsoleContext(cli.cfg(), hub); err != nil {
return fmt.Errorf("while loading context: %w", err)
}
if keyToAdd != "" {
if err := AddContext(keyToAdd, valuesToAdd); err != nil {
if err := cli.addContext(keyToAdd, valuesToAdd); err != nil {
return err
}
return nil
@ -290,7 +301,7 @@ cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user
keySlice := strings.Split(v, ".")
key := keySlice[len(keySlice)-1]
value := []string{v}
if err := AddContext(key, value); err != nil {
if err := cli.addContext(key, value); err != nil {
return err
}
}
@ -298,31 +309,37 @@ cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user
return nil
},
}
cmdContextAdd.Flags().StringVarP(&keyToAdd, "key", "k", "", "The key of the different values to send")
cmdContextAdd.Flags().StringSliceVar(&valuesToAdd, "value", []string{}, "The expr fields to associate with the key")
cmdContextAdd.MarkFlagRequired("value")
cmdContext.AddCommand(cmdContextAdd)
cmdContextStatus := &cobra.Command{
flags := cmd.Flags()
flags.StringVarP(&keyToAdd, "key", "k", "", "The key of the different values to send")
flags.StringSliceVar(&valuesToAdd, "value", []string{}, "The expr fields to associate with the key")
cmd.MarkFlagRequired("value")
return cmd
}
func (cli *cliLapi) newContextStatusCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "status",
Short: "List context to send with alerts",
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
hub, err := require.Hub(csConfig, nil, nil)
RunE: func(_ *cobra.Command, _ []string) error {
cfg := cli.cfg()
hub, err := require.Hub(cfg, nil, nil)
if err != nil {
return err
}
if err = alertcontext.LoadConsoleContext(csConfig, hub); err != nil {
if err = alertcontext.LoadConsoleContext(cfg, hub); err != nil {
return fmt.Errorf("while loading context: %w", err)
}
if len(csConfig.Crowdsec.ContextToSend) == 0 {
if len(cfg.Crowdsec.ContextToSend) == 0 {
fmt.Println("No context found on this agent. You can use 'cscli lapi context add' to add context to your alerts.")
return nil
}
dump, err := yaml.Marshal(csConfig.Crowdsec.ContextToSend)
dump, err := yaml.Marshal(cfg.Crowdsec.ContextToSend)
if err != nil {
return fmt.Errorf("unable to show context status: %w", err)
}
@ -332,10 +349,14 @@ cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user
return nil
},
}
cmdContext.AddCommand(cmdContextStatus)
return cmd
}
func (cli *cliLapi) newContextDetectCmd() *cobra.Command {
var detectAll bool
cmdContextDetect := &cobra.Command{
cmd := &cobra.Command{
Use: "detect",
Short: "Detect available fields from the installed parsers",
Example: `cscli lapi context detect --all
@ -343,6 +364,7 @@ cscli lapi context detect crowdsecurity/sshd-logs
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
cfg := cli.cfg()
if !detectAll && len(args) == 0 {
log.Infof("Please provide parsers to detect or --all flag.")
printHelp(cmd)
@ -355,13 +377,13 @@ cscli lapi context detect crowdsecurity/sshd-logs
return fmt.Errorf("failed to init expr helpers: %w", err)
}
hub, err := require.Hub(csConfig, nil, nil)
hub, err := require.Hub(cfg, nil, nil)
if err != nil {
return err
}
csParsers := parser.NewParsers(hub)
if csParsers, err = parser.LoadParsers(csConfig, csParsers); err != nil {
if csParsers, err = parser.LoadParsers(cfg, csParsers); err != nil {
return fmt.Errorf("unable to load parsers: %w", err)
}
@ -418,47 +440,85 @@ cscli lapi context detect crowdsecurity/sshd-logs
return nil
},
}
cmdContextDetect.Flags().BoolVarP(&detectAll, "all", "a", false, "Detect evt field for all installed parser")
cmdContext.AddCommand(cmdContextDetect)
cmd.Flags().BoolVarP(&detectAll, "all", "a", false, "Detect evt field for all installed parsers")
cmdContextDelete := &cobra.Command{
return cmd
}
func (cli *cliLapi) newContextDeleteCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "delete",
DisableAutoGenTag: true,
RunE: func(_ *cobra.Command, _ []string) error {
filePath := csConfig.Crowdsec.ConsoleContextPath
filePath := cli.cfg().Crowdsec.ConsoleContextPath
if filePath == "" {
filePath = "the context file"
}
fmt.Printf("Command \"delete\" is deprecated, please manually edit %s.", filePath)
fmt.Printf("Command 'delete' is deprecated, please manually edit %s.", filePath)
return nil
},
}
cmdContext.AddCommand(cmdContextDelete)
return cmdContext
return cmd
}
func detectStaticField(GrokStatics []parser.ExtraField) []string {
func (cli *cliLapi) newContextCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "context [command]",
Short: "Manage context to send with alerts",
DisableAutoGenTag: true,
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
cfg := cli.cfg()
if err := cfg.LoadCrowdsec(); err != nil {
fileNotFoundMessage := fmt.Sprintf("failed to open context file: open %s: no such file or directory", cfg.Crowdsec.ConsoleContextPath)
if err.Error() != fileNotFoundMessage {
return fmt.Errorf("unable to load CrowdSec agent configuration: %w", err)
}
}
if cfg.DisableAgent {
return errors.New("agent is disabled and lapi context can only be used on the agent")
}
return nil
},
Run: func(cmd *cobra.Command, _ []string) {
printHelp(cmd)
},
}
cmd.AddCommand(cli.newContextAddCmd())
cmd.AddCommand(cli.newContextStatusCmd())
cmd.AddCommand(cli.newContextDetectCmd())
cmd.AddCommand(cli.newContextDeleteCmd())
return cmd
}
func detectStaticField(grokStatics []parser.ExtraField) []string {
ret := make([]string, 0)
for _, static := range GrokStatics {
for _, static := range grokStatics {
if static.Parsed != "" {
fieldName := fmt.Sprintf("evt.Parsed.%s", static.Parsed)
if !slices.Contains(ret, fieldName) {
ret = append(ret, fieldName)
}
}
if static.Meta != "" {
fieldName := fmt.Sprintf("evt.Meta.%s", static.Meta)
if !slices.Contains(ret, fieldName) {
ret = append(ret, fieldName)
}
}
if static.TargetByName != "" {
fieldName := static.TargetByName
if !strings.HasPrefix(fieldName, "evt.") {
fieldName = "evt." + fieldName
}
if !slices.Contains(ret, fieldName) {
ret = append(ret, fieldName)
}
@ -526,6 +586,7 @@ func detectSubNode(node parser.Node, parserCTX parser.UnixParserCtx) []string {
}
}
}
if subnode.Grok.RegexpName != "" {
grokCompiled, err := parserCTX.Grok.Get(subnode.Grok.RegexpName)
if err == nil {

View file

@ -5,9 +5,9 @@ import (
"encoding/csv"
"encoding/json"
"fmt"
"io"
"math/big"
"os"
"slices"
"strings"
"time"
@ -18,7 +18,6 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"gopkg.in/yaml.v3"
"slices"
"github.com/crowdsecurity/machineid"
@ -46,6 +45,7 @@ func generatePassword(length int) string {
if err != nil {
log.Fatalf("failed getting data from prng for password generation : %s", err)
}
buf[i] = charset[rInt.Int64()]
}
@ -60,12 +60,14 @@ func generateIDPrefix() (string, error) {
if err == nil {
return prefix, nil
}
log.Debugf("failed to get machine-id with usual files: %s", err)
bID, err := uuid.NewRandom()
if err == nil {
return bID.String(), nil
}
return "", fmt.Errorf("generating machine id: %w", err)
}
@ -76,11 +78,14 @@ func generateID(prefix string) (string, error) {
if prefix == "" {
prefix, err = generateIDPrefix()
}
if err != nil {
return "", err
}
prefix = strings.ReplaceAll(prefix, "-", "")[:32]
suffix := generatePassword(16)
return prefix + suffix, nil
}
@ -101,53 +106,18 @@ func getLastHeartbeat(m *ent.Machine) (string, bool) {
return hb, true
}
func getAgents(out io.Writer, dbClient *database.Client) error {
machines, err := dbClient.ListMachines()
if err != nil {
return fmt.Errorf("unable to list machines: %s", err)
}
switch csConfig.Cscli.Output {
case "human":
getAgentsTable(out, machines)
case "json":
enc := json.NewEncoder(out)
enc.SetIndent("", " ")
if err := enc.Encode(machines); err != nil {
return fmt.Errorf("failed to marshal")
}
return nil
case "raw":
csvwriter := csv.NewWriter(out)
err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat"})
if err != nil {
return fmt.Errorf("failed to write header: %s", err)
}
for _, m := range machines {
validated := "false"
if m.IsValidated {
validated = "true"
}
hb, _ := getLastHeartbeat(m)
err := csvwriter.Write([]string{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb})
if err != nil {
return fmt.Errorf("failed to write raw output: %w", err)
}
}
csvwriter.Flush()
default:
return fmt.Errorf("unknown output '%s'", csConfig.Cscli.Output)
}
return nil
type cliMachines struct {
db *database.Client
cfg configGetter
}
type cliMachines struct{}
func NewCLIMachines() *cliMachines {
return &cliMachines{}
func NewCLIMachines(cfg configGetter) *cliMachines {
return &cliMachines{
cfg: cfg,
}
}
func (cli cliMachines) NewCommand() *cobra.Command {
func (cli *cliMachines) NewCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "machines [action]",
Short: "Manage local API machines [requires local API]",
@ -159,27 +129,75 @@ Note: This command requires database direct access, so is intended to be run on
Aliases: []string{"machine"},
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
var err error
if err = require.LAPI(csConfig); err != nil {
if err = require.LAPI(cli.cfg()); err != nil {
return err
}
dbClient, err = database.NewClient(csConfig.DbConfig)
cli.db, err = database.NewClient(cli.cfg().DbConfig)
if err != nil {
return fmt.Errorf("unable to create new database client: %s", err)
}
return nil
},
}
cmd.AddCommand(cli.NewListCmd())
cmd.AddCommand(cli.NewAddCmd())
cmd.AddCommand(cli.NewDeleteCmd())
cmd.AddCommand(cli.NewValidateCmd())
cmd.AddCommand(cli.NewPruneCmd())
cmd.AddCommand(cli.newListCmd())
cmd.AddCommand(cli.newAddCmd())
cmd.AddCommand(cli.newDeleteCmd())
cmd.AddCommand(cli.newValidateCmd())
cmd.AddCommand(cli.newPruneCmd())
return cmd
}
func (cli cliMachines) NewListCmd() *cobra.Command {
func (cli *cliMachines) list() error {
out := color.Output
machines, err := cli.db.ListMachines()
if err != nil {
return fmt.Errorf("unable to list machines: %s", err)
}
switch cli.cfg().Cscli.Output {
case "human":
getAgentsTable(out, machines)
case "json":
enc := json.NewEncoder(out)
enc.SetIndent("", " ")
if err := enc.Encode(machines); err != nil {
return fmt.Errorf("failed to marshal")
}
return nil
case "raw":
csvwriter := csv.NewWriter(out)
err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat"})
if err != nil {
return fmt.Errorf("failed to write header: %s", err)
}
for _, m := range machines {
validated := "false"
if m.IsValidated {
validated = "true"
}
hb, _ := getLastHeartbeat(m)
if err := csvwriter.Write([]string{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb}); err != nil {
return fmt.Errorf("failed to write raw output: %w", err)
}
}
csvwriter.Flush()
}
return nil
}
func (cli *cliMachines) newListCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "list",
Short: "list all machines in the database",
@ -188,84 +206,60 @@ func (cli cliMachines) NewListCmd() *cobra.Command {
Args: cobra.NoArgs,
DisableAutoGenTag: true,
RunE: func(_ *cobra.Command, _ []string) error {
err := getAgents(color.Output, dbClient)
if err != nil {
return fmt.Errorf("unable to list machines: %s", err)
}
return nil
return cli.list()
},
}
return cmd
}
func (cli cliMachines) NewAddCmd() *cobra.Command {
func (cli *cliMachines) newAddCmd() *cobra.Command {
var (
password MachinePassword
dumpFile string
apiURL string
interactive bool
autoAdd bool
force bool
)
cmd := &cobra.Command{
Use: "add",
Short: "add a single machine to the database",
DisableAutoGenTag: true,
Long: `Register a new machine in the database. cscli should be on the same machine as LAPI.`,
Example: `
cscli machines add --auto
Example: `cscli machines add --auto
cscli machines add MyTestMachine --auto
cscli machines add MyTestMachine --password MyPassword
`,
RunE: cli.add,
cscli machines add -f- --auto > /tmp/mycreds.yaml`,
RunE: func(_ *cobra.Command, args []string) error {
return cli.add(args, string(password), dumpFile, apiURL, interactive, autoAdd, force)
},
}
flags := cmd.Flags()
flags.StringP("password", "p", "", "machine password to login to the API")
flags.StringP("file", "f", "", "output file destination (defaults to "+csconfig.DefaultConfigPath("local_api_credentials.yaml")+")")
flags.StringP("url", "u", "", "URL of the local API")
flags.BoolP("interactive", "i", false, "interfactive mode to enter the password")
flags.BoolP("auto", "a", false, "automatically generate password (and username if not provided)")
flags.Bool("force", false, "will force add the machine if it already exist")
flags.VarP(&password, "password", "p", "machine password to login to the API")
flags.StringVarP(&dumpFile, "file", "f", "", "output file destination (defaults to "+csconfig.DefaultConfigPath("local_api_credentials.yaml")+")")
flags.StringVarP(&apiURL, "url", "u", "", "URL of the local API")
flags.BoolVarP(&interactive, "interactive", "i", false, "interactive mode to enter the password")
flags.BoolVarP(&autoAdd, "auto", "a", false, "automatically generate password (and username if not provided)")
flags.BoolVar(&force, "force", false, "will force add the machine if it already exists")
return cmd
}
func (cli cliMachines) add(cmd *cobra.Command, args []string) error {
flags := cmd.Flags()
machinePassword, err := flags.GetString("password")
if err != nil {
return err
}
dumpFile, err := flags.GetString("file")
if err != nil {
return err
}
apiURL, err := flags.GetString("url")
if err != nil {
return err
}
interactive, err := flags.GetBool("interactive")
if err != nil {
return err
}
autoAdd, err := flags.GetBool("auto")
if err != nil {
return err
}
force, err := flags.GetBool("force")
if err != nil {
return err
}
var machineID string
func (cli *cliMachines) add(args []string, machinePassword string, dumpFile string, apiURL string, interactive bool, autoAdd bool, force bool) error {
var (
err error
machineID string
)
// create machineID if not specified by user
if len(args) == 0 {
if !autoAdd {
printHelp(cmd)
return nil
return fmt.Errorf("please specify a machine name to add, or use --auto")
}
machineID, err = generateID("")
if err != nil {
return fmt.Errorf("unable to generate machine id: %s", err)
@ -274,15 +268,18 @@ func (cli cliMachines) add(cmd *cobra.Command, args []string) error {
machineID = args[0]
}
clientCfg := cli.cfg().API.Client
serverCfg := cli.cfg().API.Server
/*check if file already exists*/
if dumpFile == "" && csConfig.API.Client != nil && csConfig.API.Client.CredentialsFilePath != "" {
credFile := csConfig.API.Client.CredentialsFilePath
if dumpFile == "" && clientCfg != nil && clientCfg.CredentialsFilePath != "" {
credFile := clientCfg.CredentialsFilePath
// use the default only if the file does not exist
_, err = os.Stat(credFile)
switch {
case os.IsNotExist(err) || force:
dumpFile = csConfig.API.Client.CredentialsFilePath
dumpFile = credFile
case err != nil:
return fmt.Errorf("unable to stat '%s': %s", credFile, err)
default:
@ -299,52 +296,89 @@ func (cli cliMachines) add(cmd *cobra.Command, args []string) error {
if !autoAdd {
return fmt.Errorf("please specify a password with --password or use --auto")
}
machinePassword = generatePassword(passwordLength)
} else if machinePassword == "" && interactive {
qs := &survey.Password{
Message: "Please provide a password for the machine",
Message: "Please provide a password for the machine:",
}
survey.AskOne(qs, &machinePassword)
}
password := strfmt.Password(machinePassword)
_, err = dbClient.CreateMachine(&machineID, &password, "", true, force, types.PasswordAuthType)
_, err = cli.db.CreateMachine(&machineID, &password, "", true, force, types.PasswordAuthType)
if err != nil {
return fmt.Errorf("unable to create machine: %s", err)
}
fmt.Printf("Machine '%s' successfully added to the local API.\n", machineID)
fmt.Fprintf(os.Stderr, "Machine '%s' successfully added to the local API.\n", machineID)
if apiURL == "" {
if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil && csConfig.API.Client.Credentials.URL != "" {
apiURL = csConfig.API.Client.Credentials.URL
} else if csConfig.API.Server != nil && csConfig.API.Server.ListenURI != "" {
apiURL = "http://" + csConfig.API.Server.ListenURI
if clientCfg != nil && clientCfg.Credentials != nil && clientCfg.Credentials.URL != "" {
apiURL = clientCfg.Credentials.URL
} else if serverCfg != nil && serverCfg.ListenURI != "" {
apiURL = "http://" + serverCfg.ListenURI
} else {
return fmt.Errorf("unable to dump an api URL. Please provide it in your configuration or with the -u parameter")
}
}
apiCfg := csconfig.ApiCredentialsCfg{
Login: machineID,
Password: password.String(),
URL: apiURL,
}
apiConfigDump, err := yaml.Marshal(apiCfg)
if err != nil {
return fmt.Errorf("unable to marshal api credentials: %s", err)
}
if dumpFile != "" && dumpFile != "-" {
err = os.WriteFile(dumpFile, apiConfigDump, 0o600)
if err != nil {
if err = os.WriteFile(dumpFile, apiConfigDump, 0o600); err != nil {
return fmt.Errorf("write api credentials in '%s' failed: %s", dumpFile, err)
}
fmt.Printf("API credentials written to '%s'.\n", dumpFile)
fmt.Fprintf(os.Stderr, "API credentials written to '%s'.\n", dumpFile)
} else {
fmt.Printf("%s\n", string(apiConfigDump))
fmt.Print(string(apiConfigDump))
}
return nil
}
func (cli cliMachines) NewDeleteCmd() *cobra.Command {
func (cli *cliMachines) deleteValid(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
machines, err := cli.db.ListMachines()
if err != nil {
cobra.CompError("unable to list machines " + err.Error())
}
ret := []string{}
for _, machine := range machines {
if strings.Contains(machine.MachineId, toComplete) && !slices.Contains(args, machine.MachineId) {
ret = append(ret, machine.MachineId)
}
}
return ret, cobra.ShellCompDirectiveNoFileComp
}
func (cli *cliMachines) delete(machines []string) error {
for _, machineID := range machines {
if err := cli.db.DeleteWatcher(machineID); err != nil {
log.Errorf("unable to delete machine '%s': %s", machineID, err)
return nil
}
log.Infof("machine '%s' deleted successfully", machineID)
}
return nil
}
func (cli *cliMachines) newDeleteCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "delete [machine_name]...",
Short: "delete machine(s) by name",
@ -352,40 +386,75 @@ func (cli cliMachines) NewDeleteCmd() *cobra.Command {
Args: cobra.MinimumNArgs(1),
Aliases: []string{"remove"},
DisableAutoGenTag: true,
ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
machines, err := dbClient.ListMachines()
if err != nil {
cobra.CompError("unable to list machines " + err.Error())
}
ret := make([]string, 0)
for _, machine := range machines {
if strings.Contains(machine.MachineId, toComplete) && !slices.Contains(args, machine.MachineId) {
ret = append(ret, machine.MachineId)
}
}
return ret, cobra.ShellCompDirectiveNoFileComp
ValidArgsFunction: cli.deleteValid,
RunE: func(_ *cobra.Command, args []string) error {
return cli.delete(args)
},
RunE: cli.delete,
}
return cmd
}
func (cli cliMachines) delete(_ *cobra.Command, args []string) error {
for _, machineID := range args {
err := dbClient.DeleteWatcher(machineID)
if err != nil {
log.Errorf("unable to delete machine '%s': %s", machineID, err)
func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force bool) error {
if duration < 2*time.Minute && !notValidOnly {
if yes, err := askYesNo(
"The duration you provided is less than 2 minutes. " +
"This can break installations if the machines are only temporarily disconnected. Continue?", false); err != nil {
return err
} else if !yes {
fmt.Println("User aborted prune. No changes were made.")
return nil
}
log.Infof("machine '%s' deleted successfully", machineID)
}
machines := []*ent.Machine{}
if pending, err := cli.db.QueryPendingMachine(); err == nil {
machines = append(machines, pending...)
}
if !notValidOnly {
if pending, err := cli.db.QueryLastValidatedHeartbeatLT(time.Now().UTC().Add(duration)); err == nil {
machines = append(machines, pending...)
}
}
if len(machines) == 0 {
fmt.Println("no machines to prune")
return nil
}
getAgentsTable(color.Output, machines)
if !force {
if yes, err := askYesNo(
"You are about to PERMANENTLY remove the above machines from the database. " +
"These will NOT be recoverable. Continue?", false); err != nil {
return err
} else if !yes {
fmt.Println("User aborted prune. No changes were made.")
return nil
}
}
deleted, err := cli.db.BulkDeleteWatchers(machines)
if err != nil {
return fmt.Errorf("unable to prune machines: %s", err)
}
fmt.Fprintf(os.Stderr, "successfully delete %d machines\n", deleted)
return nil
}
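
prune() now delegates its confirmation prompts to an askYesNo helper that is not part of this hunk. A guess at its shape, assuming it wraps the survey.Confirm prompt that the removed inline code used (import path assumed to be the survey/v2 package already used by this file):

package main

import (
	"fmt"

	"github.com/AlecAivazis/survey/v2"
)

// askYesNo is not shown in this diff; this sketch mirrors the survey.Confirm
// prompt the old prune code built inline.
func askYesNo(message string, defaultAnswer bool) (bool, error) {
	var answer bool

	prompt := &survey.Confirm{
		Message: message,
		Default: defaultAnswer,
	}

	if err := survey.AskOne(prompt, &answer); err != nil {
		return defaultAnswer, fmt.Errorf("unable to ask for confirmation: %w", err)
	}

	return answer, nil
}

func main() {
	yes, err := askYesNo("Continue?", false)
	fmt.Println(yes, err)
}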
func (cli cliMachines) NewPruneCmd() *cobra.Command {
var parsedDuration time.Duration
func (cli *cliMachines) newPruneCmd() *cobra.Command {
var (
duration time.Duration
notValidOnly bool
force bool
)
const defaultDuration = 10 * time.Minute
cmd := &cobra.Command{
Use: "prune",
Short: "prune multiple machines from the database",
@ -395,76 +464,30 @@ cscli machines prune --duration 1h
cscli machines prune --not-validated-only --force`,
Args: cobra.NoArgs,
DisableAutoGenTag: true,
PreRunE: func(cmd *cobra.Command, _ []string) error {
dur, _ := cmd.Flags().GetString("duration")
var err error
parsedDuration, err = time.ParseDuration(fmt.Sprintf("-%s", dur))
if err != nil {
return fmt.Errorf("unable to parse duration '%s': %s", dur, err)
}
return nil
},
RunE: func(cmd *cobra.Command, _ []string) error {
notValidOnly, _ := cmd.Flags().GetBool("not-validated-only")
force, _ := cmd.Flags().GetBool("force")
if parsedDuration >= 0-60*time.Second && !notValidOnly {
var answer bool
prompt := &survey.Confirm{
Message: "The duration you provided is less than or equal 60 seconds this can break installations do you want to continue ?",
Default: false,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask about prune check: %s", err)
}
if !answer {
fmt.Println("user aborted prune no changes were made")
return nil
}
}
machines := make([]*ent.Machine, 0)
if pending, err := dbClient.QueryPendingMachine(); err == nil {
machines = append(machines, pending...)
}
if !notValidOnly {
if pending, err := dbClient.QueryLastValidatedHeartbeatLT(time.Now().UTC().Add(parsedDuration)); err == nil {
machines = append(machines, pending...)
}
}
if len(machines) == 0 {
fmt.Println("no machines to prune")
return nil
}
getAgentsTable(color.Output, machines)
if !force {
var answer bool
prompt := &survey.Confirm{
Message: "You are about to PERMANENTLY remove the above machines from the database these will NOT be recoverable, continue ?",
Default: false,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask about prune check: %s", err)
}
if !answer {
fmt.Println("user aborted prune no changes were made")
return nil
}
}
nbDeleted, err := dbClient.BulkDeleteWatchers(machines)
if err != nil {
return fmt.Errorf("unable to prune machines: %s", err)
}
fmt.Printf("successfully delete %d machines\n", nbDeleted)
return nil
RunE: func(_ *cobra.Command, _ []string) error {
return cli.prune(duration, notValidOnly, force)
},
}
cmd.Flags().StringP("duration", "d", "10m", "duration of time since validated machine last heartbeat")
cmd.Flags().Bool("not-validated-only", false, "only prune machines that are not validated")
cmd.Flags().Bool("force", false, "force prune without asking for confirmation")
flags := cmd.Flags()
flags.DurationVarP(&duration, "duration", "d", defaultDuration, "duration of time since validated machine last heartbeat")
flags.BoolVar(&notValidOnly, "not-validated-only", false, "only prune machines that are not validated")
flags.BoolVar(&force, "force", false, "force prune without asking for confirmation")
return cmd
}
func (cli cliMachines) NewValidateCmd() *cobra.Command {
func (cli *cliMachines) validate(machineID string) error {
if err := cli.db.ValidateMachine(machineID); err != nil {
return fmt.Errorf("unable to validate machine '%s': %s", machineID, err)
}
log.Infof("machine '%s' validated successfully", machineID)
return nil
}
func (cli *cliMachines) newValidateCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "validate",
Short: "validate a machine to access the local API",
@ -472,14 +495,8 @@ func (cli cliMachines) NewValidateCmd() *cobra.Command {
Example: `cscli machines validate "machine_name"`,
Args: cobra.ExactArgs(1),
DisableAutoGenTag: true,
RunE: func(_ *cobra.Command, args []string) error {
machineID := args[0]
if err := dbClient.ValidateMachine(machineID); err != nil {
return fmt.Errorf("unable to validate machine '%s': %s", machineID, err)
}
log.Infof("machine '%s' validated successfully", machineID)
return nil
RunE: func(cmd *cobra.Command, args []string) error {
return cli.validate(args[0])
},
}

View file

@ -15,43 +15,88 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/fflag"
)
var trace_lvl, dbg_lvl, nfo_lvl, wrn_lvl, err_lvl bool
var ConfigFilePath string
var csConfig *csconfig.Config
var dbClient *database.Client
var OutputFormat string
var OutputColor string
type configGetter func() *csconfig.Config
var mergedConfig string
// flagBranch overrides the value in csConfig.Cscli.HubBranch
var flagBranch = ""
type cliRoot struct {
logTrace bool
logDebug bool
logInfo bool
logWarn bool
logErr bool
outputColor string
outputFormat string
// flagBranch overrides the value in csConfig.Cscli.HubBranch
flagBranch string
}
func initConfig() {
var err error
func newCliRoot() *cliRoot {
return &cliRoot{}
}
if trace_lvl {
log.SetLevel(log.TraceLevel)
} else if dbg_lvl {
log.SetLevel(log.DebugLevel)
} else if nfo_lvl {
log.SetLevel(log.InfoLevel)
} else if wrn_lvl {
log.SetLevel(log.WarnLevel)
} else if err_lvl {
log.SetLevel(log.ErrorLevel)
// cfg() is a helper function to get the configuration loaded from config.yaml,
// we pass it to subcommands because the file is not read until the Execute() call
func (cli *cliRoot) cfg() *csconfig.Config {
return csConfig
}
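
A stand-alone sketch, with a stand-in config type, of why the subcommands receive this getter rather than the configuration itself: the cobra.OnInitialize hook only runs when Execute() dispatches a command, so the value behind the getter is not populated at the time AddCommand() is called.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

type exampleConfig struct {
	Output string
}

var loadedCfg *exampleConfig

func main() {
	root := &cobra.Command{Use: "example"}

	// deferred: runs right before the selected subcommand, not at construction time
	cobra.OnInitialize(func() {
		loadedCfg = &exampleConfig{Output: "human"}
	})

	getter := func() *exampleConfig { return loadedCfg }

	root.AddCommand(&cobra.Command{
		Use: "show",
		RunE: func(_ *cobra.Command, _ []string) error {
			// getter() is non-nil here, even though loadedCfg was nil above
			fmt.Println(getter().Output)
			return nil
		},
	})

	if err := root.Execute(); err != nil {
		fmt.Println(err)
	}
}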
// wantedLogLevel returns the log level requested in the command line flags.
func (cli *cliRoot) wantedLogLevel() log.Level {
switch {
case cli.logTrace:
return log.TraceLevel
case cli.logDebug:
return log.DebugLevel
case cli.logInfo:
return log.InfoLevel
case cli.logWarn:
return log.WarnLevel
case cli.logErr:
return log.ErrorLevel
default:
return log.InfoLevel
}
}
// loadConfigFor loads the configuration file for the given sub-command.
// If the sub-command does not need it, it returns a default configuration.
func loadConfigFor(command string) (*csconfig.Config, string, error) {
noNeedConfig := []string{
"doc",
"help",
"completion",
"version",
"hubtest",
}
if !slices.Contains(NoNeedConfig, os.Args[1]) {
if !slices.Contains(noNeedConfig, command) {
log.Debugf("Using %s as configuration file", ConfigFilePath)
csConfig, mergedConfig, err = csconfig.NewConfig(ConfigFilePath, false, false, true)
config, merged, err := csconfig.NewConfig(ConfigFilePath, false, false, true)
if err != nil {
log.Fatal(err)
return nil, "", err
}
} else {
csConfig = csconfig.NewDefaultConfig()
return config, merged, nil
}
return csconfig.NewDefaultConfig(), "", nil
}
// initialize is called before the subcommand is executed.
func (cli *cliRoot) initialize() {
var err error
log.SetLevel(cli.wantedLogLevel())
csConfig, mergedConfig, err = loadConfigFor(os.Args[1])
if err != nil {
log.Fatal(err)
}
// recap of the enabled feature flags, because logging
@ -60,20 +105,22 @@ func initConfig() {
log.Debugf("Enabled feature flags: %s", fflist)
}
if flagBranch != "" {
csConfig.Cscli.HubBranch = flagBranch
if cli.flagBranch != "" {
csConfig.Cscli.HubBranch = cli.flagBranch
}
if OutputFormat != "" {
csConfig.Cscli.Output = OutputFormat
if OutputFormat != "json" && OutputFormat != "raw" && OutputFormat != "human" {
log.Fatalf("output format %s unknown", OutputFormat)
}
if cli.outputFormat != "" {
csConfig.Cscli.Output = cli.outputFormat
}
if csConfig.Cscli.Output == "" {
csConfig.Cscli.Output = "human"
}
if csConfig.Cscli.Output != "human" && csConfig.Cscli.Output != "json" && csConfig.Cscli.Output != "raw" {
log.Fatalf("output format '%s' not supported: must be one of human, json, raw", csConfig.Cscli.Output)
}
if csConfig.Cscli.Output == "json" {
log.SetFormatter(&log.JSONFormatter{})
log.SetLevel(log.ErrorLevel)
@ -81,11 +128,11 @@ func initConfig() {
log.SetLevel(log.ErrorLevel)
}
if OutputColor != "" {
csConfig.Cscli.Color = OutputColor
if cli.outputColor != "" {
csConfig.Cscli.Color = cli.outputColor
if OutputColor != "yes" && OutputColor != "no" && OutputColor != "auto" {
log.Fatalf("output color %s unknown", OutputColor)
if cli.outputColor != "yes" && cli.outputColor != "no" && cli.outputColor != "auto" {
log.Fatalf("output color %s unknown", cli.outputColor)
}
}
}
@ -98,15 +145,25 @@ var validArgs = []string{
"postoverflows", "scenarios", "simulation", "support", "version",
}
var NoNeedConfig = []string{
"doc",
"help",
"completion",
"version",
"hubtest",
func (cli *cliRoot) colorize(cmd *cobra.Command) {
cc.Init(&cc.Config{
RootCmd: cmd,
Headings: cc.Yellow,
Commands: cc.Green + cc.Bold,
CmdShortDescr: cc.Cyan,
Example: cc.Italic,
ExecName: cc.Bold,
Aliases: cc.Bold + cc.Italic,
FlagsDataType: cc.White,
Flags: cc.Green,
FlagsDescr: cc.Cyan,
NoExtraNewlines: true,
NoBottomNewline: true,
})
cmd.SetOut(color.Output)
}
func main() {
func (cli *cliRoot) NewCommand() *cobra.Command {
// set the formatter asap and worry about level later
logFormatter := &log.TextFormatter{TimestampFormat: time.RFC3339, FullTimestamp: true}
log.SetFormatter(logFormatter)
@ -131,31 +188,25 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall
/*TBD examples*/
}
cc.Init(&cc.Config{
RootCmd: cmd,
Headings: cc.Yellow,
Commands: cc.Green + cc.Bold,
CmdShortDescr: cc.Cyan,
Example: cc.Italic,
ExecName: cc.Bold,
Aliases: cc.Bold + cc.Italic,
FlagsDataType: cc.White,
Flags: cc.Green,
FlagsDescr: cc.Cyan,
})
cmd.SetOut(color.Output)
cli.colorize(cmd)
cmd.PersistentFlags().StringVarP(&ConfigFilePath, "config", "c", csconfig.DefaultConfigPath("config.yaml"), "path to crowdsec config file")
cmd.PersistentFlags().StringVarP(&OutputFormat, "output", "o", "", "Output format: human, json, raw")
cmd.PersistentFlags().StringVarP(&OutputColor, "color", "", "auto", "Output color: yes, no, auto")
cmd.PersistentFlags().BoolVar(&dbg_lvl, "debug", false, "Set logging to debug")
cmd.PersistentFlags().BoolVar(&nfo_lvl, "info", false, "Set logging to info")
cmd.PersistentFlags().BoolVar(&wrn_lvl, "warning", false, "Set logging to warning")
cmd.PersistentFlags().BoolVar(&err_lvl, "error", false, "Set logging to error")
cmd.PersistentFlags().BoolVar(&trace_lvl, "trace", false, "Set logging to trace")
/*don't sort flags so we can enforce order*/
cmd.Flags().SortFlags = false
cmd.PersistentFlags().StringVar(&flagBranch, "branch", "", "Override hub branch on github")
if err := cmd.PersistentFlags().MarkHidden("branch"); err != nil {
pflags := cmd.PersistentFlags()
pflags.SortFlags = false
pflags.StringVarP(&ConfigFilePath, "config", "c", csconfig.DefaultConfigPath("config.yaml"), "path to crowdsec config file")
pflags.StringVarP(&cli.outputFormat, "output", "o", "", "Output format: human, json, raw")
pflags.StringVarP(&cli.outputColor, "color", "", "auto", "Output color: yes, no, auto")
pflags.BoolVar(&cli.logDebug, "debug", false, "Set logging to debug")
pflags.BoolVar(&cli.logInfo, "info", false, "Set logging to info")
pflags.BoolVar(&cli.logWarn, "warning", false, "Set logging to warning")
pflags.BoolVar(&cli.logErr, "error", false, "Set logging to error")
pflags.BoolVar(&cli.logTrace, "trace", false, "Set logging to trace")
pflags.StringVar(&cli.flagBranch, "branch", "", "Override hub branch on github")
if err := pflags.MarkHidden("branch"); err != nil {
log.Fatalf("failed to hide flag: %s", err)
}
@ -175,33 +226,29 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall
}
if len(os.Args) > 1 {
cobra.OnInitialize(initConfig)
cobra.OnInitialize(cli.initialize)
}
/*don't sort flags so we can enforce order*/
cmd.Flags().SortFlags = false
cmd.PersistentFlags().SortFlags = false
cmd.AddCommand(NewCLIDoc().NewCommand(cmd))
cmd.AddCommand(NewCLIVersion().NewCommand())
cmd.AddCommand(NewConfigCmd())
cmd.AddCommand(NewCLIHub().NewCommand())
cmd.AddCommand(NewMetricsCmd())
cmd.AddCommand(NewCLIDashboard().NewCommand())
cmd.AddCommand(NewCLIDecisions().NewCommand())
cmd.AddCommand(NewCLIAlerts().NewCommand())
cmd.AddCommand(NewCLISimulation().NewCommand())
cmd.AddCommand(NewCLIBouncers().NewCommand())
cmd.AddCommand(NewCLIMachines().NewCommand())
cmd.AddCommand(NewCLIHub(cli.cfg).NewCommand())
cmd.AddCommand(NewCLIMetrics(cli.cfg).NewCommand())
cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand())
cmd.AddCommand(NewCLIDecisions(cli.cfg).NewCommand())
cmd.AddCommand(NewCLIAlerts(cli.cfg).NewCommand())
cmd.AddCommand(NewCLISimulation(cli.cfg).NewCommand())
cmd.AddCommand(NewCLIBouncers(cli.cfg).NewCommand())
cmd.AddCommand(NewCLIMachines(cli.cfg).NewCommand())
cmd.AddCommand(NewCLICapi().NewCommand())
cmd.AddCommand(NewLapiCmd())
cmd.AddCommand(NewCLILapi(cli.cfg).NewCommand())
cmd.AddCommand(NewCompletionCmd())
cmd.AddCommand(NewConsoleCmd())
cmd.AddCommand(NewCLIExplain().NewCommand())
cmd.AddCommand(NewCLIConsole(cli.cfg).NewCommand())
cmd.AddCommand(NewCLIExplain(cli.cfg).NewCommand())
cmd.AddCommand(NewCLIHubTest().NewCommand())
cmd.AddCommand(NewCLINotifications().NewCommand())
cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand())
cmd.AddCommand(NewCLISupport().NewCommand())
cmd.AddCommand(NewCLIPapi().NewCommand())
cmd.AddCommand(NewCLIPapi(cli.cfg).NewCommand())
cmd.AddCommand(NewCLICollection().NewCommand())
cmd.AddCommand(NewCLIParser().NewCommand())
cmd.AddCommand(NewCLIScenario().NewCommand())
@ -214,10 +261,11 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall
cmd.AddCommand(NewSetupCmd())
}
if fflag.PapiClient.IsEnabled() {
cmd.AddCommand(NewCLIPapi().NewCommand())
}
return cmd
}
func main() {
cmd := newCliRoot().NewCommand()
if err := cmd.Execute(); err != nil {
log.Fatal(err)
}

View file

@ -2,6 +2,7 @@ package main
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@ -16,11 +17,63 @@ import (
"github.com/spf13/cobra"
"gopkg.in/yaml.v3"
"github.com/crowdsecurity/go-cs-lib/maptools"
"github.com/crowdsecurity/go-cs-lib/trace"
)
// FormatPrometheusMetrics is a complete rip from prom2json
func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error {
type (
statAcquis map[string]map[string]int
statParser map[string]map[string]int
statBucket map[string]map[string]int
statWhitelist map[string]map[string]map[string]int
statLapi map[string]map[string]int
statLapiMachine map[string]map[string]map[string]int
statLapiBouncer map[string]map[string]map[string]int
statLapiDecision map[string]struct {
NonEmpty int
Empty int
}
statDecision map[string]map[string]map[string]int
statAppsecEngine map[string]map[string]int
statAppsecRule map[string]map[string]map[string]int
statAlert map[string]int
statStash map[string]struct {
Type string
Count int
}
)
var (
ErrMissingConfig = errors.New("prometheus section missing, can't show metrics")
ErrMetricsDisabled = errors.New("prometheus is not enabled, can't show metrics")
)
type metricSection interface {
Table(out io.Writer, noUnit bool, showEmpty bool)
Description() (string, string)
}
type metricStore map[string]metricSection
func NewMetricStore() metricStore {
return metricStore{
"acquisition": statAcquis{},
"scenarios": statBucket{},
"parsers": statParser{},
"lapi": statLapi{},
"lapi-machine": statLapiMachine{},
"lapi-bouncer": statLapiBouncer{},
"lapi-decisions": statLapiDecision{},
"decisions": statDecision{},
"alerts": statAlert{},
"stash": statStash{},
"appsec-engine": statAppsecEngine{},
"appsec-rule": statAppsecRule{},
"whitelists": statWhitelist{},
}
}
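
The rewritten switch further down replaces the repeated inline map initialisation with calls such as mBucket.Process(name, "instantiation", ival). The Process methods themselves are defined elsewhere in this change; for the two-level map types they presumably mirror the logic they replace, roughly:

// Not shown in this hunk: a guess at Process for the map-of-maps stat types,
// mirroring the inline code it replaces (create the inner map, then add).
func (s statBucket) Process(bucket, metric string, val int) {
	if _, ok := s[bucket]; !ok {
		s[bucket] = make(map[string]int)
	}

	s[bucket][metric] += val
}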
func (ms metricStore) Fetch(url string) error {
mfChan := make(chan *dto.MetricFamily, 1024)
errChan := make(chan error, 1)
@ -33,9 +86,10 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error
transport.ResponseHeaderTimeout = time.Minute
go func() {
defer trace.CatchPanic("crowdsec/ShowPrometheus")
err := prom2json.FetchMetricFamilies(url, mfChan, transport)
if err != nil {
errChan <- fmt.Errorf("failed to fetch prometheus metrics: %w", err)
errChan <- fmt.Errorf("failed to fetch metrics: %w", err)
return
}
errChan <- nil
@ -50,42 +104,42 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error
return err
}
log.Debugf("Finished reading prometheus output, %d entries", len(result))
log.Debugf("Finished reading metrics output, %d entries", len(result))
/*walk*/
lapi_decisions_stats := map[string]struct {
NonEmpty int
Empty int
}{}
acquis_stats := map[string]map[string]int{}
parsers_stats := map[string]map[string]int{}
buckets_stats := map[string]map[string]int{}
lapi_stats := map[string]map[string]int{}
lapi_machine_stats := map[string]map[string]map[string]int{}
lapi_bouncer_stats := map[string]map[string]map[string]int{}
decisions_stats := map[string]map[string]map[string]int{}
appsec_engine_stats := map[string]map[string]int{}
appsec_rule_stats := map[string]map[string]map[string]int{}
alerts_stats := map[string]int{}
stash_stats := map[string]struct {
Type string
Count int
}{}
mAcquis := ms["acquisition"].(statAcquis)
mParser := ms["parsers"].(statParser)
mBucket := ms["scenarios"].(statBucket)
mLapi := ms["lapi"].(statLapi)
mLapiMachine := ms["lapi-machine"].(statLapiMachine)
mLapiBouncer := ms["lapi-bouncer"].(statLapiBouncer)
mLapiDecision := ms["lapi-decisions"].(statLapiDecision)
mDecision := ms["decisions"].(statDecision)
mAppsecEngine := ms["appsec-engine"].(statAppsecEngine)
mAppsecRule := ms["appsec-rule"].(statAppsecRule)
mAlert := ms["alerts"].(statAlert)
mStash := ms["stash"].(statStash)
mWhitelist := ms["whitelists"].(statWhitelist)
for idx, fam := range result {
if !strings.HasPrefix(fam.Name, "cs_") {
continue
}
log.Tracef("round %d", idx)
for _, m := range fam.Metrics {
metric, ok := m.(prom2json.Metric)
if !ok {
log.Debugf("failed to convert metric to prom2json.Metric")
continue
}
name, ok := metric.Labels["name"]
if !ok {
log.Debugf("no name in Metric %v", metric.Labels)
}
source, ok := metric.Labels["source"]
if !ok {
log.Debugf("no source in Metric %v for %s", metric.Labels, fam.Name)
@ -106,148 +160,89 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error
origin := metric.Labels["origin"]
action := metric.Labels["action"]
appsecEngine := metric.Labels["appsec_engine"]
appsecRule := metric.Labels["rule_name"]
mtype := metric.Labels["type"]
fval, err := strconv.ParseFloat(value, 32)
if err != nil {
log.Errorf("Unexpected int value %s : %s", value, err)
}
ival := int(fval)
switch fam.Name {
/*buckets*/
//
// buckets
//
case "cs_bucket_created_total":
if _, ok := buckets_stats[name]; !ok {
buckets_stats[name] = make(map[string]int)
}
buckets_stats[name]["instantiation"] += ival
mBucket.Process(name, "instantiation", ival)
case "cs_buckets":
if _, ok := buckets_stats[name]; !ok {
buckets_stats[name] = make(map[string]int)
}
buckets_stats[name]["curr_count"] += ival
mBucket.Process(name, "curr_count", ival)
case "cs_bucket_overflowed_total":
if _, ok := buckets_stats[name]; !ok {
buckets_stats[name] = make(map[string]int)
}
buckets_stats[name]["overflow"] += ival
mBucket.Process(name, "overflow", ival)
case "cs_bucket_poured_total":
if _, ok := buckets_stats[name]; !ok {
buckets_stats[name] = make(map[string]int)
}
if _, ok := acquis_stats[source]; !ok {
acquis_stats[source] = make(map[string]int)
}
buckets_stats[name]["pour"] += ival
acquis_stats[source]["pour"] += ival
mBucket.Process(name, "pour", ival)
mAcquis.Process(source, "pour", ival)
case "cs_bucket_underflowed_total":
if _, ok := buckets_stats[name]; !ok {
buckets_stats[name] = make(map[string]int)
}
buckets_stats[name]["underflow"] += ival
/*acquis*/
mBucket.Process(name, "underflow", ival)
//
// parsers
//
case "cs_parser_hits_total":
if _, ok := acquis_stats[source]; !ok {
acquis_stats[source] = make(map[string]int)
}
acquis_stats[source]["reads"] += ival
mAcquis.Process(source, "reads", ival)
case "cs_parser_hits_ok_total":
if _, ok := acquis_stats[source]; !ok {
acquis_stats[source] = make(map[string]int)
}
acquis_stats[source]["parsed"] += ival
mAcquis.Process(source, "parsed", ival)
case "cs_parser_hits_ko_total":
if _, ok := acquis_stats[source]; !ok {
acquis_stats[source] = make(map[string]int)
}
acquis_stats[source]["unparsed"] += ival
mAcquis.Process(source, "unparsed", ival)
case "cs_node_hits_total":
if _, ok := parsers_stats[name]; !ok {
parsers_stats[name] = make(map[string]int)
}
parsers_stats[name]["hits"] += ival
mParser.Process(name, "hits", ival)
case "cs_node_hits_ok_total":
if _, ok := parsers_stats[name]; !ok {
parsers_stats[name] = make(map[string]int)
}
parsers_stats[name]["parsed"] += ival
mParser.Process(name, "parsed", ival)
case "cs_node_hits_ko_total":
if _, ok := parsers_stats[name]; !ok {
parsers_stats[name] = make(map[string]int)
}
parsers_stats[name]["unparsed"] += ival
mParser.Process(name, "unparsed", ival)
//
// whitelists
//
case "cs_node_wl_hits_total":
mWhitelist.Process(name, reason, "hits", ival)
case "cs_node_wl_hits_ok_total":
mWhitelist.Process(name, reason, "whitelisted", ival)
// track as well whitelisted lines at acquis level
mAcquis.Process(source, "whitelisted", ival)
//
// lapi
//
case "cs_lapi_route_requests_total":
if _, ok := lapi_stats[route]; !ok {
lapi_stats[route] = make(map[string]int)
}
lapi_stats[route][method] += ival
mLapi.Process(route, method, ival)
case "cs_lapi_machine_requests_total":
if _, ok := lapi_machine_stats[machine]; !ok {
lapi_machine_stats[machine] = make(map[string]map[string]int)
}
if _, ok := lapi_machine_stats[machine][route]; !ok {
lapi_machine_stats[machine][route] = make(map[string]int)
}
lapi_machine_stats[machine][route][method] += ival
mLapiMachine.Process(machine, route, method, ival)
case "cs_lapi_bouncer_requests_total":
if _, ok := lapi_bouncer_stats[bouncer]; !ok {
lapi_bouncer_stats[bouncer] = make(map[string]map[string]int)
}
if _, ok := lapi_bouncer_stats[bouncer][route]; !ok {
lapi_bouncer_stats[bouncer][route] = make(map[string]int)
}
lapi_bouncer_stats[bouncer][route][method] += ival
mLapiBouncer.Process(bouncer, route, method, ival)
case "cs_lapi_decisions_ko_total", "cs_lapi_decisions_ok_total":
if _, ok := lapi_decisions_stats[bouncer]; !ok {
lapi_decisions_stats[bouncer] = struct {
NonEmpty int
Empty int
}{}
}
x := lapi_decisions_stats[bouncer]
if fam.Name == "cs_lapi_decisions_ko_total" {
x.Empty += ival
} else if fam.Name == "cs_lapi_decisions_ok_total" {
x.NonEmpty += ival
}
lapi_decisions_stats[bouncer] = x
mLapiDecision.Process(bouncer, fam.Name, ival)
//
// decisions
//
case "cs_active_decisions":
if _, ok := decisions_stats[reason]; !ok {
decisions_stats[reason] = make(map[string]map[string]int)
}
if _, ok := decisions_stats[reason][origin]; !ok {
decisions_stats[reason][origin] = make(map[string]int)
}
decisions_stats[reason][origin][action] += ival
mDecision.Process(reason, origin, action, ival)
case "cs_alerts":
/*if _, ok := alerts_stats[scenario]; !ok {
alerts_stats[scenario] = make(map[string]int)
}*/
alerts_stats[reason] += ival
mAlert.Process(reason, ival)
//
// stash
//
case "cs_cache_size":
stash_stats[name] = struct {
Type string
Count int
}{Type: mtype, Count: ival}
mStash.Process(name, mtype, ival)
//
// appsec
//
case "cs_appsec_reqs_total":
if _, ok := appsec_engine_stats[metric.Labels["appsec_engine"]]; !ok {
appsec_engine_stats[metric.Labels["appsec_engine"]] = make(map[string]int, 0)
}
appsec_engine_stats[metric.Labels["appsec_engine"]]["processed"] = ival
mAppsecEngine.Process(appsecEngine, "processed", ival)
case "cs_appsec_block_total":
if _, ok := appsec_engine_stats[metric.Labels["appsec_engine"]]; !ok {
appsec_engine_stats[metric.Labels["appsec_engine"]] = make(map[string]int, 0)
}
appsec_engine_stats[metric.Labels["appsec_engine"]]["blocked"] = ival
mAppsecEngine.Process(appsecEngine, "blocked", ival)
case "cs_appsec_rule_hits":
appsecEngine := metric.Labels["appsec_engine"]
ruleID := metric.Labels["rule_name"]
if _, ok := appsec_rule_stats[appsecEngine]; !ok {
appsec_rule_stats[appsecEngine] = make(map[string]map[string]int, 0)
}
if _, ok := appsec_rule_stats[appsecEngine][ruleID]; !ok {
appsec_rule_stats[appsecEngine][ruleID] = make(map[string]int, 0)
}
appsec_rule_stats[appsecEngine][ruleID]["triggered"] = ival
mAppsecRule.Process(appsecEngine, appsecRule, "triggered", ival)
default:
log.Debugf("unknown: %+v", fam.Name)
continue
@ -255,46 +250,52 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error
}
}
if formatType == "human" {
acquisStatsTable(out, acquis_stats)
bucketStatsTable(out, buckets_stats)
parserStatsTable(out, parsers_stats)
lapiStatsTable(out, lapi_stats)
lapiMachineStatsTable(out, lapi_machine_stats)
lapiBouncerStatsTable(out, lapi_bouncer_stats)
lapiDecisionStatsTable(out, lapi_decisions_stats)
decisionStatsTable(out, decisions_stats)
alertStatsTable(out, alerts_stats)
stashStatsTable(out, stash_stats)
appsecMetricsToTable(out, appsec_engine_stats)
appsecRulesToTable(out, appsec_rule_stats)
return nil
return nil
}
type cliMetrics struct {
cfg configGetter
}
func NewCLIMetrics(cfg configGetter) *cliMetrics {
return &cliMetrics{
cfg: cfg,
}
}
func (ms metricStore) Format(out io.Writer, sections []string, formatType string, noUnit bool) error {
// copy only the sections we want
want := map[string]metricSection{}
// if explicitly asking for sections, we want to show empty tables
showEmpty := len(sections) > 0
// if no sections are specified, we want all of them
if len(sections) == 0 {
for section := range ms {
sections = append(sections, section)
}
}
stats := make(map[string]any)
stats["acquisition"] = acquis_stats
stats["buckets"] = buckets_stats
stats["parsers"] = parsers_stats
stats["lapi"] = lapi_stats
stats["lapi_machine"] = lapi_machine_stats
stats["lapi_bouncer"] = lapi_bouncer_stats
stats["lapi_decisions"] = lapi_decisions_stats
stats["decisions"] = decisions_stats
stats["alerts"] = alerts_stats
stats["stash"] = stash_stats
for _, section := range sections {
want[section] = ms[section]
}
switch formatType {
case "human":
for section := range want {
want[section].Table(out, noUnit, showEmpty)
}
case "json":
x, err := json.MarshalIndent(stats, "", " ")
x, err := json.MarshalIndent(want, "", " ")
if err != nil {
return fmt.Errorf("failed to unmarshal metrics : %v", err)
return fmt.Errorf("failed to marshal metrics: %w", err)
}
out.Write(x)
case "raw":
x, err := yaml.Marshal(stats)
x, err := yaml.Marshal(want)
if err != nil {
return fmt.Errorf("failed to unmarshal metrics : %v", err)
return fmt.Errorf("failed to marshal metrics: %w", err)
}
out.Write(x)
default:
@ -304,52 +305,195 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error
return nil
}
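// Editor's sketch (not part of this commit): Fetch and Format are meant to be
// chained, which is roughly what cli.show() does further down. The URL and
// section names are illustrative.
func exampleFetchAndFormat() error {
	ms := NewMetricStore()
	if err := ms.Fetch("http://127.0.0.1:6060/metrics"); err != nil {
		return err
	}
	// two explicit sections, human-readable tables, raw numbers instead of units
	return ms.Format(color.Output, []string{"acquisition", "parsers"}, "human", true)
}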
var noUnit bool
func runMetrics(cmd *cobra.Command, args []string) error {
flags := cmd.Flags()
url, err := flags.GetString("url")
if err != nil {
return err
}
func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error {
cfg := cli.cfg()
if url != "" {
csConfig.Cscli.PrometheusUrl = url
cfg.Cscli.PrometheusUrl = url
}
noUnit, err = flags.GetBool("no-unit")
if err != nil {
if cfg.Prometheus == nil {
return ErrMissingConfig
}
if !cfg.Prometheus.Enabled {
return ErrMetricsDisabled
}
ms := NewMetricStore()
if err := ms.Fetch(cfg.Cscli.PrometheusUrl); err != nil {
return err
}
if csConfig.Prometheus == nil {
return fmt.Errorf("prometheus section missing, can't show metrics")
// any section that we don't have in the store is an error
for _, section := range sections {
if _, ok := ms[section]; !ok {
return fmt.Errorf("unknown metrics type: %s", section)
}
}
if !csConfig.Prometheus.Enabled {
return fmt.Errorf("prometheus is not enabled, can't show metrics")
}
if err = FormatPrometheusMetrics(color.Output, csConfig.Cscli.PrometheusUrl, csConfig.Cscli.Output); err != nil {
if err := ms.Format(color.Output, sections, cfg.Cscli.Output, noUnit); err != nil {
return err
}
return nil
}
func NewMetricsCmd() *cobra.Command {
cmdMetrics := &cobra.Command{
Use: "metrics",
Short: "Display crowdsec prometheus metrics.",
Long: `Fetch metrics from the prometheus server and display them in a human-friendly way`,
func (cli *cliMetrics) NewCommand() *cobra.Command {
var (
url string
noUnit bool
)
cmd := &cobra.Command{
Use: "metrics",
Short: "Display crowdsec prometheus metrics.",
Long: `Fetch metrics from a Local API server and display them`,
Example: `# Show all Metrics, skip empty tables (same as "cscli metrics show")
cscli metrics
# Show only some metrics, connect to a different url
cscli metrics --url http://lapi.local:6060/metrics show acquisition parsers
# List available metric types
cscli metrics list`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: runMetrics,
RunE: func(_ *cobra.Command, _ []string) error {
return cli.show(nil, url, noUnit)
},
}
flags := cmdMetrics.PersistentFlags()
flags.StringP("url", "u", "", "Prometheus url (http://<ip>:<port>/metrics)")
flags.Bool("no-unit", false, "Show the real number instead of formatted with units")
flags := cmd.Flags()
flags.StringVarP(&url, "url", "u", "", "Prometheus url (http://<ip>:<port>/metrics)")
flags.BoolVar(&noUnit, "no-unit", false, "Show the real number instead of formatted with units")
return cmdMetrics
cmd.AddCommand(cli.newShowCmd())
cmd.AddCommand(cli.newListCmd())
return cmd
}
// expandAlias returns a list of sections. The input can be a list of sections or alias.
func (cli *cliMetrics) expandSectionGroups(args []string) []string {
ret := []string{}
for _, section := range args {
switch section {
case "engine":
ret = append(ret, "acquisition", "parsers", "scenarios", "stash", "whitelists")
case "lapi":
ret = append(ret, "alerts", "decisions", "lapi", "lapi-bouncer", "lapi-decisions", "lapi-machine")
case "appsec":
ret = append(ret, "appsec-engine", "appsec-rule")
default:
ret = append(ret, section)
}
}
return ret
}
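// Editor's illustration (not part of this commit): alias expansion is purely
// textual; names that are not aliases pass through unchanged and are validated
// later by show().
func exampleExpandSectionGroups(cli *cliMetrics) {
	sections := cli.expandSectionGroups([]string{"engine", "alerts"})
	fmt.Println(sections)
	// prints: [acquisition parsers scenarios stash whitelists alerts]
}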
func (cli *cliMetrics) newShowCmd() *cobra.Command {
var (
url string
noUnit bool
)
cmd := &cobra.Command{
Use: "show [type]...",
Short: "Display all or part of the available metrics.",
Long: `Fetch metrics from a Local API server and display them, optionally filtering on specific types.`,
Example: `# Show all Metrics, skip empty tables
cscli metrics show
# Use an alias: "engine", "lapi" or "appsec" to show a group of metrics
cscli metrics show engine
# Show some specific metrics, show empty tables, connect to a different url
cscli metrics show acquisition parsers scenarios stash --url http://lapi.local:6060/metrics
# To list available metric types, use "cscli metrics list"
cscli metrics list; cscli metrics list -o json
# Show metrics in json format
cscli metrics show acquisition parsers scenarios stash -o json`,
// Positional args are optional
DisableAutoGenTag: true,
RunE: func(_ *cobra.Command, args []string) error {
args = cli.expandSectionGroups(args)
return cli.show(args, url, noUnit)
},
}
flags := cmd.Flags()
flags.StringVarP(&url, "url", "u", "", "Metrics url (http://<ip>:<port>/metrics)")
flags.BoolVar(&noUnit, "no-unit", false, "Show the real number instead of formatted with units")
return cmd
}
func (cli *cliMetrics) list() error {
type metricType struct {
Type string `json:"type" yaml:"type"`
Title string `json:"title" yaml:"title"`
Description string `json:"description" yaml:"description"`
}
var allMetrics []metricType
ms := NewMetricStore()
for _, section := range maptools.SortedKeys(ms) {
title, description := ms[section].Description()
allMetrics = append(allMetrics, metricType{
Type: section,
Title: title,
Description: description,
})
}
switch cli.cfg().Cscli.Output {
case "human":
t := newTable(color.Output)
t.SetRowLines(true)
t.SetHeaders("Type", "Title", "Description")
for _, metric := range allMetrics {
t.AddRow(metric.Type, metric.Title, metric.Description)
}
t.Render()
case "json":
x, err := json.MarshalIndent(allMetrics, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal metric types: %w", err)
}
fmt.Println(string(x))
case "raw":
x, err := yaml.Marshal(allMetrics)
if err != nil {
return fmt.Errorf("failed to marshal metric types: %w", err)
}
fmt.Println(string(x))
}
return nil
}
func (cli *cliMetrics) newListCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "list",
Short: "List available types of metrics.",
Long: `List available types of metrics.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(_ *cobra.Command, _ []string) error {
return cli.list()
},
}
return cmd
}

View file

@ -1,25 +1,33 @@
package main
import (
"errors"
"fmt"
"io"
"sort"
"strconv"
"github.com/aquasecurity/table"
log "github.com/sirupsen/logrus"
"github.com/crowdsecurity/go-cs-lib/maptools"
)
// ErrNilTable means a nil pointer was passed instead of a table instance. This is a programming error.
var ErrNilTable = errors.New("nil table")
func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int) int {
// stats: machine -> route -> method -> count
// sort keys to keep consistent order when printing
machineKeys := []string{}
for k := range stats {
machineKeys = append(machineKeys, k)
}
sort.Strings(machineKeys)
numRows := 0
for _, machine := range machineKeys {
// oneRow: route -> method -> count
machineRow := stats[machine]
@ -31,41 +39,79 @@ func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]i
methodName,
}
if count != 0 {
row = append(row, fmt.Sprintf("%d", count))
row = append(row, strconv.Itoa(count))
} else {
row = append(row, "-")
}
t.AddRow(row...)
numRows++
}
}
}
return numRows
}
func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []string) (int, error) {
func wlMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) {
if t == nil {
return 0, fmt.Errorf("nil table")
return 0, ErrNilTable
}
// sort keys to keep consistent order when printing
sortedKeys := []string{}
for k := range stats {
sortedKeys = append(sortedKeys, k)
}
sort.Strings(sortedKeys)
numRows := 0
for _, alabel := range sortedKeys {
for _, name := range maptools.SortedKeys(stats) {
for _, reason := range maptools.SortedKeys(stats[name]) {
row := []string{
name,
reason,
"-",
"-",
}
for _, action := range maptools.SortedKeys(stats[name][reason]) {
value := stats[name][reason][action]
switch action {
case "whitelisted":
row[3] = strconv.Itoa(value)
case "hits":
row[2] = strconv.Itoa(value)
default:
log.Debugf("unexpected counter '%s' for whitelists = %d", action, value)
}
}
t.AddRow(row...)
numRows++
}
}
return numRows, nil
}
func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) {
if t == nil {
return 0, ErrNilTable
}
numRows := 0
for _, alabel := range maptools.SortedKeys(stats) {
astats, ok := stats[alabel]
if !ok {
continue
}
row := []string{
alabel,
}
for _, sl := range keys {
if v, ok := astats[sl]; ok && v != 0 {
numberToShow := fmt.Sprintf("%d", v)
numberToShow := strconv.Itoa(v)
if !noUnit {
numberToShow = formatNumber(v)
}
@ -75,76 +121,192 @@ func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []stri
row = append(row, "-")
}
}
t.AddRow(row...)
numRows++
}
return numRows, nil
}
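// Editor's sketch (not part of this commit): metricsToTable renders one row per
// outer key, with the columns fixed by the caller-supplied keys slice. The
// sample data is illustrative; assumes "os" is imported.
func exampleMetricsToTable() {
	stats := map[string]map[string]int{
		"file:/var/log/auth.log": {"reads": 1500, "parsed": 1450, "unparsed": 50},
	}
	t := newTable(os.Stdout)
	t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed")
	if _, err := metricsToTable(t, stats, []string{"reads", "parsed", "unparsed"}, true); err != nil {
		log.Warningf("rendering example table: %s", err)
	}
	t.Render()
}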
func bucketStatsTable(out io.Writer, stats map[string]map[string]int) {
func (s statBucket) Description() (string, string) {
return "Scenario Metrics",
`Measure events in different scenarios. Current count is the number of buckets during metrics collection. ` +
`Overflows are past event-producing buckets, while Expired are the ones that didn't receive enough events to Overflow.`
}
func (s statBucket) Process(bucket, metric string, val int) {
if _, ok := s[bucket]; !ok {
s[bucket] = make(map[string]int)
}
s[bucket][metric] += val
}
func (s statBucket) Table(out io.Writer, noUnit bool, showEmpty bool) {
t := newTable(out)
t.SetRowLines(false)
t.SetHeaders("Bucket", "Current Count", "Overflows", "Instantiated", "Poured", "Expired")
t.SetHeaders("Scenario", "Current Count", "Overflows", "Instantiated", "Poured", "Expired")
t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft)
keys := []string{"curr_count", "overflow", "instantiation", "pour", "underflow"}
if numRows, err := metricsToTable(t, stats, keys); err != nil {
log.Warningf("while collecting bucket stats: %s", err)
} else if numRows > 0 {
renderTableTitle(out, "\nBucket Metrics:")
if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil {
log.Warningf("while collecting scenario stats: %s", err)
} else if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
func acquisStatsTable(out io.Writer, stats map[string]map[string]int) {
func (s statAcquis) Description() (string, string) {
return "Acquisition Metrics",
`Measures the lines read, parsed, and unparsed per datasource. ` +
`Zero read lines indicate a misconfigured or inactive datasource. ` +
`Zero parsed lines mean the parser(s) failed. ` +
`Non-zero unparsed lines are fine as crowdsec selects relevant lines.`
}
func (s statAcquis) Process(source, metric string, val int) {
if _, ok := s[source]; !ok {
s[source] = make(map[string]int)
}
s[source][metric] += val
}
func (s statAcquis) Table(out io.Writer, noUnit bool, showEmpty bool) {
t := newTable(out)
t.SetRowLines(false)
t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket")
t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket", "Lines whitelisted")
t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft)
keys := []string{"reads", "parsed", "unparsed", "pour"}
keys := []string{"reads", "parsed", "unparsed", "pour", "whitelisted"}
if numRows, err := metricsToTable(t, stats, keys); err != nil {
if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil {
log.Warningf("while collecting acquis stats: %s", err)
} else if numRows > 0 {
renderTableTitle(out, "\nAcquisition Metrics:")
} else if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
func appsecMetricsToTable(out io.Writer, metrics map[string]map[string]int) {
func (s statAppsecEngine) Description() (string, string) {
return "Appsec Metrics",
`Measures the number of parsed and blocked requests by the AppSec Component.`
}
func (s statAppsecEngine) Process(appsecEngine, metric string, val int) {
if _, ok := s[appsecEngine]; !ok {
s[appsecEngine] = make(map[string]int)
}
s[appsecEngine][metric] += val
}
func (s statAppsecEngine) Table(out io.Writer, noUnit bool, showEmpty bool) {
t := newTable(out)
t.SetRowLines(false)
t.SetHeaders("Appsec Engine", "Processed", "Blocked")
t.SetAlignment(table.AlignLeft, table.AlignLeft)
keys := []string{"processed", "blocked"}
if numRows, err := metricsToTable(t, metrics, keys); err != nil {
if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil {
log.Warningf("while collecting appsec stats: %s", err)
} else if numRows > 0 {
renderTableTitle(out, "\nAppsec Metrics:")
} else if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
func appsecRulesToTable(out io.Writer, metrics map[string]map[string]map[string]int) {
for appsecEngine, appsecEngineRulesStats := range metrics {
func (s statAppsecRule) Description() (string, string) {
return "Appsec Rule Metrics",
`Provides “per AppSec Component” information about the number of matches for loaded AppSec Rules.`
}
func (s statAppsecRule) Process(appsecEngine, appsecRule string, metric string, val int) {
if _, ok := s[appsecEngine]; !ok {
s[appsecEngine] = make(map[string]map[string]int)
}
if _, ok := s[appsecEngine][appsecRule]; !ok {
s[appsecEngine][appsecRule] = make(map[string]int)
}
s[appsecEngine][appsecRule][metric] += val
}
func (s statAppsecRule) Table(out io.Writer, noUnit bool, showEmpty bool) {
for appsecEngine, appsecEngineRulesStats := range s {
t := newTable(out)
t.SetRowLines(false)
t.SetHeaders("Rule ID", "Triggered")
t.SetAlignment(table.AlignLeft, table.AlignLeft)
keys := []string{"triggered"}
if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys); err != nil {
if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil {
log.Warningf("while collecting appsec rules stats: %s", err)
} else if numRows > 0 {
} else if numRows > 0 || showEmpty {
renderTableTitle(out, fmt.Sprintf("\nAppsec '%s' Rules Metrics:", appsecEngine))
t.Render()
}
}
}
func parserStatsTable(out io.Writer, stats map[string]map[string]int) {
func (s statWhitelist) Description() (string, string) {
return "Whitelist Metrics",
`Tracks the number of events processed and possibly whitelisted by each parser whitelist.`
}
func (s statWhitelist) Process(whitelist, reason, metric string, val int) {
if _, ok := s[whitelist]; !ok {
s[whitelist] = make(map[string]map[string]int)
}
if _, ok := s[whitelist][reason]; !ok {
s[whitelist][reason] = make(map[string]int)
}
s[whitelist][reason][metric] += val
}
func (s statWhitelist) Table(out io.Writer, noUnit bool, showEmpty bool) {
t := newTable(out)
t.SetRowLines(false)
t.SetHeaders("Whitelist", "Reason", "Hits", "Whitelisted")
t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft)
if numRows, err := wlMetricsToTable(t, s, noUnit); err != nil {
log.Warningf("while collecting parsers stats: %s", err)
} else if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
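// Editor's sketch (not part of this commit): Process allocates the nested maps
// on demand, so the whitelist section can be fed directly while walking the
// Prometheus families. The whitelist and reason names are illustrative;
// assumes "os" is imported.
func exampleWhitelistStats() {
	s := statWhitelist{}
	s.Process("crowdsecurity/whitelists", "private ip", "hits", 3)
	s.Process("crowdsecurity/whitelists", "private ip", "whitelisted", 2)
	s.Table(os.Stdout, false, false)
}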
func (s statParser) Description() (string, string) {
return "Parser Metrics",
`Tracks the number of events processed by each parser and indicates success or failure. ` +
`Zero parsed lines means the parser(s) failed. ` +
`Non-zero unparsed lines are fine as crowdsec selects relevant lines.`
}
func (s statParser) Process(parser, metric string, val int) {
if _, ok := s[parser]; !ok {
s[parser] = make(map[string]int)
}
s[parser][metric] += val
}
func (s statParser) Table(out io.Writer, noUnit bool, showEmpty bool) {
t := newTable(out)
t.SetRowLines(false)
t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed")
@ -152,187 +314,302 @@ func parserStatsTable(out io.Writer, stats map[string]map[string]int) {
keys := []string{"hits", "parsed", "unparsed"}
if numRows, err := metricsToTable(t, stats, keys); err != nil {
if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil {
log.Warningf("while collecting parsers stats: %s", err)
} else if numRows > 0 {
renderTableTitle(out, "\nParser Metrics:")
} else if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
func stashStatsTable(out io.Writer, stats map[string]struct {
Type string
Count int
}) {
func (s statStash) Description() (string, string) {
return "Parser Stash Metrics",
`Tracks the status of stashes that might be created by various parsers and scenarios.`
}
func (s statStash) Process(name, mtype string, val int) {
s[name] = struct {
Type string
Count int
}{
Type: mtype,
Count: val,
}
}
func (s statStash) Table(out io.Writer, noUnit bool, showEmpty bool) {
t := newTable(out)
t.SetRowLines(false)
t.SetHeaders("Name", "Type", "Items")
t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft)
// unfortunately, we can't reuse metricsToTable as the structure is too different :/
sortedKeys := []string{}
for k := range stats {
sortedKeys = append(sortedKeys, k)
}
sort.Strings(sortedKeys)
numRows := 0
for _, alabel := range sortedKeys {
astats := stats[alabel]
for _, alabel := range maptools.SortedKeys(s) {
astats := s[alabel]
row := []string{
alabel,
astats.Type,
fmt.Sprintf("%d", astats.Count),
strconv.Itoa(astats.Count),
}
t.AddRow(row...)
numRows++
}
if numRows > 0 {
renderTableTitle(out, "\nParser Stash Metrics:")
if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
func lapiStatsTable(out io.Writer, stats map[string]map[string]int) {
func (s statLapi) Description() (string, string) {
return "Local API Metrics",
`Monitors the requests made to local API routes.`
}
func (s statLapi) Process(route, method string, val int) {
if _, ok := s[route]; !ok {
s[route] = make(map[string]int)
}
s[route][method] += val
}
func (s statLapi) Table(out io.Writer, noUnit bool, showEmpty bool) {
t := newTable(out)
t.SetRowLines(false)
t.SetHeaders("Route", "Method", "Hits")
t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft)
// unfortunately, we can't reuse metricsToTable as the structure is too different :/
sortedKeys := []string{}
for k := range stats {
sortedKeys = append(sortedKeys, k)
}
sort.Strings(sortedKeys)
numRows := 0
for _, alabel := range sortedKeys {
astats := stats[alabel]
for _, alabel := range maptools.SortedKeys(s) {
astats := s[alabel]
subKeys := []string{}
for skey := range astats {
subKeys = append(subKeys, skey)
}
sort.Strings(subKeys)
for _, sl := range subKeys {
row := []string{
alabel,
sl,
fmt.Sprintf("%d", astats[sl]),
strconv.Itoa(astats[sl]),
}
t.AddRow(row...)
numRows++
}
}
if numRows > 0 {
renderTableTitle(out, "\nLocal API Metrics:")
if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
func lapiMachineStatsTable(out io.Writer, stats map[string]map[string]map[string]int) {
func (s statLapiMachine) Description() (string, string) {
return "Local API Machines Metrics",
`Tracks the number of calls to the local API from each registered machine.`
}
func (s statLapiMachine) Process(machine, route, method string, val int) {
if _, ok := s[machine]; !ok {
s[machine] = make(map[string]map[string]int)
}
if _, ok := s[machine][route]; !ok {
s[machine][route] = make(map[string]int)
}
s[machine][route][method] += val
}
func (s statLapiMachine) Table(out io.Writer, noUnit bool, showEmpty bool) {
t := newTable(out)
t.SetRowLines(false)
t.SetHeaders("Machine", "Route", "Method", "Hits")
t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft)
numRows := lapiMetricsToTable(t, stats)
numRows := lapiMetricsToTable(t, s)
if numRows > 0 {
renderTableTitle(out, "\nLocal API Machines Metrics:")
if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
func lapiBouncerStatsTable(out io.Writer, stats map[string]map[string]map[string]int) {
func (s statLapiBouncer) Description() (string, string) {
return "Local API Bouncers Metrics",
`Tracks total hits to remediation component related API routes.`
}
func (s statLapiBouncer) Process(bouncer, route, method string, val int) {
if _, ok := s[bouncer]; !ok {
s[bouncer] = make(map[string]map[string]int)
}
if _, ok := s[bouncer][route]; !ok {
s[bouncer][route] = make(map[string]int)
}
s[bouncer][route][method] += val
}
func (s statLapiBouncer) Table(out io.Writer, noUnit bool, showEmpty bool) {
t := newTable(out)
t.SetRowLines(false)
t.SetHeaders("Bouncer", "Route", "Method", "Hits")
t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft)
numRows := lapiMetricsToTable(t, stats)
numRows := lapiMetricsToTable(t, s)
if numRows > 0 {
renderTableTitle(out, "\nLocal API Bouncers Metrics:")
if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
func lapiDecisionStatsTable(out io.Writer, stats map[string]struct {
NonEmpty int
Empty int
},
) {
func (s statLapiDecision) Description() (string, string) {
return "Local API Bouncers Decisions",
`Tracks the number of empty/non-empty answers from LAPI to bouncers that are working in "live" mode.`
}
func (s statLapiDecision) Process(bouncer, fam string, val int) {
if _, ok := s[bouncer]; !ok {
s[bouncer] = struct {
NonEmpty int
Empty int
}{}
}
x := s[bouncer]
switch fam {
case "cs_lapi_decisions_ko_total":
x.Empty += val
case "cs_lapi_decisions_ok_total":
x.NonEmpty += val
}
s[bouncer] = x
}
func (s statLapiDecision) Table(out io.Writer, noUnit bool, showEmpty bool) {
t := newTable(out)
t.SetRowLines(false)
t.SetHeaders("Bouncer", "Empty answers", "Non-empty answers")
t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft)
numRows := 0
for bouncer, hits := range stats {
for bouncer, hits := range s {
t.AddRow(
bouncer,
fmt.Sprintf("%d", hits.Empty),
fmt.Sprintf("%d", hits.NonEmpty),
strconv.Itoa(hits.Empty),
strconv.Itoa(hits.NonEmpty),
)
numRows++
}
if numRows > 0 {
renderTableTitle(out, "\nLocal API Bouncers Decisions:")
if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
func decisionStatsTable(out io.Writer, stats map[string]map[string]map[string]int) {
func (s statDecision) Description() (string, string) {
return "Local API Decisions",
`Provides information about all currently active decisions. ` +
`Includes both local (crowdsec) and global decisions (CAPI), and lists subscriptions (lists).`
}
func (s statDecision) Process(reason, origin, action string, val int) {
if _, ok := s[reason]; !ok {
s[reason] = make(map[string]map[string]int)
}
if _, ok := s[reason][origin]; !ok {
s[reason][origin] = make(map[string]int)
}
s[reason][origin][action] += val
}
func (s statDecision) Table(out io.Writer, noUnit bool, showEmpty bool) {
t := newTable(out)
t.SetRowLines(false)
t.SetHeaders("Reason", "Origin", "Action", "Count")
t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft)
numRows := 0
for reason, origins := range stats {
for reason, origins := range s {
for origin, actions := range origins {
for action, hits := range actions {
t.AddRow(
reason,
origin,
action,
fmt.Sprintf("%d", hits),
strconv.Itoa(hits),
)
numRows++
}
}
}
if numRows > 0 {
renderTableTitle(out, "\nLocal API Decisions:")
if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
func alertStatsTable(out io.Writer, stats map[string]int) {
func (s statAlert) Description() (string, string) {
return "Local API Alerts",
`Tracks the total number of past and present alerts for the installed scenarios.`
}
func (s statAlert) Process(reason string, val int) {
s[reason] += val
}
func (s statAlert) Table(out io.Writer, noUnit bool, showEmpty bool) {
t := newTable(out)
t.SetRowLines(false)
t.SetHeaders("Reason", "Count")
t.SetAlignment(table.AlignLeft, table.AlignLeft)
numRows := 0
for scenario, hits := range stats {
for scenario, hits := range s {
t.AddRow(
scenario,
fmt.Sprintf("%d", hits),
strconv.Itoa(hits),
)
numRows++
}
if numRows > 0 {
renderTableTitle(out, "\nLocal API Alerts:")
if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}

View file

@ -23,14 +23,13 @@ import (
"github.com/crowdsecurity/go-cs-lib/ptr"
"github.com/crowdsecurity/go-cs-lib/version"
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require"
"github.com/crowdsecurity/crowdsec/pkg/apiclient"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
"github.com/crowdsecurity/crowdsec/pkg/csplugin"
"github.com/crowdsecurity/crowdsec/pkg/csprofiles"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require"
"github.com/crowdsecurity/crowdsec/pkg/models"
"github.com/crowdsecurity/crowdsec/pkg/types"
)
type NotificationsCfg struct {
@ -39,13 +38,17 @@ type NotificationsCfg struct {
ids []uint
}
type cliNotifications struct{}
func NewCLINotifications() *cliNotifications {
return &cliNotifications{}
type cliNotifications struct {
cfg configGetter
}
func (cli cliNotifications) NewCommand() *cobra.Command {
func NewCLINotifications(cfg configGetter) *cliNotifications {
return &cliNotifications{
cfg: cfg,
}
}
func (cli *cliNotifications) NewCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "notifications [action]",
Short: "Helper for notification plugin configuration",
@ -53,14 +56,15 @@ func (cli cliNotifications) NewCommand() *cobra.Command {
Args: cobra.MinimumNArgs(1),
Aliases: []string{"notifications", "notification"},
DisableAutoGenTag: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := require.LAPI(csConfig); err != nil {
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
cfg := cli.cfg()
if err := require.LAPI(cfg); err != nil {
return err
}
if err := csConfig.LoadAPIClient(); err != nil {
if err := cfg.LoadAPIClient(); err != nil {
return fmt.Errorf("loading api client: %w", err)
}
if err := require.Notifications(csConfig); err != nil {
if err := require.Notifications(cfg); err != nil {
return err
}
@ -76,67 +80,79 @@ func (cli cliNotifications) NewCommand() *cobra.Command {
return cmd
}
func getPluginConfigs() (map[string]csplugin.PluginConfig, error) {
func (cli *cliNotifications) getPluginConfigs() (map[string]csplugin.PluginConfig, error) {
cfg := cli.cfg()
pcfgs := map[string]csplugin.PluginConfig{}
wf := func(path string, info fs.FileInfo, err error) error {
if info == nil {
return fmt.Errorf("error while traversing directory %s: %w", path, err)
}
name := filepath.Join(csConfig.ConfigPaths.NotificationDir, info.Name()) //Avoid calling info.Name() twice
name := filepath.Join(cfg.ConfigPaths.NotificationDir, info.Name()) //Avoid calling info.Name() twice
if (strings.HasSuffix(name, "yaml") || strings.HasSuffix(name, "yml")) && !(info.IsDir()) {
ts, err := csplugin.ParsePluginConfigFile(name)
if err != nil {
return fmt.Errorf("loading notifification plugin configuration with %s: %w", name, err)
}
for _, t := range ts {
csplugin.SetRequiredFields(&t)
pcfgs[t.Name] = t
}
}
return nil
}
if err := filepath.Walk(csConfig.ConfigPaths.NotificationDir, wf); err != nil {
if err := filepath.Walk(cfg.ConfigPaths.NotificationDir, wf); err != nil {
return nil, fmt.Errorf("while loading notifification plugin configuration: %w", err)
}
return pcfgs, nil
}
func getProfilesConfigs() (map[string]NotificationsCfg, error) {
func (cli *cliNotifications) getProfilesConfigs() (map[string]NotificationsCfg, error) {
cfg := cli.cfg()
// A bit of tricky stuff now: reconcile profiles and notification plugins
pcfgs, err := getPluginConfigs()
pcfgs, err := cli.getPluginConfigs()
if err != nil {
return nil, err
}
ncfgs := map[string]NotificationsCfg{}
for _, pc := range pcfgs {
ncfgs[pc.Name] = NotificationsCfg{
Config: pc,
}
}
profiles, err := csprofiles.NewProfile(csConfig.API.Server.Profiles)
profiles, err := csprofiles.NewProfile(cfg.API.Server.Profiles)
if err != nil {
return nil, fmt.Errorf("while extracting profiles from configuration: %w", err)
}
for profileID, profile := range profiles {
for _, notif := range profile.Cfg.Notifications {
pc, ok := pcfgs[notif]
if !ok {
return nil, fmt.Errorf("notification plugin '%s' does not exist", notif)
}
tmp, ok := ncfgs[pc.Name]
if !ok {
return nil, fmt.Errorf("notification plugin '%s' does not exist", pc.Name)
}
tmp.Profiles = append(tmp.Profiles, profile.Cfg)
tmp.ids = append(tmp.ids, uint(profileID))
ncfgs[pc.Name] = tmp
}
}
return ncfgs, nil
}
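// Editor's sketch (not part of this commit): the reconciled map is keyed by
// plugin name, each entry holding its parsed PluginConfig plus every profile
// that references it.
func exampleListPlugins(cli *cliNotifications) error {
	ncfgs, err := cli.getProfilesConfigs()
	if err != nil {
		return err
	}
	for name, ncfg := range ncfgs {
		fmt.Printf("%s (type %s) referenced by %d profile(s)\n", name, ncfg.Config.Type, len(ncfg.Profiles))
	}
	return nil
}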
func (cli cliNotifications) NewListCmd() *cobra.Command {
func (cli *cliNotifications) NewListCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "list",
Short: "list active notifications plugins",
@ -144,21 +160,22 @@ func (cli cliNotifications) NewListCmd() *cobra.Command {
Example: `cscli notifications list`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, arg []string) error {
ncfgs, err := getProfilesConfigs()
RunE: func(_ *cobra.Command, _ []string) error {
cfg := cli.cfg()
ncfgs, err := cli.getProfilesConfigs()
if err != nil {
return fmt.Errorf("can't build profiles configuration: %w", err)
}
if csConfig.Cscli.Output == "human" {
if cfg.Cscli.Output == "human" {
notificationListTable(color.Output, ncfgs)
} else if csConfig.Cscli.Output == "json" {
} else if cfg.Cscli.Output == "json" {
x, err := json.MarshalIndent(ncfgs, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal notification configuration: %w", err)
}
fmt.Printf("%s", string(x))
} else if csConfig.Cscli.Output == "raw" {
} else if cfg.Cscli.Output == "raw" {
csvwriter := csv.NewWriter(os.Stdout)
err := csvwriter.Write([]string{"Name", "Type", "Profile name"})
if err != nil {
@ -176,6 +193,7 @@ func (cli cliNotifications) NewListCmd() *cobra.Command {
}
csvwriter.Flush()
}
return nil
},
}
@ -183,7 +201,7 @@ func (cli cliNotifications) NewListCmd() *cobra.Command {
return cmd
}
func (cli cliNotifications) NewInspectCmd() *cobra.Command {
func (cli *cliNotifications) NewInspectCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "inspect",
Short: "Inspect active notifications plugin configuration",
@ -191,36 +209,32 @@ func (cli cliNotifications) NewInspectCmd() *cobra.Command {
Example: `cscli notifications inspect <plugin_name>`,
Args: cobra.ExactArgs(1),
DisableAutoGenTag: true,
PreRunE: func(cmd *cobra.Command, args []string) error {
if args[0] == "" {
return fmt.Errorf("please provide a plugin name to inspect")
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
ncfgs, err := getProfilesConfigs()
RunE: func(_ *cobra.Command, args []string) error {
cfg := cli.cfg()
ncfgs, err := cli.getProfilesConfigs()
if err != nil {
return fmt.Errorf("can't build profiles configuration: %w", err)
}
cfg, ok := ncfgs[args[0]]
ncfg, ok := ncfgs[args[0]]
if !ok {
return fmt.Errorf("plugin '%s' does not exist or is not active", args[0])
}
if csConfig.Cscli.Output == "human" || csConfig.Cscli.Output == "raw" {
fmt.Printf(" - %15s: %15s\n", "Type", cfg.Config.Type)
fmt.Printf(" - %15s: %15s\n", "Name", cfg.Config.Name)
fmt.Printf(" - %15s: %15s\n", "Timeout", cfg.Config.TimeOut)
fmt.Printf(" - %15s: %15s\n", "Format", cfg.Config.Format)
for k, v := range cfg.Config.Config {
if cfg.Cscli.Output == "human" || cfg.Cscli.Output == "raw" {
fmt.Printf(" - %15s: %15s\n", "Type", ncfg.Config.Type)
fmt.Printf(" - %15s: %15s\n", "Name", ncfg.Config.Name)
fmt.Printf(" - %15s: %15s\n", "Timeout", ncfg.Config.TimeOut)
fmt.Printf(" - %15s: %15s\n", "Format", ncfg.Config.Format)
for k, v := range ncfg.Config.Config {
fmt.Printf(" - %15s: %15v\n", k, v)
}
} else if csConfig.Cscli.Output == "json" {
} else if cfg.Cscli.Output == "json" {
x, err := json.MarshalIndent(ncfg, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal notification configuration: %w", err)
}
fmt.Printf("%s", string(x))
}
return nil
},
}
@ -228,12 +242,13 @@ func (cli cliNotifications) NewInspectCmd() *cobra.Command {
return cmd
}
func (cli cliNotifications) NewTestCmd() *cobra.Command {
func (cli *cliNotifications) NewTestCmd() *cobra.Command {
var (
pluginBroker csplugin.PluginBroker
pluginTomb tomb.Tomb
alertOverride string
)
cmd := &cobra.Command{
Use: "test [plugin name]",
Short: "send a generic test alert to notification plugin",
@ -241,25 +256,26 @@ func (cli cliNotifications) NewTestCmd() *cobra.Command {
Example: `cscli notifications test [plugin_name]`,
Args: cobra.ExactArgs(1),
DisableAutoGenTag: true,
PreRunE: func(cmd *cobra.Command, args []string) error {
pconfigs, err := getPluginConfigs()
PreRunE: func(_ *cobra.Command, args []string) error {
cfg := cli.cfg()
pconfigs, err := cli.getPluginConfigs()
if err != nil {
return fmt.Errorf("can't build profiles configuration: %w", err)
}
cfg, ok := pconfigs[args[0]]
pcfg, ok := pconfigs[args[0]]
if !ok {
return fmt.Errorf("plugin name: '%s' does not exist", args[0])
}
//Create a single profile with plugin name as notification name
return pluginBroker.Init(csConfig.PluginConfig, []*csconfig.ProfileCfg{
return pluginBroker.Init(cfg.PluginConfig, []*csconfig.ProfileCfg{
{
Notifications: []string{
cfg.Name,
pcfg.Name,
},
},
}, csConfig.ConfigPaths)
}, cfg.ConfigPaths)
},
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(_ *cobra.Command, _ []string) error {
pluginTomb.Go(func() error {
pluginBroker.Run(&pluginTomb)
return nil
@ -298,13 +314,16 @@ func (cli cliNotifications) NewTestCmd() *cobra.Command {
if err := yaml.Unmarshal([]byte(alertOverride), alert); err != nil {
return fmt.Errorf("failed to unmarshal alert override: %w", err)
}
pluginBroker.PluginChannel <- csplugin.ProfileAlert{
ProfileID: uint(0),
Alert: alert,
}
//time.Sleep(2 * time.Second) // There's no mechanism to ensure notification has been sent
pluginTomb.Kill(fmt.Errorf("terminating"))
pluginTomb.Wait()
return nil
},
}
@ -313,9 +332,11 @@ func (cli cliNotifications) NewTestCmd() *cobra.Command {
return cmd
}
func (cli cliNotifications) NewReinjectCmd() *cobra.Command {
var alertOverride string
var alert *models.Alert
func (cli *cliNotifications) NewReinjectCmd() *cobra.Command {
var (
alertOverride string
alert *models.Alert
)
cmd := &cobra.Command{
Use: "reinject",
@ -328,25 +349,30 @@ cscli notifications reinject <alert_id> -a '{"remediation": true,"scenario":"not
`,
Args: cobra.ExactArgs(1),
DisableAutoGenTag: true,
PreRunE: func(cmd *cobra.Command, args []string) error {
PreRunE: func(_ *cobra.Command, args []string) error {
var err error
alert, err = FetchAlertFromArgString(args[0])
alert, err = cli.fetchAlertFromArgString(args[0])
if err != nil {
return err
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(_ *cobra.Command, _ []string) error {
var (
pluginBroker csplugin.PluginBroker
pluginTomb tomb.Tomb
)
cfg := cli.cfg()
if alertOverride != "" {
if err := json.Unmarshal([]byte(alertOverride), alert); err != nil {
return fmt.Errorf("can't unmarshal data in the alert flag: %w", err)
}
}
err := pluginBroker.Init(csConfig.PluginConfig, csConfig.API.Server.Profiles, csConfig.ConfigPaths)
err := pluginBroker.Init(cfg.PluginConfig, cfg.API.Server.Profiles, cfg.ConfigPaths)
if err != nil {
return fmt.Errorf("can't initialize plugins: %w", err)
}
@ -356,7 +382,7 @@ cscli notifications reinject <alert_id> -a '{"remediation": true,"scenario":"not
return nil
})
profiles, err := csprofiles.NewProfile(csConfig.API.Server.Profiles)
profiles, err := csprofiles.NewProfile(cfg.API.Server.Profiles)
if err != nil {
return fmt.Errorf("cannot extract profiles from configuration: %w", err)
}
@ -382,9 +408,9 @@ cscli notifications reinject <alert_id> -a '{"remediation": true,"scenario":"not
default:
time.Sleep(50 * time.Millisecond)
log.Info("sleeping\n")
}
}
if profile.Cfg.OnSuccess == "break" {
log.Infof("The profile %s contains a 'on_success: break' so bailing out", profile.Cfg.Name)
break
@ -393,6 +419,7 @@ cscli notifications reinject <alert_id> -a '{"remediation": true,"scenario":"not
//time.Sleep(2 * time.Second) // There's no mechanism to ensure notification has been sent
pluginTomb.Kill(fmt.Errorf("terminating"))
pluginTomb.Wait()
return nil
},
}
@ -401,18 +428,22 @@ cscli notifications reinject <alert_id> -a '{"remediation": true,"scenario":"not
return cmd
}
func FetchAlertFromArgString(toParse string) (*models.Alert, error) {
func (cli *cliNotifications) fetchAlertFromArgString(toParse string) (*models.Alert, error) {
cfg := cli.cfg()
id, err := strconv.Atoi(toParse)
if err != nil {
return nil, fmt.Errorf("bad alert id %s", toParse)
}
apiURL, err := url.Parse(csConfig.API.Client.Credentials.URL)
apiURL, err := url.Parse(cfg.API.Client.Credentials.URL)
if err != nil {
return nil, fmt.Errorf("error parsing the URL of the API: %w", err)
}
client, err := apiclient.NewClient(&apiclient.Config{
MachineID: csConfig.API.Client.Credentials.Login,
Password: strfmt.Password(csConfig.API.Client.Credentials.Password),
MachineID: cfg.API.Client.Credentials.Login,
Password: strfmt.Password(cfg.API.Client.Credentials.Password),
UserAgent: fmt.Sprintf("crowdsec/%s", version.String()),
URL: apiURL,
VersionPrefix: "v1",
@ -420,9 +451,11 @@ func FetchAlertFromArgString(toParse string) (*models.Alert, error) {
if err != nil {
return nil, fmt.Errorf("error creating the client for the API: %w", err)
}
alert, _, err := client.Alerts.GetByID(context.Background(), id)
if err != nil {
return nil, fmt.Errorf("can't find alert with id %d: %w", id, err)
}
return alert, nil
}

View file

@ -1,6 +1,7 @@
package main
import (
"fmt"
"time"
log "github.com/sirupsen/logrus"
@ -9,34 +10,39 @@ import (
"github.com/crowdsecurity/go-cs-lib/ptr"
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require"
"github.com/crowdsecurity/crowdsec/pkg/apiserver"
"github.com/crowdsecurity/crowdsec/pkg/database"
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require"
)
type cliPapi struct {}
func NewCLIPapi() *cliPapi {
return &cliPapi{}
type cliPapi struct {
cfg configGetter
}
func (cli cliPapi) NewCommand() *cobra.Command {
var cmd = &cobra.Command{
func NewCLIPapi(cfg configGetter) *cliPapi {
return &cliPapi{
cfg: cfg,
}
}
func (cli *cliPapi) NewCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "papi [action]",
Short: "Manage interaction with Polling API (PAPI)",
Args: cobra.MinimumNArgs(1),
DisableAutoGenTag: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := require.LAPI(csConfig); err != nil {
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
cfg := cli.cfg()
if err := require.LAPI(cfg); err != nil {
return err
}
if err := require.CAPI(csConfig); err != nil {
if err := require.CAPI(cfg); err != nil {
return err
}
if err := require.PAPI(csConfig); err != nil {
if err := require.PAPI(cfg); err != nil {
return err
}
return nil
},
}
@ -47,35 +53,36 @@ func (cli cliPapi) NewCommand() *cobra.Command {
return cmd
}
func (cli cliPapi) NewStatusCmd() *cobra.Command {
func (cli *cliPapi) NewStatusCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "status",
Short: "Get status of the Polling API",
Args: cobra.MinimumNArgs(0),
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
RunE: func(_ *cobra.Command, _ []string) error {
var err error
dbClient, err = database.NewClient(csConfig.DbConfig)
cfg := cli.cfg()
dbClient, err = database.NewClient(cfg.DbConfig)
if err != nil {
log.Fatalf("unable to initialize database client : %s", err)
return fmt.Errorf("unable to initialize database client: %s", err)
}
apic, err := apiserver.NewAPIC(csConfig.API.Server.OnlineClient, dbClient, csConfig.API.Server.ConsoleConfig, csConfig.API.Server.CapiWhitelists)
apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, dbClient, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists)
if err != nil {
log.Fatalf("unable to initialize API client : %s", err)
return fmt.Errorf("unable to initialize API client: %s", err)
}
papi, err := apiserver.NewPAPI(apic, dbClient, csConfig.API.Server.ConsoleConfig, log.GetLevel())
papi, err := apiserver.NewPAPI(apic, dbClient, cfg.API.Server.ConsoleConfig, log.GetLevel())
if err != nil {
log.Fatalf("unable to initialize PAPI client : %s", err)
return fmt.Errorf("unable to initialize PAPI client: %s", err)
}
perms, err := papi.GetPermissions()
if err != nil {
log.Fatalf("unable to get PAPI permissions: %s", err)
return fmt.Errorf("unable to get PAPI permissions: %s", err)
}
var lastTimestampStr *string
lastTimestampStr, err = dbClient.GetConfigItem(apiserver.PapiPullKey)
@ -90,45 +97,48 @@ func (cli cliPapi) NewStatusCmd() *cobra.Command {
for _, sub := range perms.Categories {
log.Infof(" - %s", sub)
}
return nil
},
}
return cmd
}
func (cli cliPapi) NewSyncCmd() *cobra.Command {
func (cli *cliPapi) NewSyncCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "sync",
Short: "Sync with the Polling API, pulling all non-expired orders for the instance",
Args: cobra.MinimumNArgs(0),
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
RunE: func(_ *cobra.Command, _ []string) error {
var err error
cfg := cli.cfg()
t := tomb.Tomb{}
dbClient, err = database.NewClient(csConfig.DbConfig)
dbClient, err = database.NewClient(cfg.DbConfig)
if err != nil {
log.Fatalf("unable to initialize database client : %s", err)
return fmt.Errorf("unable to initialize database client: %s", err)
}
apic, err := apiserver.NewAPIC(csConfig.API.Server.OnlineClient, dbClient, csConfig.API.Server.ConsoleConfig, csConfig.API.Server.CapiWhitelists)
apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, dbClient, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists)
if err != nil {
log.Fatalf("unable to initialize API client : %s", err)
return fmt.Errorf("unable to initialize API client: %s", err)
}
t.Go(apic.Push)
papi, err := apiserver.NewPAPI(apic, dbClient, csConfig.API.Server.ConsoleConfig, log.GetLevel())
papi, err := apiserver.NewPAPI(apic, dbClient, cfg.API.Server.ConsoleConfig, log.GetLevel())
if err != nil {
log.Fatalf("unable to initialize PAPI client : %s", err)
return fmt.Errorf("unable to initialize PAPI client: %s", err)
}
t.Go(papi.SyncDecisions)
err = papi.PullOnce(time.Time{}, true)
if err != nil {
log.Fatalf("unable to sync decisions: %s", err)
return fmt.Errorf("unable to sync decisions: %s", err)
}
log.Infof("Sending acknowledgements to CAPI")
@ -138,6 +148,7 @@ func (cli cliPapi) NewSyncCmd() *cobra.Command {
t.Wait()
time.Sleep(5 * time.Second) //FIXME: the push done by apic.Push is run inside a sub goroutine, sleep to make sure it's done
return nil
},
}

View file

@ -56,3 +56,7 @@ func HubBranch(cfg *csconfig.Config) string {
return branch
}
func HubURLTemplate(cfg *csconfig.Config) string {
return cfg.Cscli.HubURLTemplate
}

View file

@ -11,7 +11,7 @@ import (
)
func LAPI(c *csconfig.Config) error {
if err := c.LoadAPIServer(); err != nil {
if err := c.LoadAPIServer(true); err != nil {
return fmt.Errorf("failed to load Local API: %w", err)
}
@ -47,7 +47,7 @@ func CAPIRegistered(c *csconfig.Config) error {
}
func DB(c *csconfig.Config) error {
if err := c.LoadDBConfig(); err != nil {
if err := c.LoadDBConfig(true); err != nil {
return fmt.Errorf("this command requires direct database access (must be run on the local API machine): %w", err)
}
@ -66,10 +66,10 @@ func Notifications(c *csconfig.Config) error {
func RemoteHub(c *csconfig.Config) *cwhub.RemoteHubCfg {
// set branch in config, and log if necessary
branch := HubBranch(c)
urlTemplate := HubURLTemplate(c)
remote := &cwhub.RemoteHubCfg{
Branch: branch,
URLTemplate: "https://hub-cdn.crowdsec.net/%s/%s",
// URLTemplate: "http://localhost:8000/crowdsecurity/%s/hub/%s",
URLTemplate: urlTemplate,
IndexPath: ".index.json",
}

View file

@ -3,23 +3,27 @@ package main
import (
"fmt"
"os"
"slices"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"slices"
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require"
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
)
type cliSimulation struct{}
func NewCLISimulation() *cliSimulation {
return &cliSimulation{}
type cliSimulation struct {
cfg configGetter
}
func (cli cliSimulation) NewCommand() *cobra.Command {
func NewCLISimulation(cfg configGetter) *cliSimulation {
return &cliSimulation{
cfg: cfg,
}
}
func (cli *cliSimulation) NewCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "simulation [command]",
Short: "Manage simulation status of scenarios",
@ -27,16 +31,17 @@ func (cli cliSimulation) NewCommand() *cobra.Command {
cscli simulation enable crowdsecurity/ssh-bf
cscli simulation disable crowdsecurity/ssh-bf`,
DisableAutoGenTag: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := csConfig.LoadSimulation(); err != nil {
log.Fatal(err)
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
if err := cli.cfg().LoadSimulation(); err != nil {
return err
}
if csConfig.Cscli.SimulationConfig == nil {
if cli.cfg().Cscli.SimulationConfig == nil {
return fmt.Errorf("no simulation configured")
}
return nil
},
PersistentPostRun: func(cmd *cobra.Command, args []string) {
PersistentPostRun: func(cmd *cobra.Command, _ []string) {
if cmd.Name() != "status" {
log.Infof(ReloadMessage())
}
@ -52,7 +57,7 @@ cscli simulation disable crowdsecurity/ssh-bf`,
return cmd
}
func (cli cliSimulation) NewEnableCmd() *cobra.Command {
func (cli *cliSimulation) NewEnableCmd() *cobra.Command {
var forceGlobalSimulation bool
cmd := &cobra.Command{
@ -60,10 +65,10 @@ func (cli cliSimulation) NewEnableCmd() *cobra.Command {
Short: "Enable the simulation, globally or on specified scenarios",
Example: `cscli simulation enable`,
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
hub, err := require.Hub(csConfig, nil, nil)
RunE: func(cmd *cobra.Command, args []string) error {
hub, err := require.Hub(cli.cfg(), nil, nil)
if err != nil {
log.Fatal(err)
return err
}
if len(args) > 0 {
@ -76,37 +81,35 @@ func (cli cliSimulation) NewEnableCmd() *cobra.Command {
if !item.State.Installed {
log.Warningf("'%s' isn't enabled", scenario)
}
isExcluded := slices.Contains(csConfig.Cscli.SimulationConfig.Exclusions, scenario)
if *csConfig.Cscli.SimulationConfig.Simulation && !isExcluded {
isExcluded := slices.Contains(cli.cfg().Cscli.SimulationConfig.Exclusions, scenario)
if *cli.cfg().Cscli.SimulationConfig.Simulation && !isExcluded {
log.Warning("global simulation is already enabled")
continue
}
if !*csConfig.Cscli.SimulationConfig.Simulation && isExcluded {
if !*cli.cfg().Cscli.SimulationConfig.Simulation && isExcluded {
log.Warningf("simulation for '%s' already enabled", scenario)
continue
}
if *csConfig.Cscli.SimulationConfig.Simulation && isExcluded {
if err := removeFromExclusion(scenario); err != nil {
log.Fatal(err)
}
if *cli.cfg().Cscli.SimulationConfig.Simulation && isExcluded {
cli.removeFromExclusion(scenario)
log.Printf("simulation enabled for '%s'", scenario)
continue
}
if err := addToExclusion(scenario); err != nil {
log.Fatal(err)
}
cli.addToExclusion(scenario)
log.Printf("simulation mode for '%s' enabled", scenario)
}
if err := dumpSimulationFile(); err != nil {
log.Fatalf("simulation enable: %s", err)
if err := cli.dumpSimulationFile(); err != nil {
return fmt.Errorf("simulation enable: %s", err)
}
} else if forceGlobalSimulation {
if err := enableGlobalSimulation(); err != nil {
log.Fatalf("unable to enable global simulation mode : %s", err)
if err := cli.enableGlobalSimulation(); err != nil {
return fmt.Errorf("unable to enable global simulation mode: %s", err)
}
} else {
printHelp(cmd)
}
return nil
},
}
cmd.Flags().BoolVarP(&forceGlobalSimulation, "global", "g", false, "Enable global simulation (reverse mode)")
@ -114,7 +117,7 @@ func (cli cliSimulation) NewEnableCmd() *cobra.Command {
return cmd
}
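// Editor's sketch (not part of this commit), summarizing the exclusion
// semantics that enable/disable rely on: with global simulation off, an
// excluded scenario is the one being simulated; with global simulation on,
// an excluded scenario is the one running for real.
func isSimulated(globalSimulation bool, exclusions []string, scenario string) bool {
	excluded := slices.Contains(exclusions, scenario)
	if globalSimulation {
		return !excluded
	}
	return excluded
}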
func (cli cliSimulation) NewDisableCmd() *cobra.Command {
func (cli *cliSimulation) NewDisableCmd() *cobra.Command {
var forceGlobalSimulation bool
cmd := &cobra.Command{
@ -122,18 +125,16 @@ func (cli cliSimulation) NewDisableCmd() *cobra.Command {
Short: "Disable the simulation mode. Disable only specified scenarios",
Example: `cscli simulation disable`,
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
for _, scenario := range args {
isExcluded := slices.Contains(csConfig.Cscli.SimulationConfig.Exclusions, scenario)
if !*csConfig.Cscli.SimulationConfig.Simulation && !isExcluded {
isExcluded := slices.Contains(cli.cfg().Cscli.SimulationConfig.Exclusions, scenario)
if !*cli.cfg().Cscli.SimulationConfig.Simulation && !isExcluded {
log.Warningf("%s isn't in simulation mode", scenario)
continue
}
if !*csConfig.Cscli.SimulationConfig.Simulation && isExcluded {
if err := removeFromExclusion(scenario); err != nil {
log.Fatal(err)
}
if !*cli.cfg().Cscli.SimulationConfig.Simulation && isExcluded {
cli.removeFromExclusion(scenario)
log.Printf("simulation mode for '%s' disabled", scenario)
continue
}
@ -141,21 +142,21 @@ func (cli cliSimulation) NewDisableCmd() *cobra.Command {
log.Warningf("simulation mode is enabled but is already disable for '%s'", scenario)
continue
}
if err := addToExclusion(scenario); err != nil {
log.Fatal(err)
}
cli.addToExclusion(scenario)
log.Printf("simulation mode for '%s' disabled", scenario)
}
if err := dumpSimulationFile(); err != nil {
log.Fatalf("simulation disable: %s", err)
if err := cli.dumpSimulationFile(); err != nil {
return fmt.Errorf("simulation disable: %s", err)
}
} else if forceGlobalSimulation {
if err := disableGlobalSimulation(); err != nil {
log.Fatalf("unable to disable global simulation mode : %s", err)
if err := cli.disableGlobalSimulation(); err != nil {
return fmt.Errorf("unable to disable global simulation mode: %s", err)
}
} else {
printHelp(cmd)
}
return nil
},
}
cmd.Flags().BoolVarP(&forceGlobalSimulation, "global", "g", false, "Disable global simulation (reverse mode)")
@ -163,16 +164,14 @@ func (cli cliSimulation) NewDisableCmd() *cobra.Command {
return cmd
}
func (cli cliSimulation) NewStatusCmd() *cobra.Command {
func (cli *cliSimulation) NewStatusCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "status",
Short: "Show simulation mode status",
Example: `cscli simulation status`,
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
if err := simulationStatus(); err != nil {
log.Fatal(err)
}
Run: func(_ *cobra.Command, _ []string) {
cli.status()
},
PersistentPostRun: func(cmd *cobra.Command, args []string) {
},
@ -181,29 +180,29 @@ func (cli cliSimulation) NewStatusCmd() *cobra.Command {
return cmd
}
func addToExclusion(name string) error {
csConfig.Cscli.SimulationConfig.Exclusions = append(csConfig.Cscli.SimulationConfig.Exclusions, name)
return nil
func (cli *cliSimulation) addToExclusion(name string) {
cfg := cli.cfg()
cfg.Cscli.SimulationConfig.Exclusions = append(cfg.Cscli.SimulationConfig.Exclusions, name)
}
func removeFromExclusion(name string) error {
index := slices.Index(csConfig.Cscli.SimulationConfig.Exclusions, name)
func (cli *cliSimulation) removeFromExclusion(name string) {
cfg := cli.cfg()
index := slices.Index(cfg.Cscli.SimulationConfig.Exclusions, name)
// Remove element from the slice
csConfig.Cscli.SimulationConfig.Exclusions[index] = csConfig.Cscli.SimulationConfig.Exclusions[len(csConfig.Cscli.SimulationConfig.Exclusions)-1]
csConfig.Cscli.SimulationConfig.Exclusions[len(csConfig.Cscli.SimulationConfig.Exclusions)-1] = ""
csConfig.Cscli.SimulationConfig.Exclusions = csConfig.Cscli.SimulationConfig.Exclusions[:len(csConfig.Cscli.SimulationConfig.Exclusions)-1]
return nil
cfg.Cscli.SimulationConfig.Exclusions[index] = cfg.Cscli.SimulationConfig.Exclusions[len(cfg.Cscli.SimulationConfig.Exclusions)-1]
cfg.Cscli.SimulationConfig.Exclusions[len(cfg.Cscli.SimulationConfig.Exclusions)-1] = ""
cfg.Cscli.SimulationConfig.Exclusions = cfg.Cscli.SimulationConfig.Exclusions[:len(cfg.Cscli.SimulationConfig.Exclusions)-1]
}
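removeFromExclusion drops the entry with the swap-with-last idiom, which does not preserve order. Since the file already relies on the standard slices package (Go 1.21+: slices.Contains, slices.Index), an order-preserving variant could use slices.Delete; a sketch for illustration only, not part of the change:

package main

import (
	"fmt"
	"slices"
)

func main() {
	exclusions := []string{"crowdsecurity/ssh-bf", "crowdsecurity/http-probing", "crowdsecurity/http-bf"}

	// Remove one element while keeping the remaining order intact.
	if i := slices.Index(exclusions, "crowdsecurity/http-probing"); i >= 0 {
		exclusions = slices.Delete(exclusions, i, i+1)
	}

	fmt.Println(exclusions) // [crowdsecurity/ssh-bf crowdsecurity/http-bf]
}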
func enableGlobalSimulation() error {
csConfig.Cscli.SimulationConfig.Simulation = new(bool)
*csConfig.Cscli.SimulationConfig.Simulation = true
csConfig.Cscli.SimulationConfig.Exclusions = []string{}
func (cli *cliSimulation) enableGlobalSimulation() error {
cfg := cli.cfg()
cfg.Cscli.SimulationConfig.Simulation = new(bool)
*cfg.Cscli.SimulationConfig.Simulation = true
cfg.Cscli.SimulationConfig.Exclusions = []string{}
if err := dumpSimulationFile(); err != nil {
log.Fatalf("unable to dump simulation file: %s", err)
if err := cli.dumpSimulationFile(); err != nil {
return fmt.Errorf("unable to dump simulation file: %s", err)
}
log.Printf("global simulation: enabled")
@ -211,59 +210,70 @@ func enableGlobalSimulation() error {
return nil
}
func dumpSimulationFile() error {
newConfigSim, err := yaml.Marshal(csConfig.Cscli.SimulationConfig)
func (cli *cliSimulation) dumpSimulationFile() error {
cfg := cli.cfg()
newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig)
if err != nil {
return fmt.Errorf("unable to marshal simulation configuration: %s", err)
}
err = os.WriteFile(csConfig.ConfigPaths.SimulationFilePath, newConfigSim, 0o644)
err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644)
if err != nil {
return fmt.Errorf("write simulation config in '%s' failed: %s", csConfig.ConfigPaths.SimulationFilePath, err)
return fmt.Errorf("write simulation config in '%s' failed: %s", cfg.ConfigPaths.SimulationFilePath, err)
}
log.Debugf("updated simulation file %s", csConfig.ConfigPaths.SimulationFilePath)
log.Debugf("updated simulation file %s", cfg.ConfigPaths.SimulationFilePath)
return nil
}
func disableGlobalSimulation() error {
csConfig.Cscli.SimulationConfig.Simulation = new(bool)
*csConfig.Cscli.SimulationConfig.Simulation = false
func (cli *cliSimulation) disableGlobalSimulation() error {
cfg := cli.cfg()
cfg.Cscli.SimulationConfig.Simulation = new(bool)
*cfg.Cscli.SimulationConfig.Simulation = false
csConfig.Cscli.SimulationConfig.Exclusions = []string{}
newConfigSim, err := yaml.Marshal(csConfig.Cscli.SimulationConfig)
cfg.Cscli.SimulationConfig.Exclusions = []string{}
newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig)
if err != nil {
return fmt.Errorf("unable to marshal new simulation configuration: %s", err)
}
err = os.WriteFile(csConfig.ConfigPaths.SimulationFilePath, newConfigSim, 0o644)
err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644)
if err != nil {
return fmt.Errorf("unable to write new simulation config in '%s' : %s", csConfig.ConfigPaths.SimulationFilePath, err)
return fmt.Errorf("unable to write new simulation config in '%s': %s", cfg.ConfigPaths.SimulationFilePath, err)
}
log.Printf("global simulation: disabled")
return nil
}
func simulationStatus() error {
if csConfig.Cscli.SimulationConfig == nil {
func (cli *cliSimulation) status() {
cfg := cli.cfg()
if cfg.Cscli.SimulationConfig == nil {
log.Printf("global simulation: disabled (configuration file is missing)")
return nil
return
}
if *csConfig.Cscli.SimulationConfig.Simulation {
if *cfg.Cscli.SimulationConfig.Simulation {
log.Println("global simulation: enabled")
if len(csConfig.Cscli.SimulationConfig.Exclusions) > 0 {
if len(cfg.Cscli.SimulationConfig.Exclusions) > 0 {
log.Println("Scenarios not in simulation mode :")
for _, scenario := range csConfig.Cscli.SimulationConfig.Exclusions {
for _, scenario := range cfg.Cscli.SimulationConfig.Exclusions {
log.Printf(" - %s", scenario)
}
}
} else {
log.Println("global simulation: disabled")
if len(csConfig.Cscli.SimulationConfig.Exclusions) > 0 {
if len(cfg.Cscli.SimulationConfig.Exclusions) > 0 {
log.Println("Scenarios in simulation mode :")
for _, scenario := range csConfig.Cscli.SimulationConfig.Exclusions {
for _, scenario := range cfg.Cscli.SimulationConfig.Exclusions {
log.Printf(" - %s", scenario)
}
}
}
return nil
}


@ -66,19 +66,25 @@ func collectMetrics() ([]byte, []byte, error) {
}
humanMetrics := bytes.NewBuffer(nil)
err := FormatPrometheusMetrics(humanMetrics, csConfig.Cscli.PrometheusUrl, "human")
if err != nil {
return nil, nil, fmt.Errorf("could not fetch promtheus metrics: %s", err)
ms := NewMetricStore()
if err := ms.Fetch(csConfig.Cscli.PrometheusUrl); err != nil {
return nil, nil, fmt.Errorf("could not fetch prometheus metrics: %s", err)
}
if err := ms.Format(humanMetrics, nil, "human", false); err != nil {
return nil, nil, err
}
req, err := http.NewRequest(http.MethodGet, csConfig.Cscli.PrometheusUrl, nil)
if err != nil {
return nil, nil, fmt.Errorf("could not create requests to prometheus endpoint: %s", err)
}
client := &http.Client{}
resp, err := client.Do(req)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return nil, nil, fmt.Errorf("could not get metrics from prometheus endpoint: %s", err)
}
@ -100,17 +106,20 @@ func collectVersion() []byte {
func collectFeatures() []byte {
log.Info("Collecting feature flags")
enabledFeatures := fflag.Crowdsec.GetEnabledFeatures()
w := bytes.NewBuffer(nil)
for _, k := range enabledFeatures {
fmt.Fprintf(w, "%s\n", k)
}
return w.Bytes()
}
func collectOSInfo() ([]byte, error) {
log.Info("Collecting OS info")
info, err := osinfo.GetOSInfo()
if err != nil {
@ -133,6 +142,7 @@ func collectHubItems(hub *cwhub.Hub, itemType string) []byte {
var err error
out := bytes.NewBuffer(nil)
log.Infof("Collecting %s list", itemType)
items := make(map[string][]*cwhub.Item)
@ -144,24 +154,33 @@ func collectHubItems(hub *cwhub.Hub, itemType string) []byte {
if err := listItems(out, []string{itemType}, items, false); err != nil {
log.Warnf("could not collect %s list: %s", itemType, err)
}
return out.Bytes()
}
func collectBouncers(dbClient *database.Client) ([]byte, error) {
out := bytes.NewBuffer(nil)
err := getBouncers(out, dbClient)
bouncers, err := dbClient.ListBouncers()
if err != nil {
return nil, err
return nil, fmt.Errorf("unable to list bouncers: %s", err)
}
getBouncersTable(out, bouncers)
return out.Bytes(), nil
}
func collectAgents(dbClient *database.Client) ([]byte, error) {
out := bytes.NewBuffer(nil)
err := getAgents(out, dbClient)
machines, err := dbClient.ListMachines()
if err != nil {
return nil, err
return nil, fmt.Errorf("unable to list machines: %s", err)
}
getAgentsTable(out, machines)
return out.Bytes(), nil
}
@ -169,12 +188,14 @@ func collectAPIStatus(login string, password string, endpoint string, prefix str
if csConfig.API.Client == nil || csConfig.API.Client.Credentials == nil {
return []byte("No agent credentials found, are we LAPI ?")
}
pwd := strfmt.Password(password)
apiurl, err := url.Parse(endpoint)
if err != nil {
return []byte(fmt.Sprintf("cannot parse API URL: %s", err))
}
scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS)
if err != nil {
return []byte(fmt.Sprintf("could not collect scenarios: %s", err))
@ -187,6 +208,7 @@ func collectAPIStatus(login string, password string, endpoint string, prefix str
if err != nil {
return []byte(fmt.Sprintf("could not init client: %s", err))
}
t := models.WatcherAuthRequest{
MachineID: &login,
Password: &pwd,
@ -203,6 +225,7 @@ func collectAPIStatus(login string, password string, endpoint string, prefix str
func collectCrowdsecConfig() []byte {
log.Info("Collecting crowdsec config")
config, err := os.ReadFile(*csConfig.FilePath)
if err != nil {
return []byte(fmt.Sprintf("could not read config file: %s", err))
@ -215,15 +238,18 @@ func collectCrowdsecConfig() []byte {
func collectCrowdsecProfile() []byte {
log.Info("Collecting crowdsec profile")
config, err := os.ReadFile(csConfig.API.Server.ProfilesPath)
if err != nil {
return []byte(fmt.Sprintf("could not read profile file: %s", err))
}
return config
}
func collectAcquisitionConfig() map[string][]byte {
log.Info("Collecting acquisition config")
ret := make(map[string][]byte)
for _, filename := range csConfig.Crowdsec.AcquisitionFiles {
@ -285,7 +311,7 @@ cscli support dump -f /tmp/crowdsec-support.zip
`,
Args: cobra.NoArgs,
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
Run: func(_ *cobra.Command, _ []string) {
var err error
var skipHub, skipDB, skipCAPI, skipLAPI, skipAgent bool
infos := map[string][]byte{
@ -305,13 +331,13 @@ cscli support dump -f /tmp/crowdsec-support.zip
infos[SUPPORT_AGENTS_PATH] = []byte(err.Error())
}
if err := csConfig.LoadAPIServer(); err != nil {
if err = csConfig.LoadAPIServer(true); err != nil {
log.Warnf("could not load LAPI, skipping CAPI check")
skipLAPI = true
infos[SUPPORT_CAPI_STATUS_PATH] = []byte(err.Error())
}
if err := csConfig.LoadCrowdsec(); err != nil {
if err = csConfig.LoadCrowdsec(); err != nil {
log.Warnf("could not load agent config, skipping crowdsec config check")
skipAgent = true
}
@ -397,7 +423,6 @@ cscli support dump -f /tmp/crowdsec-support.zip
}
if !skipAgent {
acquis := collectAcquisitionConfig()
for filename, content := range acquis {


@ -8,7 +8,6 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/crowdsecurity/crowdsec/pkg/database"
"github.com/crowdsecurity/crowdsec/pkg/types"
)
@ -26,6 +25,7 @@ func manageCliDecisionAlerts(ip *string, ipRange *string, scope *string, value *
return fmt.Errorf("%s isn't a valid range", *ipRange)
}
}
if *ip != "" {
ipRepr := net.ParseIP(*ip)
if ipRepr == nil {
@ -33,7 +33,7 @@ func manageCliDecisionAlerts(ip *string, ipRange *string, scope *string, value *
}
}
//avoid confusion on scope (ip vs Ip and range vs Range)
// avoid confusion on scope (ip vs Ip and range vs Range)
switch strings.ToLower(*scope) {
case "ip":
*scope = types.Ip
@ -44,18 +44,8 @@ func manageCliDecisionAlerts(ip *string, ipRange *string, scope *string, value *
case "as":
*scope = types.AS
}
return nil
}
func getDBClient() (*database.Client, error) {
if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI {
return nil, err
}
ret, err := database.NewClient(csConfig.DbConfig)
if err != nil {
return nil, err
}
return ret, nil
return nil
}
func removeFromSlice(val string, slice []string) []string {


@ -56,7 +56,8 @@ func initAPIServer(cConfig *csconfig.Config) (*apiserver.APIServer, error) {
return apiServer, nil
}
func serveAPIServer(apiServer *apiserver.APIServer, apiReady chan bool) {
func serveAPIServer(apiServer *apiserver.APIServer) {
apiReady := make(chan bool, 1)
apiTomb.Go(func() error {
defer trace.CatchPanic("crowdsec/serveAPIServer")
go func() {
@ -80,6 +81,7 @@ func serveAPIServer(apiServer *apiserver.APIServer, apiReady chan bool) {
}
return nil
})
<-apiReady
}
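serveAPIServer now creates and waits on apiReady itself, so callers no longer thread the channel through. The underlying pattern is a buffered readiness channel: the server goroutine sends once it is up, and the function returns only after that signal. A stripped-down sketch without the tomb machinery (the startup work is simulated):

package main

import (
	"fmt"
	"time"
)

func serve() {
	apiReady := make(chan bool, 1) // buffered: the sender never blocks

	go func() {
		// ... bind the listener, register routes ...
		time.Sleep(100 * time.Millisecond) // placeholder for real startup work
		apiReady <- true                   // announce readiness exactly once
		// ... keep serving requests ...
	}()

	<-apiReady // serve() only returns once the goroutine reported readiness
	fmt.Println("API server is ready")
}

func main() {
	serve()
}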
func hasPlugins(profiles []*csconfig.ProfileCfg) bool {


@ -1,6 +1,7 @@
package main
import (
"context"
"fmt"
"os"
"path/filepath"
@ -13,8 +14,8 @@ import (
"github.com/crowdsecurity/go-cs-lib/trace"
"github.com/crowdsecurity/crowdsec/pkg/acquisition"
"github.com/crowdsecurity/crowdsec/pkg/appsec"
"github.com/crowdsecurity/crowdsec/pkg/alertcontext"
"github.com/crowdsecurity/crowdsec/pkg/appsec"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
@ -56,63 +57,86 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H
//start go-routines for parsing, buckets pour and outputs.
parserWg := &sync.WaitGroup{}
parsersTomb.Go(func() error {
parserWg.Add(1)
for i := 0; i < cConfig.Crowdsec.ParserRoutinesCount; i++ {
parsersTomb.Go(func() error {
defer trace.CatchPanic("crowdsec/runParse")
if err := runParse(inputLineChan, inputEventChan, *parsers.Ctx, parsers.Nodes); err != nil { //this error will never happen as parser.Parse is not able to return errors
log.Fatalf("starting parse error : %s", err)
return err
}
return nil
})
}
parserWg.Done()
return nil
})
parserWg.Wait()
bucketWg := &sync.WaitGroup{}
bucketsTomb.Go(func() error {
bucketWg.Add(1)
/*restore previous state as well if present*/
if cConfig.Crowdsec.BucketStateFile != "" {
log.Warningf("Restoring buckets state from %s", cConfig.Crowdsec.BucketStateFile)
if err := leaky.LoadBucketsState(cConfig.Crowdsec.BucketStateFile, buckets, holders); err != nil {
return fmt.Errorf("unable to restore buckets : %s", err)
return fmt.Errorf("unable to restore buckets: %w", err)
}
}
for i := 0; i < cConfig.Crowdsec.BucketsRoutinesCount; i++ {
bucketsTomb.Go(func() error {
defer trace.CatchPanic("crowdsec/runPour")
if err := runPour(inputEventChan, holders, buckets, cConfig); err != nil {
log.Fatalf("starting pour error : %s", err)
return err
}
return nil
})
}
bucketWg.Done()
return nil
})
bucketWg.Wait()
apiClient, err := AuthenticatedLAPIClient(*cConfig.API.Client.Credentials, hub)
if err != nil {
return err
}
log.Debugf("Starting HeartBeat service")
apiClient.HeartBeat.StartHeartBeat(context.Background(), &outputsTomb)
outputWg := &sync.WaitGroup{}
outputsTomb.Go(func() error {
outputWg.Add(1)
for i := 0; i < cConfig.Crowdsec.OutputRoutinesCount; i++ {
outputsTomb.Go(func() error {
defer trace.CatchPanic("crowdsec/runOutput")
if err := runOutput(inputEventChan, outputEventChan, buckets, *parsers.Povfwctx, parsers.Povfwnodes, *cConfig.API.Client.Credentials, hub); err != nil {
if err := runOutput(inputEventChan, outputEventChan, buckets, *parsers.Povfwctx, parsers.Povfwnodes, apiClient); err != nil {
log.Fatalf("starting outputs error : %s", err)
return err
}
return nil
})
}
outputWg.Done()
return nil
})
outputWg.Wait()
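Each stage above (parsers, buckets, outputs) uses the same gating idiom: a WaitGroup whose single Done fires once all worker goroutines of the stage have been launched, so the caller can Wait before wiring the next stage. A reduced sketch of that idiom, outside the tomb machinery and with a hypothetical work function:

package main

import (
	"fmt"
	"sync"
)

func startWorkers(n int, work func(id int)) {
	started := &sync.WaitGroup{}
	started.Add(1)

	go func() {
		for i := 0; i < n; i++ {
			i := i // capture the per-iteration value (required before Go 1.22)
			go work(i)
		}
		started.Done() // all workers are launched (not necessarily finished)
	}()

	started.Wait() // block until the launcher confirms startup
	fmt.Printf("%d workers launched\n", n)
}

func main() {
	// The workers themselves may still be running when main returns;
	// the real code tracks their lifetime in a tomb instead.
	startWorkers(3, func(id int) { fmt.Println("worker", id) })
}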
@ -122,16 +146,16 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H
if cConfig.Prometheus.Level == "aggregated" {
aggregated = true
}
if err := acquisition.GetMetrics(dataSources, aggregated); err != nil {
return fmt.Errorf("while fetching prometheus metrics for datasources: %w", err)
}
}
log.Info("Starting processing data")
if err := acquisition.StartAcquisition(dataSources, inputLineChan, &acquisTomb); err != nil {
log.Fatalf("starting acquisition error : %s", err)
return err
return fmt.Errorf("starting acquisition error: %w", err)
}
return nil
@ -140,11 +164,13 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H
func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub.Hub, agentReady chan bool) {
crowdsecTomb.Go(func() error {
defer trace.CatchPanic("crowdsec/serveCrowdsec")
go func() {
defer trace.CatchPanic("crowdsec/runCrowdsec")
// this logs every time, even at config reload
log.Debugf("running agent after %s ms", time.Since(crowdsecT0))
agentReady <- true
if err := runCrowdsec(cConfig, parsers, hub); err != nil {
log.Fatalf("unable to start crowdsec routines: %s", err)
}
@ -156,16 +182,20 @@ func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub
*/
waitOnTomb()
log.Debugf("Shutting down crowdsec routines")
if err := ShutdownCrowdsecRoutines(); err != nil {
log.Fatalf("unable to shutdown crowdsec routines: %s", err)
}
log.Debugf("everything is dead, return crowdsecTomb")
if dumpStates {
dumpParserState()
dumpOverflowState()
dumpBucketsPour()
os.Exit(0)
}
return nil
})
}
@ -175,55 +205,65 @@ func dumpBucketsPour() {
if err != nil {
log.Fatalf("open: %s", err)
}
out, err := yaml.Marshal(leaky.BucketPourCache)
if err != nil {
log.Fatalf("marshal: %s", err)
}
b, err := fd.Write(out)
if err != nil {
log.Fatalf("write: %s", err)
}
log.Tracef("wrote %d bytes", b)
if err := fd.Close(); err != nil {
log.Fatalf(" close: %s", err)
}
}
func dumpParserState() {
fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "parser-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
if err != nil {
log.Fatalf("open: %s", err)
}
out, err := yaml.Marshal(parser.StageParseCache)
if err != nil {
log.Fatalf("marshal: %s", err)
}
b, err := fd.Write(out)
if err != nil {
log.Fatalf("write: %s", err)
}
log.Tracef("wrote %d bytes", b)
if err := fd.Close(); err != nil {
log.Fatalf(" close: %s", err)
}
}
func dumpOverflowState() {
fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucket-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
if err != nil {
log.Fatalf("open: %s", err)
}
out, err := yaml.Marshal(bucketOverflows)
if err != nil {
log.Fatalf("marshal: %s", err)
}
b, err := fd.Write(out)
if err != nil {
log.Fatalf("write: %s", err)
}
log.Tracef("wrote %d bytes", b)
if err := fd.Close(); err != nil {
log.Fatalf(" close: %s", err)
}
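dumpBucketsPour, dumpParserState and dumpOverflowState differ only in the output filename and the value being marshalled. A hedged sketch of a shared helper they could delegate to — hypothetical, not part of the diff, and assuming gopkg.in/yaml.v2 (Marshal has the same signature in v3):

package main

import (
	"fmt"
	"os"

	"gopkg.in/yaml.v2" // assumption: whichever yaml package the file imports works the same here
)

// dumpYAML marshals v and writes it to path, truncating any previous dump,
// mirroring the O_CREATE|O_WRONLY|O_TRUNC + 0666 behaviour above.
func dumpYAML(path string, v interface{}) error {
	out, err := yaml.Marshal(v)
	if err != nil {
		return fmt.Errorf("marshal: %w", err)
	}
	if err := os.WriteFile(path, out, 0o666); err != nil {
		return fmt.Errorf("write %s: %w", path, err)
	}
	return nil
}

func main() {
	if err := dumpYAML("/tmp/demo-dump.yaml", map[string]int{"events": 3}); err != nil {
		fmt.Println(err)
	}
}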


@ -0,0 +1,92 @@
package main
import (
"context"
"fmt"
"net/url"
"time"
"github.com/go-openapi/strfmt"
"github.com/crowdsecurity/go-cs-lib/version"
"github.com/crowdsecurity/crowdsec/pkg/apiclient"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
"github.com/crowdsecurity/crowdsec/pkg/models"
)
func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub.Hub) (*apiclient.ApiClient, error) {
scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS)
if err != nil {
return nil, fmt.Errorf("loading list of installed hub scenarios: %w", err)
}
appsecRules, err := hub.GetInstalledItemNames(cwhub.APPSEC_RULES)
if err != nil {
return nil, fmt.Errorf("loading list of installed hub appsec rules: %w", err)
}
installedScenariosAndAppsecRules := make([]string, 0, len(scenarios)+len(appsecRules))
installedScenariosAndAppsecRules = append(installedScenariosAndAppsecRules, scenarios...)
installedScenariosAndAppsecRules = append(installedScenariosAndAppsecRules, appsecRules...)
apiURL, err := url.Parse(credentials.URL)
if err != nil {
return nil, fmt.Errorf("parsing api url ('%s'): %w", credentials.URL, err)
}
papiURL, err := url.Parse(credentials.PapiURL)
if err != nil {
return nil, fmt.Errorf("parsing polling api url ('%s'): %w", credentials.PapiURL, err)
}
password := strfmt.Password(credentials.Password)
client, err := apiclient.NewClient(&apiclient.Config{
MachineID: credentials.Login,
Password: password,
Scenarios: installedScenariosAndAppsecRules,
UserAgent: fmt.Sprintf("crowdsec/%s", version.String()),
URL: apiURL,
PapiURL: papiURL,
VersionPrefix: "v1",
UpdateScenario: func() ([]string, error) {
scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS)
if err != nil {
return nil, err
}
appsecRules, err := hub.GetInstalledItemNames(cwhub.APPSEC_RULES)
if err != nil {
return nil, err
}
ret := make([]string, 0, len(scenarios)+len(appsecRules))
ret = append(ret, scenarios...)
ret = append(ret, appsecRules...)
return ret, nil
},
})
if err != nil {
return nil, fmt.Errorf("new client api: %w", err)
}
authResp, _, err := client.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{
MachineID: &credentials.Login,
Password: &password,
Scenarios: installedScenariosAndAppsecRules,
})
if err != nil {
return nil, fmt.Errorf("authenticate watcher (%s): %w", credentials.Login, err)
}
var expiration time.Time
if err := expiration.UnmarshalText([]byte(authResp.Expire)); err != nil {
return nil, fmt.Errorf("unable to parse jwt expiration: %w", err)
}
client.GetClient().Transport.(*apiclient.JWTTransport).Token = authResp.Token
client.GetClient().Transport.(*apiclient.JWTTransport).Expiration = expiration
return client, nil
}
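This new helper extracts the client construction and watcher authentication that runOutput used to do inline (see the output.go diff below). Callers authenticate once and share the client; condensed from the runCrowdsec hunks above:

// In runCrowdsec: authenticate once, start the heartbeat, then hand the
// client to every output routine instead of passing per-routine credentials.
apiClient, err := AuthenticatedLAPIClient(*cConfig.API.Client.Credentials, hub)
if err != nil {
	return err
}

log.Debugf("Starting HeartBeat service")
apiClient.HeartBeat.StartHeartBeat(context.Background(), &outputsTomb)

// ... inside each output goroutine:
// runOutput(inputEventChan, outputEventChan, buckets, *parsers.Povfwctx, parsers.Povfwnodes, apiClient)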


@ -262,7 +262,7 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, quiet boo
}
if !cConfig.DisableAPI {
if err := cConfig.LoadAPIServer(); err != nil {
if err := cConfig.LoadAPIServer(false); err != nil {
return nil, err
}
}


@ -114,13 +114,17 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha
}
decisionsFilters := make(map[string][]string, 0)
decisions, err := dbClient.QueryDecisionCountByScenario(decisionsFilters)
if err != nil {
log.Errorf("Error querying decisions for metrics: %v", err)
next.ServeHTTP(w, r)
return
}
globalActiveDecisions.Reset()
for _, d := range decisions {
globalActiveDecisions.With(prometheus.Labels{"reason": d.Scenario, "origin": d.Origin, "action": d.Type}).Set(float64(d.Count))
}
@ -136,6 +140,7 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha
if err != nil {
log.Errorf("Error querying alerts for metrics: %v", err)
next.ServeHTTP(w, r)
return
}
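computeDynamicMetrics wraps the Prometheus handler so gauges are refreshed from the database on every scrape, and the change adds the missing return so a failed query no longer falls through to the remaining updates after the response has already been served. A minimal sketch of that wrap-and-refresh middleware pattern, with a hypothetical gauge and fetch function:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var activeItems = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "demo_active_items", // hypothetical metric name
	Help: "Number of active items, refreshed on each scrape.",
})

func refreshMetrics(next http.Handler, fetch func() (int, error)) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		n, err := fetch()
		if err != nil {
			log.Printf("refresh failed: %v", err)
			next.ServeHTTP(w, r) // serve the previous values, skip the update
			return
		}
		activeItems.Set(float64(n))
		next.ServeHTTP(w, r)
	})
}

func main() {
	prometheus.MustRegister(activeItems)
	http.Handle("/metrics", refreshMetrics(promhttp.Handler(), func() (int, error) { return 42, nil }))
	log.Fatal(http.ListenAndServe("127.0.0.1:9090", nil))
}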
@ -161,7 +166,7 @@ func registerPrometheus(config *csconfig.PrometheusCfg) {
leaky.BucketsUnderflow, leaky.BucketsCanceled, leaky.BucketsInstantiation, leaky.BucketsOverflow,
v1.LapiRouteHits,
leaky.BucketsCurrentCount,
cache.CacheMetrics, exprhelpers.RegexpCacheMetrics,
cache.CacheMetrics, exprhelpers.RegexpCacheMetrics, parser.NodesWlHitsOk, parser.NodesWlHits,
)
} else {
log.Infof("Loading prometheus collectors")
@ -170,14 +175,15 @@ func registerPrometheus(config *csconfig.PrometheusCfg) {
globalCsInfo, globalParsingHistogram, globalPourHistogram,
v1.LapiRouteHits, v1.LapiMachineHits, v1.LapiBouncerHits, v1.LapiNilDecisions, v1.LapiNonNilDecisions, v1.LapiResponseTime,
leaky.BucketsPour, leaky.BucketsUnderflow, leaky.BucketsCanceled, leaky.BucketsInstantiation, leaky.BucketsOverflow, leaky.BucketsCurrentCount,
globalActiveDecisions, globalAlerts,
globalActiveDecisions, globalAlerts, parser.NodesWlHitsOk, parser.NodesWlHits,
cache.CacheMetrics, exprhelpers.RegexpCacheMetrics,
)
}
}
func servePrometheus(config *csconfig.PrometheusCfg, dbClient *database.Client, apiReady chan bool, agentReady chan bool) {
func servePrometheus(config *csconfig.PrometheusCfg, dbClient *database.Client, agentReady chan bool) {
<-agentReady
if !config.Enabled {
return
}
@ -185,9 +191,8 @@ func servePrometheus(config *csconfig.PrometheusCfg, dbClient *database.Client,
defer trace.CatchPanic("crowdsec/servePrometheus")
http.Handle("/metrics", computeDynamicMetrics(promhttp.Handler(), dbClient))
<-apiReady
<-agentReady
log.Debugf("serving metrics after %s ms", time.Since(crowdsecT0))
if err := http.ListenAndServe(fmt.Sprintf("%s:%d", config.ListenAddr, config.ListenPort), nil); err != nil {
log.Warningf("prometheus: %s", err)
}


@ -3,18 +3,12 @@ package main
import (
"context"
"fmt"
"net/url"
"sync"
"time"
"github.com/go-openapi/strfmt"
log "github.com/sirupsen/logrus"
"github.com/crowdsecurity/go-cs-lib/version"
"github.com/crowdsecurity/crowdsec/pkg/apiclient"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
"github.com/crowdsecurity/crowdsec/pkg/models"
"github.com/crowdsecurity/crowdsec/pkg/parser"
@ -22,7 +16,6 @@ import (
)
func dedupAlerts(alerts []types.RuntimeAlert) ([]*models.Alert, error) {
var dedupCache []*models.Alert
for idx, alert := range alerts {
@ -32,16 +25,21 @@ func dedupAlerts(alerts []types.RuntimeAlert) ([]*models.Alert, error) {
dedupCache = append(dedupCache, alert.Alert)
continue
}
for k, src := range alert.Sources {
refsrc := *alert.Alert //copy
log.Tracef("source[%s]", k)
refsrc.Source = &src
dedupCache = append(dedupCache, &refsrc)
}
}
if len(dedupCache) != len(alerts) {
log.Tracef("went from %d to %d alerts", len(alerts), len(dedupCache))
}
return dedupCache, nil
}
@ -52,93 +50,25 @@ func PushAlerts(alerts []types.RuntimeAlert, client *apiclient.ApiClient) error
if err != nil {
return fmt.Errorf("failed to transform alerts for api: %w", err)
}
_, _, err = client.Alerts.Add(ctx, alertsToPush)
if err != nil {
return fmt.Errorf("failed sending alert to LAPI: %w", err)
}
return nil
}
var bucketOverflows []types.Event
func runOutput(input chan types.Event, overflow chan types.Event, buckets *leaky.Buckets,
postOverflowCTX parser.UnixParserCtx, postOverflowNodes []parser.Node,
apiConfig csconfig.ApiCredentialsCfg, hub *cwhub.Hub) error {
func runOutput(input chan types.Event, overflow chan types.Event, buckets *leaky.Buckets, postOverflowCTX parser.UnixParserCtx,
postOverflowNodes []parser.Node, client *apiclient.ApiClient) error {
var (
cache []types.RuntimeAlert
cacheMutex sync.Mutex
)
var err error
ticker := time.NewTicker(1 * time.Second)
var cache []types.RuntimeAlert
var cacheMutex sync.Mutex
scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS)
if err != nil {
return fmt.Errorf("loading list of installed hub scenarios: %w", err)
}
appsecRules, err := hub.GetInstalledItemNames(cwhub.APPSEC_RULES)
if err != nil {
return fmt.Errorf("loading list of installed hub appsec rules: %w", err)
}
installedScenariosAndAppsecRules := make([]string, 0, len(scenarios)+len(appsecRules))
installedScenariosAndAppsecRules = append(installedScenariosAndAppsecRules, scenarios...)
installedScenariosAndAppsecRules = append(installedScenariosAndAppsecRules, appsecRules...)
apiURL, err := url.Parse(apiConfig.URL)
if err != nil {
return fmt.Errorf("parsing api url ('%s'): %w", apiConfig.URL, err)
}
papiURL, err := url.Parse(apiConfig.PapiURL)
if err != nil {
return fmt.Errorf("parsing polling api url ('%s'): %w", apiConfig.PapiURL, err)
}
password := strfmt.Password(apiConfig.Password)
Client, err := apiclient.NewClient(&apiclient.Config{
MachineID: apiConfig.Login,
Password: password,
Scenarios: installedScenariosAndAppsecRules,
UserAgent: fmt.Sprintf("crowdsec/%s", version.String()),
URL: apiURL,
PapiURL: papiURL,
VersionPrefix: "v1",
UpdateScenario: func() ([]string, error) {
scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS)
if err != nil {
return nil, err
}
appsecRules, err := hub.GetInstalledItemNames(cwhub.APPSEC_RULES)
if err != nil {
return nil, err
}
ret := make([]string, 0, len(scenarios)+len(appsecRules))
ret = append(ret, scenarios...)
ret = append(ret, appsecRules...)
return ret, nil
},
})
if err != nil {
return fmt.Errorf("new client api: %w", err)
}
authResp, _, err := Client.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{
MachineID: &apiConfig.Login,
Password: &password,
Scenarios: installedScenariosAndAppsecRules,
})
if err != nil {
return fmt.Errorf("authenticate watcher (%s): %w", apiConfig.Login, err)
}
if err := Client.GetClient().Transport.(*apiclient.JWTTransport).Expiration.UnmarshalText([]byte(authResp.Expire)); err != nil {
return fmt.Errorf("unable to parse jwt expiration: %w", err)
}
Client.GetClient().Transport.(*apiclient.JWTTransport).Token = authResp.Token
//start the heartbeat service
log.Debugf("Starting HeartBeat service")
Client.HeartBeat.StartHeartBeat(context.Background(), &outputsTomb)
LOOP:
for {
select {
@ -149,7 +79,7 @@ LOOP:
newcache := make([]types.RuntimeAlert, 0)
cache = newcache
cacheMutex.Unlock()
if err := PushAlerts(cachecopy, Client); err != nil {
if err := PushAlerts(cachecopy, client); err != nil {
log.Errorf("while pushing to api : %s", err)
//just push back the events to the queue
cacheMutex.Lock()
@ -162,10 +92,11 @@ LOOP:
cacheMutex.Lock()
cachecopy := cache
cacheMutex.Unlock()
if err := PushAlerts(cachecopy, Client); err != nil {
if err := PushAlerts(cachecopy, client); err != nil {
log.Errorf("while pushing leftovers to api : %s", err)
}
}
break LOOP
case event := <-overflow:
/*if alert is empty and mapKey is present, the overflow is just to cleanup bucket*/
@ -176,7 +107,7 @@ LOOP:
/* process post overflow parser nodes */
event, err := parser.Parse(postOverflowCTX, event, postOverflowNodes)
if err != nil {
return fmt.Errorf("postoverflow failed : %s", err)
return fmt.Errorf("postoverflow failed: %w", err)
}
log.Printf("%s", *event.Overflow.Alert.Message)
//if the Alert is nil, it's to signal bucket is ready for GC, don't track this
@ -206,6 +137,6 @@ LOOP:
}
ticker.Stop()
return nil
}
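runOutput now receives the authenticated client and is left with its core job: accumulate alerts in a mutex-protected cache and flush them on a one-second ticker, with a final flush when the tomb is dying. A reduced sketch of that batch-and-flush loop, with a hypothetical push function standing in for PushAlerts:

package main

import (
	"fmt"
	"sync"
	"time"
)

func batcher(input <-chan string, done <-chan struct{}, push func([]string)) {
	var (
		cache []string
		mu    sync.Mutex
	)

	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			mu.Lock()
			batch := cache
			cache = nil
			mu.Unlock()
			if len(batch) > 0 {
				push(batch) // periodic flush
			}
		case item := <-input:
			mu.Lock()
			cache = append(cache, item)
			mu.Unlock()
		case <-done:
			mu.Lock()
			batch := cache
			mu.Unlock()
			if len(batch) > 0 {
				push(batch) // flush leftovers before returning
			}
			return
		}
	}
}

func main() {
	input := make(chan string)
	done := make(chan struct{})

	go batcher(input, done, func(b []string) { fmt.Println("pushed", len(b), "alerts") })

	input <- "alert-1"
	input <- "alert-2"
	time.Sleep(1500 * time.Millisecond)
	close(done)
	time.Sleep(100 * time.Millisecond)
}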


@ -33,7 +33,6 @@ func StartRunSvc() error {
log.Infof("Crowdsec %s", version.String())
apiReady := make(chan bool, 1)
agentReady := make(chan bool, 1)
// Enable profiling early
@ -46,14 +45,19 @@ func StartRunSvc() error {
dbClient, err = database.NewClient(cConfig.DbConfig)
if err != nil {
return fmt.Errorf("unable to create database client: %s", err)
return fmt.Errorf("unable to create database client: %w", err)
}
}
registerPrometheus(cConfig.Prometheus)
go servePrometheus(cConfig.Prometheus, dbClient, apiReady, agentReady)
go servePrometheus(cConfig.Prometheus, dbClient, agentReady)
} else {
// avoid leaking the channel
go func() {
<-agentReady
}()
}
return Serve(cConfig, apiReady, agentReady)
return Serve(cConfig, agentReady)
}


@ -73,7 +73,6 @@ func WindowsRun() error {
log.Infof("Crowdsec %s", version.String())
apiReady := make(chan bool, 1)
agentReady := make(chan bool, 1)
// Enable profiling early
@ -85,11 +84,11 @@ func WindowsRun() error {
dbClient, err = database.NewClient(cConfig.DbConfig)
if err != nil {
return fmt.Errorf("unable to create database client: %s", err)
return fmt.Errorf("unable to create database client: %w", err)
}
}
registerPrometheus(cConfig.Prometheus)
go servePrometheus(cConfig.Prometheus, dbClient, apiReady, agentReady)
go servePrometheus(cConfig.Prometheus, dbClient, agentReady)
}
return Serve(cConfig, apiReady, agentReady)
return Serve(cConfig, agentReady)
}


@ -42,7 +42,9 @@ func debugHandler(sig os.Signal, cConfig *csconfig.Config) error {
if err := leaky.ShutdownAllBuckets(buckets); err != nil {
log.Warningf("Failed to shut down routines : %s", err)
}
log.Printf("Shutdown is finished, buckets are in %s", tmpFile)
return nil
}
@ -66,15 +68,16 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) {
if !cConfig.DisableAPI {
if flags.DisableCAPI {
log.Warningf("Communication with CrowdSec Central API disabled from args")
cConfig.API.Server.OnlineClient = nil
}
apiServer, err := initAPIServer(cConfig)
if err != nil {
return nil, fmt.Errorf("unable to init api server: %w", err)
}
apiReady := make(chan bool, 1)
serveAPIServer(apiServer, apiReady)
serveAPIServer(apiServer)
}
if !cConfig.DisableAgent {
@ -110,6 +113,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) {
log.Warningf("Failed to delete temp file (%s) : %s", tmpFile, err)
}
}
return cConfig, nil
}
@ -117,10 +121,12 @@ func ShutdownCrowdsecRoutines() error {
var reterr error
log.Debugf("Shutting down crowdsec sub-routines")
if len(dataSources) > 0 {
acquisTomb.Kill(nil)
log.Debugf("waiting for acquisition to finish")
drainChan(inputLineChan)
if err := acquisTomb.Wait(); err != nil {
log.Warningf("Acquisition returned error : %s", err)
reterr = err
@ -130,6 +136,7 @@ func ShutdownCrowdsecRoutines() error {
log.Debugf("acquisition is finished, wait for parser/bucket/ouputs.")
parsersTomb.Kill(nil)
drainChan(inputEventChan)
if err := parsersTomb.Wait(); err != nil {
log.Warningf("Parsers returned error : %s", err)
reterr = err
@ -160,6 +167,7 @@ func ShutdownCrowdsecRoutines() error {
log.Warningf("Outputs returned error : %s", err)
reterr = err
}
log.Debugf("outputs are done")
case <-time.After(3 * time.Second):
// this can happen if outputs are stuck in a http retry loop
@ -181,6 +189,7 @@ func shutdownAPI() error {
}
log.Debugf("done")
return nil
}
@ -193,6 +202,7 @@ func shutdownCrowdsec() error {
}
log.Debugf("done")
return nil
}
@ -292,10 +302,11 @@ func HandleSignals(cConfig *csconfig.Config) error {
if err == nil {
log.Warning("Crowdsec service shutting down")
}
return err
}
func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) error {
func Serve(cConfig *csconfig.Config, agentReady chan bool) error {
acquisTomb = tomb.Tomb{}
parsersTomb = tomb.Tomb{}
bucketsTomb = tomb.Tomb{}
@ -325,6 +336,7 @@ func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) e
if cConfig.API.CTI != nil && *cConfig.API.CTI.Enabled {
log.Infof("Crowdsec CTI helper enabled")
if err := exprhelpers.InitCrowdsecCTI(cConfig.API.CTI.Key, cConfig.API.CTI.CacheTimeout, cConfig.API.CTI.CacheSize, cConfig.API.CTI.LogLevel); err != nil {
return fmt.Errorf("failed to init crowdsec cti: %w", err)
}
@ -337,6 +349,7 @@ func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) e
if flags.DisableCAPI {
log.Warningf("Communication with CrowdSec Central API disabled from args")
cConfig.API.Server.OnlineClient = nil
}
@ -346,10 +359,8 @@ func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) e
}
if !flags.TestMode {
serveAPIServer(apiServer, apiReady)
serveAPIServer(apiServer)
}
} else {
apiReady <- true
}
if !cConfig.DisableAgent {
@ -366,6 +377,8 @@ func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) e
// if it's just linting, we're done
if !flags.TestMode {
serveCrowdsec(csParsers, cConfig, hub, agentReady)
} else {
agentReady <- true
}
} else {
agentReady <- true
@ -395,6 +408,7 @@ func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) e
for _, ch := range waitChans {
<-ch
switch ch {
case apiTomb.Dead():
log.Infof("api shutdown")
@ -402,5 +416,6 @@ func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) e
log.Infof("crowdsec shutdown")
}
}
return nil
}


@ -23,7 +23,7 @@ type crowdsec_winservice struct {
config *csconfig.Config
}
func (m *crowdsec_winservice) Execute(args []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (ssec bool, errno uint32) {
func (m *crowdsec_winservice) Execute(args []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (bool, uint32) {
const cmdsAccepted = svc.AcceptStop | svc.AcceptShutdown
changes <- svc.Status{State: svc.StartPending}
tick := time.Tick(500 * time.Millisecond)
@ -59,7 +59,8 @@ func (m *crowdsec_winservice) Execute(args []string, r <-chan svc.ChangeRequest,
if err != nil {
log.Fatal(err)
}
return
return false, 0
}
func runService(name string) error {


@ -316,6 +316,7 @@ config.yaml) each time the container is run.
| `BOUNCERS_ALLOWED_OU` | bouncer-ou | OU values allowed for bouncers, separated by comma |
| | | |
| __Hub management__ | | |
| `NO_HUB_UPGRADE` | false | Skip hub update / upgrade when the container starts |
| `COLLECTIONS` | | Collections to install, separated by space: `-e COLLECTIONS="crowdsecurity/linux crowdsecurity/apache2"` |
| `PARSERS` | | Parsers to install, separated by space |
| `SCENARIOS` | | Scenarios to install, separated by space |


@ -109,6 +109,8 @@ cscli_if_clean() {
for obj in $objs; do
if cscli "$itemtype" inspect "$obj" -o json | yq -e '.tainted // false' >/dev/null 2>&1; then
echo "Object $itemtype/$obj is tainted, skipping"
elif cscli "$itemtype" inspect "$obj" -o json | yq -e '.local // false' >/dev/null 2>&1; then
echo "Object $itemtype/$obj is local, skipping"
else
# # Too verbose? Only show errors if not in debug mode
# if [ "$DEBUG" != "true" ]; then
@ -301,10 +303,13 @@ fi
conf_set_if "$PLUGIN_DIR" '.config_paths.plugin_dir = strenv(PLUGIN_DIR)'
## Install hub items
cscli hub update
cscli_if_clean collections upgrade crowdsecurity/linux
cscli_if_clean parsers upgrade crowdsecurity/whitelists
cscli hub update || true
if isfalse "$NO_HUB_UPGRADE"; then
cscli hub upgrade || true
fi
cscli_if_clean parsers install crowdsecurity/docker-logs
cscli_if_clean parsers install crowdsecurity/cri-logs


@ -1,7 +1,7 @@
[packages]
pytest-dotenv = "0.5.2"
pytest-xdist = "3.5.0"
pytest-cs = {ref = "0.7.18", git = "https://github.com/crowdsecurity/pytest-cs.git"}
pytest-cs = {ref = "0.7.19", git = "https://github.com/crowdsecurity/pytest-cs.git"}
[dev-packages]
gnureadline = "8.1.2"

docker/test/Pipfile.lock (generated)

@ -1,7 +1,7 @@
{
"_meta": {
"hash": {
"sha256": "575cb97d0b7fb66caf843191b843724307f7bc39c3c160f22330ba38ee055c80"
"sha256": "b5d25a7199d15a900b285be1af97cf7b7083c6637d631ad777b454471c8319fe"
},
"pipfile-spec": 6,
"requires": {
@ -79,7 +79,7 @@
"sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956",
"sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"
],
"markers": "python_version >= '3.8'",
"markers": "platform_python_implementation != 'PyPy'",
"version": "==1.16.0"
},
"charset-normalizer": {
@ -180,32 +180,41 @@
},
"cryptography": {
"hashes": [
"sha256:079b85658ea2f59c4f43b70f8119a52414cdb7be34da5d019a77bf96d473b960",
"sha256:09616eeaef406f99046553b8a40fbf8b1e70795a91885ba4c96a70793de5504a",
"sha256:13f93ce9bea8016c253b34afc6bd6a75993e5c40672ed5405a9c832f0d4a00bc",
"sha256:37a138589b12069efb424220bf78eac59ca68b95696fc622b6ccc1c0a197204a",
"sha256:3c78451b78313fa81607fa1b3f1ae0a5ddd8014c38a02d9db0616133987b9cdf",
"sha256:43f2552a2378b44869fe8827aa19e69512e3245a219104438692385b0ee119d1",
"sha256:48a0476626da912a44cc078f9893f292f0b3e4c739caf289268168d8f4702a39",
"sha256:49f0805fc0b2ac8d4882dd52f4a3b935b210935d500b6b805f321addc8177406",
"sha256:5429ec739a29df2e29e15d082f1d9ad683701f0ec7709ca479b3ff2708dae65a",
"sha256:5a1b41bc97f1ad230a41657d9155113c7521953869ae57ac39ac7f1bb471469a",
"sha256:68a2dec79deebc5d26d617bfdf6e8aab065a4f34934b22d3b5010df3ba36612c",
"sha256:7a698cb1dac82c35fcf8fe3417a3aaba97de16a01ac914b89a0889d364d2f6be",
"sha256:841df4caa01008bad253bce2a6f7b47f86dc9f08df4b433c404def869f590a15",
"sha256:90452ba79b8788fa380dfb587cca692976ef4e757b194b093d845e8d99f612f2",
"sha256:928258ba5d6f8ae644e764d0f996d61a8777559f72dfeb2eea7e2fe0ad6e782d",
"sha256:af03b32695b24d85a75d40e1ba39ffe7db7ffcb099fe507b39fd41a565f1b157",
"sha256:b640981bf64a3e978a56167594a0e97db71c89a479da8e175d8bb5be5178c003",
"sha256:c5ca78485a255e03c32b513f8c2bc39fedb7f5c5f8535545bdc223a03b24f248",
"sha256:c7f3201ec47d5207841402594f1d7950879ef890c0c495052fa62f58283fde1a",
"sha256:d5ec85080cce7b0513cfd233914eb8b7bbd0633f1d1703aa28d1dd5a72f678ec",
"sha256:d6c391c021ab1f7a82da5d8d0b3cee2f4b2c455ec86c8aebbc84837a631ff309",
"sha256:e3114da6d7f95d2dee7d3f4eec16dacff819740bbab931aff8648cb13c5ff5e7",
"sha256:f983596065a18a2183e7f79ab3fd4c475205b839e02cbc0efbbf9666c4b3083d"
"sha256:087887e55e0b9c8724cf05361357875adb5c20dec27e5816b653492980d20380",
"sha256:09a77e5b2e8ca732a19a90c5bca2d124621a1edb5438c5daa2d2738bfeb02589",
"sha256:130c0f77022b2b9c99d8cebcdd834d81705f61c68e91ddd614ce74c657f8b3ea",
"sha256:141e2aa5ba100d3788c0ad7919b288f89d1fe015878b9659b307c9ef867d3a65",
"sha256:28cb2c41f131a5758d6ba6a0504150d644054fd9f3203a1e8e8d7ac3aea7f73a",
"sha256:2f9f14185962e6a04ab32d1abe34eae8a9001569ee4edb64d2304bf0d65c53f3",
"sha256:320948ab49883557a256eab46149df79435a22d2fefd6a66fe6946f1b9d9d008",
"sha256:36d4b7c4be6411f58f60d9ce555a73df8406d484ba12a63549c88bd64f7967f1",
"sha256:3b15c678f27d66d247132cbf13df2f75255627bcc9b6a570f7d2fd08e8c081d2",
"sha256:3dbd37e14ce795b4af61b89b037d4bc157f2cb23e676fa16932185a04dfbf635",
"sha256:4383b47f45b14459cab66048d384614019965ba6c1a1a141f11b5a551cace1b2",
"sha256:44c95c0e96b3cb628e8452ec060413a49002a247b2b9938989e23a2c8291fc90",
"sha256:4b063d3413f853e056161eb0c7724822a9740ad3caa24b8424d776cebf98e7ee",
"sha256:52ed9ebf8ac602385126c9a2fe951db36f2cb0c2538d22971487f89d0de4065a",
"sha256:55d1580e2d7e17f45d19d3b12098e352f3a37fe86d380bf45846ef257054b242",
"sha256:5ef9bc3d046ce83c4bbf4c25e1e0547b9c441c01d30922d812e887dc5f125c12",
"sha256:5fa82a26f92871eca593b53359c12ad7949772462f887c35edaf36f87953c0e2",
"sha256:61321672b3ac7aade25c40449ccedbc6db72c7f5f0fdf34def5e2f8b51ca530d",
"sha256:701171f825dcab90969596ce2af253143b93b08f1a716d4b2a9d2db5084ef7be",
"sha256:841ec8af7a8491ac76ec5a9522226e287187a3107e12b7d686ad354bb78facee",
"sha256:8a06641fb07d4e8f6c7dda4fc3f8871d327803ab6542e33831c7ccfdcb4d0ad6",
"sha256:8e88bb9eafbf6a4014d55fb222e7360eef53e613215085e65a13290577394529",
"sha256:a00aee5d1b6c20620161984f8ab2ab69134466c51f58c052c11b076715e72929",
"sha256:a047682d324ba56e61b7ea7c7299d51e61fd3bca7dad2ccc39b72bd0118d60a1",
"sha256:a7ef8dd0bf2e1d0a27042b231a3baac6883cdd5557036f5e8df7139255feaac6",
"sha256:ad28cff53f60d99a928dfcf1e861e0b2ceb2bc1f08a074fdd601b314e1cc9e0a",
"sha256:b9097a208875fc7bbeb1286d0125d90bdfed961f61f214d3f5be62cd4ed8a446",
"sha256:b97fe7d7991c25e6a31e5d5e795986b18fbbb3107b873d5f3ae6dc9a103278e9",
"sha256:e0ec52ba3c7f1b7d813cd52649a5b3ef1fc0d433219dc8c93827c57eab6cf888",
"sha256:ea2c3ffb662fec8bbbfce5602e2c159ff097a4631d96235fcf0fb00e59e3ece4",
"sha256:fa3dec4ba8fb6e662770b74f62f1a0c7d4e37e25b58b2bf2c1be4c95372b4a33",
"sha256:fbeb725c9dc799a574518109336acccaf1303c30d45c075c665c0793c2f79a7f"
],
"markers": "python_version >= '3.7'",
"version": "==41.0.7"
"version": "==42.0.2"
},
"docker": {
"hashes": [
@ -249,33 +258,33 @@
},
"pluggy": {
"hashes": [
"sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12",
"sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"
"sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981",
"sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"
],
"markers": "python_version >= '3.8'",
"version": "==1.3.0"
"version": "==1.4.0"
},
"psutil": {
"hashes": [
"sha256:032f4f2c909818c86cea4fe2cc407f1c0f0cde8e6c6d702b28b8ce0c0d143340",
"sha256:0bd41bf2d1463dfa535942b2a8f0e958acf6607ac0be52265ab31f7923bcd5e6",
"sha256:1132704b876e58d277168cd729d64750633d5ff0183acf5b3c986b8466cd0284",
"sha256:1d4bc4a0148fdd7fd8f38e0498639ae128e64538faa507df25a20f8f7fb2341c",
"sha256:3c4747a3e2ead1589e647e64aad601981f01b68f9398ddf94d01e3dc0d1e57c7",
"sha256:3f02134e82cfb5d089fddf20bb2e03fd5cd52395321d1c8458a9e58500ff417c",
"sha256:44969859757f4d8f2a9bd5b76eba8c3099a2c8cf3992ff62144061e39ba8568e",
"sha256:4c03362e280d06bbbfcd52f29acd79c733e0af33d707c54255d21029b8b32ba6",
"sha256:5794944462509e49d4d458f4dbfb92c47539e7d8d15c796f141f474010084056",
"sha256:b27f8fdb190c8c03914f908a4555159327d7481dac2f01008d483137ef3311a9",
"sha256:c727ca5a9b2dd5193b8644b9f0c883d54f1248310023b5ad3e92036c5e2ada68",
"sha256:e469990e28f1ad738f65a42dcfc17adaed9d0f325d55047593cb9033a0ab63df",
"sha256:ea36cc62e69a13ec52b2f625c27527f6e4479bca2b340b7a452af55b34fcbe2e",
"sha256:f37f87e4d73b79e6c5e749440c3113b81d1ee7d26f21c19c47371ddea834f414",
"sha256:fe361f743cb3389b8efda21980d93eb55c1f1e3898269bc9a2a1d0bb7b1f6508",
"sha256:fe8b7f07948f1304497ce4f4684881250cd859b16d06a1dc4d7941eeb6233bfe"
"sha256:02615ed8c5ea222323408ceba16c60e99c3f91639b07da6373fb7e6539abc56d",
"sha256:05806de88103b25903dff19bb6692bd2e714ccf9e668d050d144012055cbca73",
"sha256:26bd09967ae00920df88e0352a91cff1a78f8d69b3ecabbfe733610c0af486c8",
"sha256:27cc40c3493bb10de1be4b3f07cae4c010ce715290a5be22b98493509c6299e2",
"sha256:36f435891adb138ed3c9e58c6af3e2e6ca9ac2f365efe1f9cfef2794e6c93b4e",
"sha256:50187900d73c1381ba1454cf40308c2bf6f34268518b3f36a9b663ca87e65e36",
"sha256:611052c4bc70432ec770d5d54f64206aa7203a101ec273a0cd82418c86503bb7",
"sha256:6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c",
"sha256:7d79560ad97af658a0f6adfef8b834b53f64746d45b403f225b85c5c2c140eee",
"sha256:8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421",
"sha256:8db4c1b57507eef143a15a6884ca10f7c73876cdf5d51e713151c1236a0e68cf",
"sha256:aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81",
"sha256:bc56c2a1b0d15aa3eaa5a60c9f3f8e3e565303b465dbf57a1b730e7a2b9844e0",
"sha256:bd1184ceb3f87651a67b2708d4c3338e9b10c5df903f2e3776b62303b26cb631",
"sha256:d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4",
"sha256:d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
"version": "==5.9.7"
"version": "==5.9.8"
},
"pycparser": {
"hashes": [
@ -286,15 +295,15 @@
},
"pytest": {
"hashes": [
"sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac",
"sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5"
"sha256:249b1b0864530ba251b7438274c4d251c58d868edaaec8762893ad4a0d71c36c",
"sha256:50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6"
],
"markers": "python_version >= '3.7'",
"version": "==7.4.3"
"markers": "python_version >= '3.8'",
"version": "==8.0.0"
},
"pytest-cs": {
"git": "https://github.com/crowdsecurity/pytest-cs.git",
"ref": "df835beabc539be7f7f627b21caa0d6ad333daae"
"ref": "aea7e8549faa32f5e1d1f17755a5db3712396a2a"
},
"pytest-datadir": {
"hashes": [
@ -322,11 +331,11 @@
},
"python-dotenv": {
"hashes": [
"sha256:a8df96034aae6d2d50a4ebe8216326c61c3eb64836776504fcca410e5937a3ba",
"sha256:f5971a9226b701070a4bf2c38c89e5a3f0d64de8debda981d1db98583009122a"
"sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca",
"sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"
],
"markers": "python_version >= '3.8'",
"version": "==1.0.0"
"version": "==1.0.1"
},
"pyyaml": {
"hashes": [
@ -359,6 +368,7 @@
"sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4",
"sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba",
"sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8",
"sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef",
"sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5",
"sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd",
"sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3",
@ -402,11 +412,11 @@
},
"urllib3": {
"hashes": [
"sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3",
"sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54"
"sha256:051d961ad0c62a94e50ecf1af379c3aba230c66c710493493560c0c223c49f20",
"sha256:ce3711610ddce217e6d113a2732fafad960a03fd0318c91faa79481e35c11224"
],
"markers": "python_version >= '3.8'",
"version": "==2.1.0"
"version": "==2.2.0"
}
},
"develop": {
@ -476,11 +486,11 @@
},
"ipython": {
"hashes": [
"sha256:ca6f079bb33457c66e233e4580ebfc4128855b4cf6370dddd73842a9563e8a27",
"sha256:e8267419d72d81955ec1177f8a29aaa90ac80ad647499201119e2f05e99aa397"
"sha256:1050a3ab8473488d7eee163796b02e511d0735cf43a04ba2a8348bd0f2eaf8a5",
"sha256:48fbc236fbe0e138b88773fa0437751f14c3645fb483f1d4c5dee58b37e5ce73"
],
"markers": "python_version >= '3.11'",
"version": "==8.18.1"
"version": "==8.21.0"
},
"jedi": {
"hashes": [
@ -561,18 +571,18 @@
},
"traitlets": {
"hashes": [
"sha256:f14949d23829023013c47df20b4a76ccd1a85effb786dc060f34de7948361b33",
"sha256:fcdaa8ac49c04dfa0ed3ee3384ef6dfdb5d6f3741502be247279407679296772"
"sha256:2e5a030e6eff91737c643231bfcf04a65b0132078dad75e4936700b213652e74",
"sha256:8585105b371a04b8316a43d5ce29c098575c2e477850b62b848b964f1444527e"
],
"markers": "python_version >= '3.8'",
"version": "==5.14.0"
"version": "==5.14.1"
},
"wcwidth": {
"hashes": [
"sha256:f01c104efdf57971bcb756f054dd58ddec5204dd15fa31d6503ea57947d97c02",
"sha256:f26ec43d96c8cbfed76a5075dac87680124fa84e0855195a6184da9c187f133c"
"sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859",
"sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"
],
"version": "==0.2.12"
"version": "==0.2.13"
}
}
}


@ -22,6 +22,7 @@ def test_cscli_lapi(crowdsec, flavor):
assert "You can successfully interact with Local API (LAPI)" in stdout
@pytest.mark.skip(reason="currently broken by hub upgrade")
def test_flavor_content(crowdsec, flavor):
"""Test flavor contents"""
with crowdsec(flavor=flavor) as cs:


@ -241,7 +241,7 @@ def test_tls_mutual_split_lapi_agent(crowdsec, flavor, certs_dir):
assert "You can successfully interact with Local API (LAPI)" in stdout
def test_tls_client_ou(crowdsec, certs_dir):
def test_tls_client_ou(crowdsec, flavor, certs_dir):
"""Check behavior of client certificate vs AGENTS_ALLOWED_OU"""
rand = uuid.uuid1()
@ -270,8 +270,8 @@ def test_tls_client_ou(crowdsec, certs_dir):
certs_dir(lapi_hostname=lapiname, agent_ou='custom-client-ou'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'},
}
cs_lapi = crowdsec(name=lapiname, environment=lapi_env, volumes=volumes)
cs_agent = crowdsec(name=agentname, environment=agent_env, volumes=volumes)
cs_lapi = crowdsec(flavor=flavor, name=lapiname, environment=lapi_env, volumes=volumes)
cs_agent = crowdsec(flavor=flavor, name=agentname, environment=agent_env, volumes=volumes)
with cs_lapi as lapi:
lapi.wait_for_log([
@ -300,8 +300,8 @@ def test_tls_client_ou(crowdsec, certs_dir):
certs_dir(lapi_hostname=lapiname, agent_ou='custom-client-ou'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'},
}
cs_lapi = crowdsec(name=lapiname, environment=lapi_env, volumes=volumes)
cs_agent = crowdsec(name=agentname, environment=agent_env, volumes=volumes)
cs_lapi = crowdsec(flavor=flavor, name=lapiname, environment=lapi_env, volumes=volumes)
cs_agent = crowdsec(flavor=flavor, name=agentname, environment=agent_env, volumes=volumes)
with cs_lapi as lapi:
lapi.wait_for_log([

go.mod

@ -77,7 +77,7 @@ require (
github.com/shirou/gopsutil/v3 v3.23.5
github.com/sirupsen/logrus v1.9.3
github.com/slack-go/slack v0.12.2
github.com/spf13/cobra v1.7.0
github.com/spf13/cobra v1.8.0
github.com/stretchr/testify v1.8.4
github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26
github.com/wasilibs/go-re2 v1.3.0
@ -108,7 +108,7 @@ require (
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
github.com/corazawaf/libinjection-go v0.1.2 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/creack/pty v1.1.18 // indirect
github.com/docker/distribution v2.8.2+incompatible // indirect
github.com/docker/go-units v0.5.0 // indirect

go.sum

@ -91,21 +91,17 @@ github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/crowdsecurity/coraza/v3 v3.0.0-20231213144607-41d5358da94f h1:FkOB9aDw0xzDd14pTarGRLsUNAymONq3dc7zhvsXElg=
github.com/crowdsecurity/coraza/v3 v3.0.0-20231213144607-41d5358da94f/go.mod h1:TrU7Li+z2RHNrPy0TKJ6R65V6Yzpan2sTIRryJJyJso=
github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h8clMcRL2u5ooZ3tmwnmJftmhb9Ws1MKmavvI=
github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA=
github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU=
github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk=
github.com/crowdsecurity/go-cs-lib v0.0.5 h1:eVLW+BRj3ZYn0xt5/xmgzfbbB8EBo32gM4+WpQQk2e8=
github.com/crowdsecurity/go-cs-lib v0.0.5/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k=
github.com/crowdsecurity/go-cs-lib v0.0.6 h1:Ef6MylXe0GaJE9vrfvxEdbHb31+JUP1os+murPz7Pos=
github.com/crowdsecurity/go-cs-lib v0.0.6/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k=
github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4=
@ -546,6 +542,8 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec=
@ -640,8 +638,8 @@ github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@ -809,8 +807,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

View file

@ -383,15 +383,17 @@ func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) {
w.InChan <- parsedRequest
/*
response is a copy of w.AppsecRuntime.Response that is safe to use.
As the out-of-band rules might still be running, the original response can still be modified.
*/
response := <-parsedRequest.ResponseChannel
statusCode := http.StatusOK
if response.InBandInterrupt {
statusCode = http.StatusForbidden
AppsecBlockCounter.With(prometheus.Labels{"source": parsedRequest.RemoteAddrNormalized, "appsec_engine": parsedRequest.AppsecEngine}).Inc()
}
appsecResponse := w.AppsecRuntime.GenerateResponse(response, logger)
statusCode, appsecResponse := w.AppsecRuntime.GenerateResponse(response, logger)
logger.Debugf("Response: %+v", appsecResponse)
rw.WriteHeader(statusCode)
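Note: a self-contained sketch of the two-code contract this handler now follows — the status written on the ResponseWriter is the bouncer-facing code returned by GenerateResponse, while the body carries the user-facing action and status. Treat the JSON serialization and the "action" field name as assumptions; only the "http_status" tag is visible in this diff.

package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// bodyResponse mirrors appsec.BodyResponse; the json tag for Action is assumed,
// only "http_status" appears in this diff.
type bodyResponse struct {
	Action     string `json:"action"`
	HTTPStatus int    `json:"http_status"`
}

// handler illustrates the split: header status for the remediation component,
// body status for the end user.
func handler(rw http.ResponseWriter, r *http.Request) {
	// In CrowdSec this pair comes from w.AppsecRuntime.GenerateResponse(response, logger).
	bouncerStatus := http.StatusForbidden
	body := bodyResponse{Action: "ban", HTTPStatus: http.StatusForbidden}

	rw.Header().Set("Content-Type", "application/json")
	rw.WriteHeader(bouncerStatus) // bouncer-facing code
	if err := json.NewEncoder(rw).Encode(body); err != nil {
		log.Printf("failed to encode appsec response: %v", err)
	}
}

func main() {
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}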

View file

@ -226,7 +226,8 @@ func (r *AppsecRunner) handleInBandInterrupt(request *appsec.ParsedRequest) {
if in := request.Tx.Interruption(); in != nil {
r.logger.Debugf("inband rules matched : %d", in.RuleID)
r.AppsecRuntime.Response.InBandInterrupt = true
r.AppsecRuntime.Response.HTTPResponseCode = r.AppsecRuntime.Config.BlockedHTTPCode
r.AppsecRuntime.Response.BouncerHTTPResponseCode = r.AppsecRuntime.Config.BouncerBlockedHTTPCode
r.AppsecRuntime.Response.UserHTTPResponseCode = r.AppsecRuntime.Config.UserBlockedHTTPCode
r.AppsecRuntime.Response.Action = r.AppsecRuntime.DefaultRemediation
if _, ok := r.AppsecRuntime.RemediationById[in.RuleID]; ok {
@ -252,7 +253,9 @@ func (r *AppsecRunner) handleInBandInterrupt(request *appsec.ParsedRequest) {
r.logger.Errorf("unable to generate appsec event : %s", err)
return
}
r.outChan <- *appsecOvlfw
if appsecOvlfw != nil {
r.outChan <- *appsecOvlfw
}
}
// Should the in band match trigger an event ?

View file

@ -1,6 +1,7 @@
package appsecacquisition
import (
"net/http"
"net/url"
"testing"
"time"
@ -21,16 +22,21 @@ Missing tests (wip):
*/
type appsecRuleTest struct {
name string
expected_load_ok bool
inband_rules []appsec_rule.CustomRule
outofband_rules []appsec_rule.CustomRule
on_load []appsec.Hook
pre_eval []appsec.Hook
post_eval []appsec.Hook
on_match []appsec.Hook
input_request appsec.ParsedRequest
output_asserts func(events []types.Event, responses []appsec.AppsecTempResponse)
name string
expected_load_ok bool
inband_rules []appsec_rule.CustomRule
outofband_rules []appsec_rule.CustomRule
on_load []appsec.Hook
pre_eval []appsec.Hook
post_eval []appsec.Hook
on_match []appsec.Hook
BouncerBlockedHTTPCode int
UserBlockedHTTPCode int
UserPassedHTTPCode int
DefaultRemediation string
DefaultPassAction string
input_request appsec.ParsedRequest
output_asserts func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int)
}
func TestAppsecOnMatchHooks(t *testing.T) {
@ -53,13 +59,14 @@ func TestAppsecOnMatchHooks(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Len(t, events, 2)
require.Equal(t, types.APPSEC, events[0].Type)
require.Equal(t, types.LOG, events[1].Type)
require.Len(t, responses, 1)
require.Equal(t, 403, responses[0].HTTPResponseCode)
require.Equal(t, "ban", responses[0].Action)
require.Equal(t, 403, responses[0].BouncerHTTPResponseCode)
require.Equal(t, 403, responses[0].UserHTTPResponseCode)
require.Equal(t, appsec.BanRemediation, responses[0].Action)
},
},
@ -84,17 +91,18 @@ func TestAppsecOnMatchHooks(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Len(t, events, 2)
require.Equal(t, types.APPSEC, events[0].Type)
require.Equal(t, types.LOG, events[1].Type)
require.Len(t, responses, 1)
require.Equal(t, 413, responses[0].HTTPResponseCode)
require.Equal(t, "ban", responses[0].Action)
require.Equal(t, 403, responses[0].BouncerHTTPResponseCode)
require.Equal(t, 413, responses[0].UserHTTPResponseCode)
require.Equal(t, appsec.BanRemediation, responses[0].Action)
},
},
{
name: "on_match: change action to another standard one (log)",
name: "on_match: change action to a non standard one (log)",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
@ -114,7 +122,7 @@ func TestAppsecOnMatchHooks(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Len(t, events, 2)
require.Equal(t, types.APPSEC, events[0].Type)
require.Equal(t, types.LOG, events[1].Type)
@ -143,16 +151,16 @@ func TestAppsecOnMatchHooks(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Len(t, events, 2)
require.Equal(t, types.APPSEC, events[0].Type)
require.Equal(t, types.LOG, events[1].Type)
require.Len(t, responses, 1)
require.Equal(t, "allow", responses[0].Action)
require.Equal(t, appsec.AllowRemediation, responses[0].Action)
},
},
{
name: "on_match: change action to another standard one (deny/ban/block)",
name: "on_match: change action to another standard one (ban)",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
@ -164,7 +172,7 @@ func TestAppsecOnMatchHooks(t *testing.T) {
},
},
on_match: []appsec.Hook{
{Filter: "IsInBand == true", Apply: []string{"SetRemediation('deny')"}},
{Filter: "IsInBand == true", Apply: []string{"SetRemediation('ban')"}},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
@ -172,10 +180,10 @@ func TestAppsecOnMatchHooks(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Len(t, responses, 1)
//note: SetAction normalizes deny, ban and block to ban
require.Equal(t, "ban", responses[0].Action)
require.Equal(t, appsec.BanRemediation, responses[0].Action)
},
},
{
@ -199,10 +207,10 @@ func TestAppsecOnMatchHooks(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Len(t, responses, 1)
//note: SetAction normalizes deny, ban and block to ban
require.Equal(t, "captcha", responses[0].Action)
require.Equal(t, appsec.CaptchaRemediation, responses[0].Action)
},
},
{
@ -226,7 +234,7 @@ func TestAppsecOnMatchHooks(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Len(t, events, 2)
require.Equal(t, types.APPSEC, events[0].Type)
require.Equal(t, types.LOG, events[1].Type)
@ -255,11 +263,11 @@ func TestAppsecOnMatchHooks(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Len(t, events, 1)
require.Equal(t, types.LOG, events[0].Type)
require.Len(t, responses, 1)
require.Equal(t, "ban", responses[0].Action)
require.Equal(t, appsec.BanRemediation, responses[0].Action)
},
},
{
@ -283,11 +291,11 @@ func TestAppsecOnMatchHooks(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Len(t, events, 1)
require.Equal(t, types.APPSEC, events[0].Type)
require.Len(t, responses, 1)
require.Equal(t, "ban", responses[0].Action)
require.Equal(t, appsec.BanRemediation, responses[0].Action)
},
},
}
@ -328,7 +336,7 @@ func TestAppsecPreEvalHooks(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Empty(t, events)
require.Len(t, responses, 1)
require.False(t, responses[0].InBandInterrupt)
@ -356,7 +364,7 @@ func TestAppsecPreEvalHooks(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Len(t, events, 2)
require.Equal(t, types.APPSEC, events[0].Type)
@ -391,7 +399,7 @@ func TestAppsecPreEvalHooks(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Empty(t, events)
require.Len(t, responses, 1)
require.False(t, responses[0].InBandInterrupt)
@ -419,7 +427,7 @@ func TestAppsecPreEvalHooks(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Empty(t, events)
require.Len(t, responses, 1)
require.False(t, responses[0].InBandInterrupt)
@ -447,7 +455,7 @@ func TestAppsecPreEvalHooks(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Empty(t, events)
require.Len(t, responses, 1)
require.False(t, responses[0].InBandInterrupt)
@ -472,7 +480,7 @@ func TestAppsecPreEvalHooks(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Len(t, events, 1)
require.Equal(t, types.LOG, events[0].Type)
require.True(t, events[0].Appsec.HasOutBandMatches)
@ -506,7 +514,7 @@ func TestAppsecPreEvalHooks(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Len(t, events, 2)
require.Len(t, responses, 1)
require.Equal(t, "foobar", responses[0].Action)
@ -533,7 +541,7 @@ func TestAppsecPreEvalHooks(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Len(t, events, 2)
require.Len(t, responses, 1)
require.Equal(t, "foobar", responses[0].Action)
@ -560,10 +568,12 @@ func TestAppsecPreEvalHooks(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Len(t, events, 2)
require.Len(t, responses, 1)
require.Equal(t, "foobar", responses[0].Action)
require.Equal(t, "foobar", appsecResponse.Action)
require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus)
},
},
}
@ -574,6 +584,473 @@ func TestAppsecPreEvalHooks(t *testing.T) {
})
}
}
func TestAppsecRemediationConfigHooks(t *testing.T) {
tests := []appsecRuleTest{
{
name: "Basic matching rule",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule1",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Equal(t, appsec.BanRemediation, responses[0].Action)
require.Equal(t, http.StatusForbidden, statusCode)
require.Equal(t, appsec.BanRemediation, appsecResponse.Action)
require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus)
},
},
{
name: "SetRemediation",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule1",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
on_match: []appsec.Hook{{Apply: []string{"SetRemediation('captcha')"}}}, //rule ID is generated at runtime. If you change the rule, it will break the test (:
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Equal(t, appsec.CaptchaRemediation, responses[0].Action)
require.Equal(t, http.StatusForbidden, statusCode)
require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action)
require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus)
},
},
{
name: "SetRemediation",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule1",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
on_match: []appsec.Hook{{Apply: []string{"SetReturnCode(418)"}}}, //rule ID is generated at runtime. If you change the rule, it will break the test (:
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Equal(t, appsec.BanRemediation, responses[0].Action)
require.Equal(t, http.StatusForbidden, statusCode)
require.Equal(t, appsec.BanRemediation, appsecResponse.Action)
require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus)
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
loadAppSecEngine(test, t)
})
}
}
func TestOnMatchRemediationHooks(t *testing.T) {
tests := []appsecRuleTest{
{
name: "set remediation to allow with on_match hook",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule42",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
on_match: []appsec.Hook{
{Filter: "IsInBand == true", Apply: []string{"SetRemediation('allow')"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Equal(t, appsec.AllowRemediation, appsecResponse.Action)
require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus)
},
},
{
name: "set remediation to captcha + custom user code with on_match hook",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule42",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
DefaultRemediation: appsec.AllowRemediation,
on_match: []appsec.Hook{
{Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')", "SetReturnCode(418)"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
spew.Dump(responses)
spew.Dump(appsecResponse)
log.Errorf("http status : %d", statusCode)
require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action)
require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus)
require.Equal(t, http.StatusForbidden, statusCode)
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
loadAppSecEngine(test, t)
})
}
}
func TestAppsecDefaultPassRemediation(t *testing.T) {
tests := []appsecRuleTest{
{
name: "Basic non-matching rule",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule1",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/",
Args: url.Values{"foo": []string{"tutu"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Equal(t, appsec.AllowRemediation, responses[0].Action)
require.Equal(t, http.StatusOK, statusCode)
require.Equal(t, appsec.AllowRemediation, appsecResponse.Action)
require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus)
},
},
{
name: "DefaultPassAction: pass",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule1",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/",
Args: url.Values{"foo": []string{"tutu"}},
},
DefaultPassAction: "allow",
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Equal(t, appsec.AllowRemediation, responses[0].Action)
require.Equal(t, http.StatusOK, statusCode)
require.Equal(t, appsec.AllowRemediation, appsecResponse.Action)
require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus)
},
},
{
name: "DefaultPassAction: captcha",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule1",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/",
Args: url.Values{"foo": []string{"tutu"}},
},
DefaultPassAction: "captcha",
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Equal(t, appsec.CaptchaRemediation, responses[0].Action)
require.Equal(t, http.StatusOK, statusCode) //@tko: body is captcha, but as it's 200, the captcha won't be shown to the user
require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action)
require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus)
},
},
{
name: "DefaultPassHTTPCode: 200",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule1",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/",
Args: url.Values{"foo": []string{"tutu"}},
},
UserPassedHTTPCode: 200,
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Equal(t, appsec.AllowRemediation, responses[0].Action)
require.Equal(t, http.StatusOK, statusCode)
require.Equal(t, appsec.AllowRemediation, appsecResponse.Action)
require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus)
},
},
{
name: "DefaultPassHTTPCode: 200",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule1",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/",
Args: url.Values{"foo": []string{"tutu"}},
},
UserPassedHTTPCode: 418,
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Equal(t, appsec.AllowRemediation, responses[0].Action)
require.Equal(t, http.StatusOK, statusCode)
require.Equal(t, appsec.AllowRemediation, appsecResponse.Action)
require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus)
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
loadAppSecEngine(test, t)
})
}
}
func TestAppsecDefaultRemediation(t *testing.T) {
tests := []appsecRuleTest{
{
name: "Basic matching rule",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule1",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Equal(t, appsec.BanRemediation, responses[0].Action)
require.Equal(t, http.StatusForbidden, statusCode)
require.Equal(t, appsec.BanRemediation, appsecResponse.Action)
require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus)
},
},
{
name: "default remediation to ban (default)",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule42",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
DefaultRemediation: "ban",
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Equal(t, appsec.BanRemediation, responses[0].Action)
require.Equal(t, http.StatusForbidden, statusCode)
require.Equal(t, appsec.BanRemediation, appsecResponse.Action)
require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus)
},
},
{
name: "default remediation to allow",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule42",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
DefaultRemediation: "allow",
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Equal(t, appsec.AllowRemediation, responses[0].Action)
require.Equal(t, http.StatusOK, statusCode)
require.Equal(t, appsec.AllowRemediation, appsecResponse.Action)
require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus)
},
},
{
name: "default remediation to captcha",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule42",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
DefaultRemediation: "captcha",
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Equal(t, appsec.CaptchaRemediation, responses[0].Action)
require.Equal(t, http.StatusForbidden, statusCode)
require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action)
require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus)
},
},
{
name: "custom user HTTP code",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule42",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
UserBlockedHTTPCode: 418,
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Equal(t, appsec.BanRemediation, responses[0].Action)
require.Equal(t, http.StatusForbidden, statusCode)
require.Equal(t, appsec.BanRemediation, appsecResponse.Action)
require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus)
},
},
{
name: "custom remediation + HTTP code",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule42",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
UserBlockedHTTPCode: 418,
DefaultRemediation: "foobar",
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Equal(t, "foobar", responses[0].Action)
require.Equal(t, http.StatusForbidden, statusCode)
require.Equal(t, "foobar", appsecResponse.Action)
require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus)
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
loadAppSecEngine(test, t)
})
}
}
func TestAppsecRuleMatches(t *testing.T) {
/*
@ -601,7 +1078,7 @@ func TestAppsecRuleMatches(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Len(t, events, 2)
require.Equal(t, types.APPSEC, events[0].Type)
@ -632,13 +1109,172 @@ func TestAppsecRuleMatches(t *testing.T) {
URI: "/urllll",
Args: url.Values{"foo": []string{"tutu"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) {
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Empty(t, events)
require.Len(t, responses, 1)
require.False(t, responses[0].InBandInterrupt)
require.False(t, responses[0].OutOfBandInterrupt)
},
},
{
name: "default remediation to allow",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule42",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
DefaultRemediation: "allow",
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Equal(t, appsec.AllowRemediation, responses[0].Action)
require.Equal(t, http.StatusOK, statusCode)
require.Equal(t, appsec.AllowRemediation, appsecResponse.Action)
require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus)
},
},
{
name: "default remediation to captcha",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule42",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
DefaultRemediation: "captcha",
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Equal(t, appsec.CaptchaRemediation, responses[0].Action)
require.Equal(t, http.StatusForbidden, statusCode)
require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action)
require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus)
},
},
{
name: "no default remediation / custom user HTTP code",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule42",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/urllll",
Args: url.Values{"foo": []string{"toto"}},
},
UserBlockedHTTPCode: 418,
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Equal(t, appsec.BanRemediation, responses[0].Action)
require.Equal(t, http.StatusForbidden, statusCode)
require.Equal(t, appsec.BanRemediation, appsecResponse.Action)
require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus)
},
},
{
name: "no match but try to set remediation to captcha with on_match hook",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule42",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
on_match: []appsec.Hook{
{Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')"}},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/urllll",
Args: url.Values{"foo": []string{"bla"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Empty(t, events)
require.Equal(t, http.StatusOK, statusCode)
require.Equal(t, appsec.AllowRemediation, appsecResponse.Action)
},
},
{
name: "no match but try to set user HTTP code with on_match hook",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule42",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
on_match: []appsec.Hook{
{Filter: "IsInBand == true", Apply: []string{"SetReturnCode(418)"}},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/urllll",
Args: url.Values{"foo": []string{"bla"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Empty(t, events)
require.Equal(t, http.StatusOK, statusCode)
require.Equal(t, appsec.AllowRemediation, appsecResponse.Action)
},
},
{
name: "no match but try to set remediation with pre_eval hook",
expected_load_ok: true,
inband_rules: []appsec_rule.CustomRule{
{
Name: "rule42",
Zones: []string{"ARGS"},
Variables: []string{"foo"},
Match: appsec_rule.Match{Type: "regex", Value: "^toto"},
Transform: []string{"lowercase"},
},
},
pre_eval: []appsec.Hook{
{Filter: "IsInBand == true", Apply: []string{"SetRemediationByName('rule42', 'captcha')"}},
},
input_request: appsec.ParsedRequest{
RemoteAddr: "1.2.3.4",
Method: "GET",
URI: "/urllll",
Args: url.Values{"foo": []string{"bla"}},
},
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) {
require.Empty(t, events)
require.Equal(t, http.StatusOK, statusCode)
require.Equal(t, appsec.AllowRemediation, appsecResponse.Action)
},
},
}
for _, test := range tests {
@ -678,7 +1314,16 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) {
outofbandRules = append(outofbandRules, strRule)
}
appsecCfg := appsec.AppsecConfig{Logger: logger, OnLoad: test.on_load, PreEval: test.pre_eval, PostEval: test.post_eval, OnMatch: test.on_match}
appsecCfg := appsec.AppsecConfig{Logger: logger,
OnLoad: test.on_load,
PreEval: test.pre_eval,
PostEval: test.post_eval,
OnMatch: test.on_match,
BouncerBlockedHTTPCode: test.BouncerBlockedHTTPCode,
UserBlockedHTTPCode: test.UserBlockedHTTPCode,
UserPassedHTTPCode: test.UserPassedHTTPCode,
DefaultRemediation: test.DefaultRemediation,
DefaultPassAction: test.DefaultPassAction}
AppsecRuntime, err := appsecCfg.Build()
if err != nil {
t.Fatalf("unable to build appsec runtime : %s", err)
@ -724,8 +1369,10 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) {
runner.handleRequest(&input)
time.Sleep(50 * time.Millisecond)
http_status, appsecResponse := AppsecRuntime.GenerateResponse(OutputResponses[0], logger)
log.Infof("events : %s", spew.Sdump(OutputEvents))
log.Infof("responses : %s", spew.Sdump(OutputResponses))
test.output_asserts(OutputEvents, OutputResponses)
test.output_asserts(OutputEvents, OutputResponses, appsecResponse, http_status)
}

View file

@ -179,11 +179,9 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR
req.Tx.Variables().All(func(v variables.RuleVariable, col collection.Collection) bool {
for _, variable := range col.FindAll() {
key := ""
if variable.Key() == "" {
key = variable.Variable().Name()
} else {
key = variable.Variable().Name() + "." + variable.Key()
key := variable.Variable().Name()
if variable.Key() != "" {
key += "." + variable.Key()
}
if variable.Value() == "" {
continue
@ -214,7 +212,7 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR
evt.Appsec.HasOutBandMatches = true
}
name := ""
var name string
version := ""
hash := ""
ruleNameProm := fmt.Sprintf("%d", rule.Rule().ID())

View file

@ -25,6 +25,7 @@ type LokiClient struct {
t *tomb.Tomb
fail_start time.Time
currentTickerInterval time.Duration
requestHeaders map[string]string
}
type Config struct {
@ -116,7 +117,7 @@ func (lc *LokiClient) queryRange(uri string, ctx context.Context, c chan *LokiQu
case <-lc.t.Dying():
return lc.t.Err()
case <-ticker.C:
resp, err := http.Get(uri)
resp, err := lc.Get(uri)
if err != nil {
if ok := lc.shouldRetry(); !ok {
return errors.Wrapf(err, "error querying range")
@ -127,6 +128,7 @@ func (lc *LokiClient) queryRange(uri string, ctx context.Context, c chan *LokiQu
}
if resp.StatusCode != http.StatusOK {
lc.Logger.Warnf("bad HTTP response code for query range: %d", resp.StatusCode)
body, _ := io.ReadAll(resp.Body)
resp.Body.Close()
if ok := lc.shouldRetry(); !ok {
@ -215,7 +217,7 @@ func (lc *LokiClient) Ready(ctx context.Context) error {
return lc.t.Err()
case <-tick.C:
lc.Logger.Debug("Checking if Loki is ready")
resp, err := http.Get(url)
resp, err := lc.Get(url)
if err != nil {
lc.Logger.Warnf("Error checking if Loki is ready: %s", err)
continue
@ -251,10 +253,9 @@ func (lc *LokiClient) Tail(ctx context.Context) (chan *LokiResponse, error) {
}
requestHeader := http.Header{}
for k, v := range lc.config.Headers {
for k, v := range lc.requestHeaders {
requestHeader.Add(k, v)
}
requestHeader.Set("User-Agent", "Crowdsec "+cwversion.VersionStr())
lc.Logger.Infof("Connecting to %s", u)
conn, _, err := dialer.Dial(u, requestHeader)
@ -293,16 +294,6 @@ func (lc *LokiClient) QueryRange(ctx context.Context, infinite bool) chan *LokiQ
lc.Logger.Debugf("Since: %s (%s)", lc.config.Since, time.Now().Add(-lc.config.Since))
requestHeader := http.Header{}
for k, v := range lc.config.Headers {
requestHeader.Add(k, v)
}
if lc.config.Username != "" || lc.config.Password != "" {
requestHeader.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(lc.config.Username+":"+lc.config.Password)))
}
requestHeader.Set("User-Agent", "Crowdsec "+cwversion.VersionStr())
lc.Logger.Infof("Connecting to %s", url)
lc.t.Go(func() error {
return lc.queryRange(url, ctx, c, infinite)
@ -310,6 +301,26 @@ func (lc *LokiClient) QueryRange(ctx context.Context, infinite bool) chan *LokiQ
return c
}
func NewLokiClient(config Config) *LokiClient {
return &LokiClient{Logger: log.WithField("component", "lokiclient"), config: config}
// Create a wrapper for http.Get to be able to set headers and auth
func (lc *LokiClient) Get(url string) (*http.Response, error) {
request, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
for k, v := range lc.requestHeaders {
request.Header.Add(k, v)
}
return http.DefaultClient.Do(request)
}
func NewLokiClient(config Config) *LokiClient {
headers := make(map[string]string)
for k, v := range config.Headers {
headers[k] = v
}
if config.Username != "" || config.Password != "" {
headers["Authorization"] = "Basic " + base64.StdEncoding.EncodeToString([]byte(config.Username+":"+config.Password))
}
headers["User-Agent"] = "Crowdsec " + cwversion.VersionStr()
return &LokiClient{Logger: log.WithField("component", "lokiclient"), config: config, requestHeaders: headers}
}
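Note: the refactor above folds the configured headers, basic auth and the User-Agent into requestHeaders once, and the Get wrapper applies them to every request. A self-contained sketch of the same pattern, with illustrative names (it does not import the CrowdSec package):

package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
)

// client mirrors the LokiClient approach: headers are computed once at
// construction and reused for every request.
type client struct {
	requestHeaders map[string]string
}

func newClient(headers map[string]string, username, password string) *client {
	h := make(map[string]string, len(headers)+2)
	for k, v := range headers {
		h[k] = v
	}
	if username != "" || password != "" {
		h["Authorization"] = "Basic " + base64.StdEncoding.EncodeToString([]byte(username+":"+password))
	}
	h["User-Agent"] = "Crowdsec example"
	return &client{requestHeaders: h}
}

// get is the equivalent of LokiClient.Get: build the request, attach the
// stored headers, and send it with the default HTTP client.
func (c *client) get(url string) (*http.Response, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	for k, v := range c.requestHeaders {
		req.Header.Add(k, v)
	}
	return http.DefaultClient.Do(req)
}

func main() {
	c := newClient(map[string]string{"X-Scope-OrgID": "1234"}, "user", "pass")
	resp, err := c.get("http://127.0.0.1:3100/ready")
	if err != nil {
		fmt.Println("loki not reachable:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}

Keeping header construction in one place means the websocket tail, the ready check and the range queries all send the same headers.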

View file

@ -276,10 +276,17 @@ func feedLoki(logger *log.Entry, n int, title string) error {
if err != nil {
return err
}
resp, err := http.Post("http://127.0.0.1:3100/loki/api/v1/push", "application/json", bytes.NewBuffer(buff))
req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:3100/loki/api/v1/push", bytes.NewBuffer(buff))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("X-Scope-OrgID", "1234")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusNoContent {
b, _ := io.ReadAll(resp.Body)
logger.Error(string(b))
@ -306,6 +313,8 @@ mode: cat
source: loki
url: http://127.0.0.1:3100
query: '{server="demo",key="%s"}'
headers:
x-scope-orgid: "1234"
since: 1h
`, title),
},
@ -362,26 +371,26 @@ func TestStreamingAcquisition(t *testing.T) {
}{
{
name: "Bad port",
config: `
mode: tail
config: `mode: tail
source: loki
url: http://127.0.0.1:3101
url: "http://127.0.0.1:3101"
headers:
x-scope-orgid: "1234"
query: >
{server="demo"}
`, // No Loki server here
{server="demo"}`, // No Loki server here
expectedErr: "",
streamErr: `loki is not ready: context deadline exceeded`,
expectedLines: 0,
},
{
name: "ok",
config: `
mode: tail
config: `mode: tail
source: loki
url: http://127.0.0.1:3100
url: "http://127.0.0.1:3100"
headers:
x-scope-orgid: "1234"
query: >
{server="demo"}
`,
{server="demo"}`,
expectedErr: "",
streamErr: "",
expectedLines: 20,
@ -456,6 +465,8 @@ func TestStopStreaming(t *testing.T) {
mode: tail
source: loki
url: http://127.0.0.1:3100
headers:
x-scope-orgid: "1234"
query: >
{server="demo"}
`

View file

@ -633,6 +633,13 @@ func (a *apic) PullTop(forcePull bool) error {
}
}
log.Debug("Acquiring lock for pullCAPI")
err = a.dbClient.AcquirePullCAPILock()
if a.dbClient.IsLocked(err) {
log.Info("PullCAPI is already running, skipping")
return nil
}
log.Infof("Starting community-blocklist update")
data, _, err := a.apiClient.Decisions.GetStreamV3(context.Background(), apiclient.DecisionsStreamOpts{Startup: a.startup})
@ -684,6 +691,11 @@ func (a *apic) PullTop(forcePull bool) error {
return fmt.Errorf("while updating blocklists: %w", err)
}
log.Debug("Releasing lock for pullCAPI")
if err := a.dbClient.ReleasePullCAPILock(); err != nil {
return fmt.Errorf("while releasing lock: %w", err)
}
return nil
}
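Note: the lock sequence added to PullTop (acquire, skip the run if another LAPI instance already holds it, release at the end) boils down to the pattern below. Only the method names AcquirePullCAPILock, IsLocked and ReleasePullCAPILock come from this diff; the interface, the in-memory stand-in and the deferred release are illustrative.

package main

import (
	"errors"
	"log"
	"sync"
)

// capiLocker captures only the lock-related methods PullTop relies on; the
// real implementation is the database client.
type capiLocker interface {
	AcquirePullCAPILock() error
	IsLocked(err error) bool
	ReleasePullCAPILock() error
}

var errLocked = errors.New("lock already taken")

// memLocker is a toy in-memory stand-in so the sketch runs on its own.
type memLocker struct {
	mu   sync.Mutex
	held bool
}

func (m *memLocker) AcquirePullCAPILock() error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.held {
		return errLocked
	}
	m.held = true
	return nil
}

func (m *memLocker) IsLocked(err error) bool { return errors.Is(err, errLocked) }

func (m *memLocker) ReleasePullCAPILock() error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.held = false
	return nil
}

// withPullCAPILock mirrors the PullTop flow: acquire, skip the run entirely
// if another instance already holds the lock, and release when done.
func withPullCAPILock(db capiLocker, fn func() error) error {
	err := db.AcquirePullCAPILock()
	if db.IsLocked(err) {
		log.Println("PullCAPI is already running, skipping")
		return nil
	}
	if err != nil {
		return err
	}
	defer func() {
		if rerr := db.ReleasePullCAPILock(); rerr != nil {
			log.Printf("while releasing lock: %v", rerr)
		}
	}()
	return fn()
}

func main() {
	db := &memLocker{}
	_ = withPullCAPILock(db, func() error {
		log.Println("community-blocklist update would run here")
		return nil
	})
}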

View file

@ -26,15 +26,15 @@ func TestAPICSendMetrics(t *testing.T) {
}{
{
name: "basic",
duration: time.Millisecond * 60,
metricsInterval: time.Millisecond * 10,
duration: time.Millisecond * 120,
metricsInterval: time.Millisecond * 20,
expectedCalls: 5,
setUp: func(api *apic) {},
},
{
name: "with some metrics",
duration: time.Millisecond * 60,
metricsInterval: time.Millisecond * 10,
duration: time.Millisecond * 120,
metricsInterval: time.Millisecond * 20,
expectedCalls: 5,
setUp: func(api *apic) {
api.dbClient.Ent.Machine.Delete().ExecX(context.Background())

View file

@ -243,9 +243,9 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) {
controller.AlertsAddChan = apiClient.AlertsAddChan
if apiClient.apiClient.IsEnrolled() {
log.Infof("Machine is enrolled in the console, Loading PAPI Client")
if config.ConsoleConfig.IsPAPIEnabled() {
log.Info("Machine is enrolled in the console, Loading PAPI Client")
papiClient, err = NewPAPI(apiClient, dbClient, config.ConsoleConfig, *config.PapiLogLevel)
if err != nil {
return nil, err

View file

@ -66,7 +66,7 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer {
validCert, extractedCN, err := a.TlsAuth.ValidateCert(c)
if !validCert {
logger.Errorf("invalid client certificate: %s", err)
logger.Error(err)
return nil
}

View file

@ -4,6 +4,7 @@ import (
"bytes"
"crypto"
"crypto/x509"
"encoding/pem"
"fmt"
"io"
"net/http"
@ -19,14 +20,13 @@ import (
type TLSAuth struct {
AllowedOUs []string
CrlPath string
revokationCache map[string]cacheEntry
revocationCache map[string]cacheEntry
cacheExpiration time.Duration
logger *log.Entry
}
type cacheEntry struct {
revoked bool
err error
timestamp time.Time
}
@ -89,10 +89,12 @@ func (ta *TLSAuth) isExpired(cert *x509.Certificate) bool {
return false
}
func (ta *TLSAuth) isOCSPRevoked(cert *x509.Certificate, issuer *x509.Certificate) (bool, error) {
if cert.OCSPServer == nil || (cert.OCSPServer != nil && len(cert.OCSPServer) == 0) {
// isOCSPRevoked checks if the client certificate is revoked by any of the OCSP servers present in the certificate.
// It returns a boolean indicating if the certificate is revoked and a boolean indicating if the OCSP check was successful and could be cached.
func (ta *TLSAuth) isOCSPRevoked(cert *x509.Certificate, issuer *x509.Certificate) (bool, bool) {
if cert.OCSPServer == nil || len(cert.OCSPServer) == 0 {
ta.logger.Infof("TLSAuth: no OCSP Server present in client certificate, skipping OCSP verification")
return false, nil
return false, true
}
for _, server := range cert.OCSPServer {
@ -104,9 +106,10 @@ func (ta *TLSAuth) isOCSPRevoked(cert *x509.Certificate, issuer *x509.Certificat
switch ocspResponse.Status {
case ocsp.Good:
return false, nil
return false, true
case ocsp.Revoked:
return true, fmt.Errorf("client certificate is revoked by server %s", server)
ta.logger.Errorf("TLSAuth: client certificate is revoked by server %s", server)
return true, true
case ocsp.Unknown:
log.Debugf("unknow OCSP status for server %s", server)
continue
@ -115,83 +118,82 @@ func (ta *TLSAuth) isOCSPRevoked(cert *x509.Certificate, issuer *x509.Certificat
log.Infof("Could not get any valid OCSP response, assuming the cert is revoked")
return true, nil
return true, false
}
func (ta *TLSAuth) isCRLRevoked(cert *x509.Certificate) (bool, error) {
// isCRLRevoked checks if the client certificate is revoked by the CRL present in the CrlPath.
// It returns a boolean indicating if the certificate is revoked and a boolean indicating if the CRL check was successful and could be cached.
func (ta *TLSAuth) isCRLRevoked(cert *x509.Certificate) (bool, bool) {
if ta.CrlPath == "" {
ta.logger.Warn("no crl_path, skipping CRL check")
return false, nil
ta.logger.Info("no crl_path, skipping CRL check")
return false, true
}
crlContent, err := os.ReadFile(ta.CrlPath)
if err != nil {
ta.logger.Warnf("could not read CRL file, skipping check: %s", err)
return false, nil
ta.logger.Errorf("could not read CRL file, skipping check: %s", err)
return false, false
}
crl, err := x509.ParseCRL(crlContent)
crlBinary, rest := pem.Decode(crlContent)
if len(rest) > 0 {
ta.logger.Warn("CRL file contains more than one PEM block, ignoring the rest")
}
crl, err := x509.ParseRevocationList(crlBinary.Bytes)
if err != nil {
ta.logger.Warnf("could not parse CRL file, skipping check: %s", err)
return false, nil
ta.logger.Errorf("could not parse CRL file, skipping check: %s", err)
return false, false
}
if crl.HasExpired(time.Now().UTC()) {
now := time.Now().UTC()
if now.After(crl.NextUpdate) {
ta.logger.Warn("CRL has expired, will still validate the cert against it.")
}
for _, revoked := range crl.TBSCertList.RevokedCertificates {
if now.Before(crl.ThisUpdate) {
ta.logger.Warn("CRL is not yet valid, will still validate the cert against it.")
}
for _, revoked := range crl.RevokedCertificateEntries {
if revoked.SerialNumber.Cmp(cert.SerialNumber) == 0 {
return true, fmt.Errorf("client certificate is revoked by CRL")
ta.logger.Warn("client certificate is revoked by CRL")
return true, true
}
}
return false, nil
return false, true
}
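Note: the CRL handling now goes through pem.Decode and x509.ParseRevocationList instead of the deprecated x509.ParseCRL. A minimal standalone sketch of that path, with an illustrative file name:

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
	"time"
)

func main() {
	// Path is illustrative; point it at any PEM-encoded CRL.
	crlContent, err := os.ReadFile("ca.crl.pem")
	if err != nil {
		log.Fatalf("could not read CRL file: %s", err)
	}

	// A PEM CRL is a single block; anything after it is ignored, same as the
	// warning in isCRLRevoked.
	block, rest := pem.Decode(crlContent)
	if block == nil {
		log.Fatal("no PEM block found in CRL file")
	}
	if len(rest) > 0 {
		log.Println("CRL file contains more than one PEM block, ignoring the rest")
	}

	crl, err := x509.ParseRevocationList(block.Bytes)
	if err != nil {
		log.Fatalf("could not parse CRL: %s", err)
	}

	now := time.Now().UTC()
	if now.After(crl.NextUpdate) {
		log.Println("CRL has expired, certificates will still be checked against it")
	}

	for _, entry := range crl.RevokedCertificateEntries {
		fmt.Printf("revoked serial: %s (at %s)\n", entry.SerialNumber, entry.RevocationTime)
	}
}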
func (ta *TLSAuth) isRevoked(cert *x509.Certificate, issuer *x509.Certificate) (bool, error) {
sn := cert.SerialNumber.String()
if cacheValue, ok := ta.revokationCache[sn]; ok {
if cacheValue, ok := ta.revocationCache[sn]; ok {
if time.Now().UTC().Sub(cacheValue.timestamp) < ta.cacheExpiration {
ta.logger.Debugf("TLSAuth: using cached value for cert %s: %t | %s", sn, cacheValue.revoked, cacheValue.err)
return cacheValue.revoked, cacheValue.err
} else {
ta.logger.Debugf("TLSAuth: cached value expired, removing from cache")
delete(ta.revokationCache, sn)
ta.logger.Debugf("TLSAuth: using cached value for cert %s: %t", sn, cacheValue.revoked)
return cacheValue.revoked, nil
}
ta.logger.Debugf("TLSAuth: cached value expired, removing from cache")
delete(ta.revocationCache, sn)
} else {
ta.logger.Tracef("TLSAuth: no cached value for cert %s", sn)
}
revoked, err := ta.isOCSPRevoked(cert, issuer)
if err != nil {
ta.revokationCache[sn] = cacheEntry{
revokedByOCSP, cacheOCSP := ta.isOCSPRevoked(cert, issuer)
revokedByCRL, cacheCRL := ta.isCRLRevoked(cert)
revoked := revokedByOCSP || revokedByCRL
if cacheOCSP && cacheCRL {
ta.revocationCache[sn] = cacheEntry{
revoked: revoked,
err: err,
timestamp: time.Now().UTC(),
}
return true, err
}
if revoked {
ta.revokationCache[sn] = cacheEntry{
revoked: revoked,
err: err,
timestamp: time.Now().UTC(),
}
return true, nil
}
revoked, err = ta.isCRLRevoked(cert)
ta.revokationCache[sn] = cacheEntry{
revoked: revoked,
err: err,
timestamp: time.Now().UTC(),
}
return revoked, err
return revoked, nil
}
func (ta *TLSAuth) isInvalid(cert *x509.Certificate, issuer *x509.Certificate) (bool, error) {
@ -265,11 +267,11 @@ func (ta *TLSAuth) ValidateCert(c *gin.Context) (bool, string, error) {
revoked, err := ta.isInvalid(clientCert, c.Request.TLS.VerifiedChains[0][1])
if err != nil {
ta.logger.Errorf("TLSAuth: error checking if client certificate is revoked: %s", err)
return false, "", fmt.Errorf("could not check for client certification revokation status: %w", err)
return false, "", fmt.Errorf("could not check for client certification revocation status: %w", err)
}
if revoked {
return false, "", fmt.Errorf("client certificate is revoked")
return false, "", fmt.Errorf("client certificate for CN=%s OU=%s is revoked", clientCert.Subject.CommonName, clientCert.Subject.OrganizationalUnit)
}
ta.logger.Debugf("client OU %v is allowed vs required OU %v", clientCert.Subject.OrganizationalUnit, ta.AllowedOUs)
@ -282,7 +284,7 @@ func (ta *TLSAuth) ValidateCert(c *gin.Context) (bool, string, error) {
func NewTLSAuth(allowedOus []string, crlPath string, cacheExpiration time.Duration, logger *log.Entry) (*TLSAuth, error) {
ta := &TLSAuth{
revokationCache: map[string]cacheEntry{},
revocationCache: map[string]cacheEntry{},
cacheExpiration: cacheExpiration,
CrlPath: crlPath,
logger: logger,

View file

@ -2,6 +2,7 @@ package appsec
import (
"fmt"
"net/http"
"os"
"regexp"
@ -30,6 +31,12 @@ const (
hookOnMatch
)
const (
BanRemediation = "ban"
CaptchaRemediation = "captcha"
AllowRemediation = "allow"
)
func (h *Hook) Build(hookStage int) error {
ctx := map[string]interface{}{}
@ -62,12 +69,13 @@ func (h *Hook) Build(hookStage int) error {
}
type AppsecTempResponse struct {
InBandInterrupt bool
OutOfBandInterrupt bool
Action string //allow, deny, captcha, log
HTTPResponseCode int
SendEvent bool //do we send an internal event on rule match
SendAlert bool //do we send an alert on rule match
InBandInterrupt bool
OutOfBandInterrupt bool
Action string //allow, deny, captcha, log
UserHTTPResponseCode int //The response code to send to the user
BouncerHTTPResponseCode int //The response code to send to the remediation component
SendEvent bool //do we send an internal event on rule match
SendAlert bool //do we send an alert on rule match
}
type AppsecSubEngineOpts struct {
@ -110,31 +118,33 @@ type AppsecRuntimeConfig struct {
}
type AppsecConfig struct {
Name string `yaml:"name"`
OutOfBandRules []string `yaml:"outofband_rules"`
InBandRules []string `yaml:"inband_rules"`
DefaultRemediation string `yaml:"default_remediation"`
DefaultPassAction string `yaml:"default_pass_action"`
BlockedHTTPCode int `yaml:"blocked_http_code"`
PassedHTTPCode int `yaml:"passed_http_code"`
OnLoad []Hook `yaml:"on_load"`
PreEval []Hook `yaml:"pre_eval"`
PostEval []Hook `yaml:"post_eval"`
OnMatch []Hook `yaml:"on_match"`
VariablesTracking []string `yaml:"variables_tracking"`
InbandOptions AppsecSubEngineOpts `yaml:"inband_options"`
OutOfBandOptions AppsecSubEngineOpts `yaml:"outofband_options"`
Name string `yaml:"name"`
OutOfBandRules []string `yaml:"outofband_rules"`
InBandRules []string `yaml:"inband_rules"`
DefaultRemediation string `yaml:"default_remediation"`
DefaultPassAction string `yaml:"default_pass_action"`
BouncerBlockedHTTPCode int `yaml:"blocked_http_code"` //returned to the bouncer
BouncerPassedHTTPCode int `yaml:"passed_http_code"` //returned to the bouncer
UserBlockedHTTPCode int `yaml:"user_blocked_http_code"` //returned to the user
UserPassedHTTPCode int `yaml:"user_passed_http_code"` //returned to the user
OnLoad []Hook `yaml:"on_load"`
PreEval []Hook `yaml:"pre_eval"`
PostEval []Hook `yaml:"post_eval"`
OnMatch []Hook `yaml:"on_match"`
VariablesTracking []string `yaml:"variables_tracking"`
InbandOptions AppsecSubEngineOpts `yaml:"inband_options"`
OutOfBandOptions AppsecSubEngineOpts `yaml:"outofband_options"`
LogLevel *log.Level `yaml:"log_level"`
Logger *log.Entry `yaml:"-"`
}
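Note: a minimal sketch of how these options resolve, mirroring how loadAppSecEngine builds its runtime earlier in this diff: fields left at their zero value are filled in by Build() (403/200 for the HTTP codes, ban/allow for the remediations). The import path for the appsec package is assumed from the repository layout.

package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"

	"github.com/crowdsecurity/crowdsec/pkg/appsec" // path assumed
)

func main() {
	cfg := appsec.AppsecConfig{
		Name:                "appsec-example",
		Logger:              log.WithField("component", "appsec_example"),
		DefaultRemediation:  appsec.BanRemediation,
		UserBlockedHTTPCode: 418, // shown to the user on a block
		// BouncerBlockedHTTPCode, BouncerPassedHTTPCode, UserPassedHTTPCode and
		// DefaultPassAction are left unset and default to 403, 200, 200, allow.
	}

	if _, err := cfg.Build(); err != nil {
		log.Fatalf("unable to build appsec runtime: %s", err)
	}

	fmt.Println("bouncer blocked code:", cfg.BouncerBlockedHTTPCode) // 403
	fmt.Println("user blocked code:", cfg.UserBlockedHTTPCode)       // 418
	fmt.Println("default pass action:", cfg.DefaultPassAction)       // allow
}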
func (w *AppsecRuntimeConfig) ClearResponse() {
log.Debugf("#-> %p", w)
w.Response = AppsecTempResponse{}
log.Debugf("-> %p", w.Config)
w.Response.Action = w.Config.DefaultPassAction
w.Response.HTTPResponseCode = w.Config.PassedHTTPCode
w.Response.BouncerHTTPResponseCode = w.Config.BouncerPassedHTTPCode
w.Response.UserHTTPResponseCode = w.Config.UserPassedHTTPCode
w.Response.SendEvent = true
w.Response.SendAlert = true
}
@ -191,24 +201,35 @@ func (wc *AppsecConfig) GetDataDir() string {
func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) {
ret := &AppsecRuntimeConfig{Logger: wc.Logger.WithField("component", "appsec_runtime_config")}
//set the defaults
switch wc.DefaultRemediation {
case "":
wc.DefaultRemediation = "ban"
case "ban", "captcha", "log":
//those are the officially supported remediation(s)
default:
wc.Logger.Warningf("default '%s' remediation of %s is none of [ban,captcha,log] ensure bouncer compatbility!", wc.DefaultRemediation, wc.Name)
if wc.BouncerBlockedHTTPCode == 0 {
wc.BouncerBlockedHTTPCode = http.StatusForbidden
}
if wc.BlockedHTTPCode == 0 {
wc.BlockedHTTPCode = 403
if wc.BouncerPassedHTTPCode == 0 {
wc.BouncerPassedHTTPCode = http.StatusOK
}
if wc.PassedHTTPCode == 0 {
wc.PassedHTTPCode = 200
if wc.UserBlockedHTTPCode == 0 {
wc.UserBlockedHTTPCode = http.StatusForbidden
}
if wc.UserPassedHTTPCode == 0 {
wc.UserPassedHTTPCode = http.StatusOK
}
if wc.DefaultPassAction == "" {
wc.DefaultPassAction = "allow"
wc.DefaultPassAction = AllowRemediation
}
if wc.DefaultRemediation == "" {
wc.DefaultRemediation = BanRemediation
}
//set the defaults
switch wc.DefaultRemediation {
case BanRemediation, CaptchaRemediation, AllowRemediation:
//those are the officially supported remediation(s)
default:
wc.Logger.Warningf("default '%s' remediation of %s is none of [%s,%s,%s] ensure bouncer compatbility!", wc.DefaultRemediation, wc.Name, BanRemediation, CaptchaRemediation, AllowRemediation)
}
ret.Name = wc.Name
ret.Config = wc
ret.DefaultRemediation = wc.DefaultRemediation
@ -290,20 +311,26 @@ func (w *AppsecRuntimeConfig) ProcessOnLoadRules() error {
switch t := output.(type) {
case bool:
if !t {
log.Debugf("filter didnt match")
w.Logger.Debugf("filter didnt match")
continue
}
default:
log.Errorf("Filter must return a boolean, can't filter")
w.Logger.Errorf("Filter must return a boolean, can't filter")
continue
}
}
for _, applyExpr := range rule.ApplyExpr {
_, err := exprhelpers.Run(applyExpr, GetOnLoadEnv(w), w.Logger, w.Logger.Level >= log.DebugLevel)
o, err := exprhelpers.Run(applyExpr, GetOnLoadEnv(w), w.Logger, w.Logger.Level >= log.DebugLevel)
if err != nil {
log.Errorf("unable to apply appsec on_load expr: %s", err)
w.Logger.Errorf("unable to apply appsec on_load expr: %s", err)
continue
}
switch t := o.(type) {
case error:
w.Logger.Errorf("unable to apply appsec on_load expr: %s", t)
continue
default:
}
}
}
return nil
@ -320,27 +347,33 @@ func (w *AppsecRuntimeConfig) ProcessOnMatchRules(request *ParsedRequest, evt ty
switch t := output.(type) {
case bool:
if !t {
log.Debugf("filter didnt match")
w.Logger.Debugf("filter didnt match")
continue
}
default:
log.Errorf("Filter must return a boolean, can't filter")
w.Logger.Errorf("Filter must return a boolean, can't filter")
continue
}
}
for _, applyExpr := range rule.ApplyExpr {
_, err := exprhelpers.Run(applyExpr, GetOnMatchEnv(w, request, evt), w.Logger, w.Logger.Level >= log.DebugLevel)
o, err := exprhelpers.Run(applyExpr, GetOnMatchEnv(w, request, evt), w.Logger, w.Logger.Level >= log.DebugLevel)
if err != nil {
log.Errorf("unable to apply appsec on_match expr: %s", err)
w.Logger.Errorf("unable to apply appsec on_match expr: %s", err)
continue
}
switch t := o.(type) {
case error:
w.Logger.Errorf("unable to apply appsec on_match expr: %s", t)
continue
default:
}
}
}
return nil
}
func (w *AppsecRuntimeConfig) ProcessPreEvalRules(request *ParsedRequest) error {
log.Debugf("processing %d pre_eval rules", len(w.CompiledPreEval))
w.Logger.Debugf("processing %d pre_eval rules", len(w.CompiledPreEval))
for _, rule := range w.CompiledPreEval {
if rule.FilterExpr != nil {
output, err := exprhelpers.Run(rule.FilterExpr, GetPreEvalEnv(w, request), w.Logger, w.Logger.Level >= log.DebugLevel)
@ -350,21 +383,27 @@ func (w *AppsecRuntimeConfig) ProcessPreEvalRules(request *ParsedRequest) error
switch t := output.(type) {
case bool:
if !t {
log.Debugf("filter didnt match")
w.Logger.Debugf("filter didnt match")
continue
}
default:
log.Errorf("Filter must return a boolean, can't filter")
w.Logger.Errorf("Filter must return a boolean, can't filter")
continue
}
}
// here means there is no filter or the filter matched
for _, applyExpr := range rule.ApplyExpr {
_, err := exprhelpers.Run(applyExpr, GetPreEvalEnv(w, request), w.Logger, w.Logger.Level >= log.DebugLevel)
o, err := exprhelpers.Run(applyExpr, GetPreEvalEnv(w, request), w.Logger, w.Logger.Level >= log.DebugLevel)
if err != nil {
log.Errorf("unable to apply appsec pre_eval expr: %s", err)
w.Logger.Errorf("unable to apply appsec pre_eval expr: %s", err)
continue
}
switch t := o.(type) {
case error:
w.Logger.Errorf("unable to apply appsec pre_eval expr: %s", t)
continue
default:
}
}
}
@ -381,21 +420,29 @@ func (w *AppsecRuntimeConfig) ProcessPostEvalRules(request *ParsedRequest) error
switch t := output.(type) {
case bool:
if !t {
log.Debugf("filter didnt match")
w.Logger.Debugf("filter didnt match")
continue
}
default:
log.Errorf("Filter must return a boolean, can't filter")
w.Logger.Errorf("Filter must return a boolean, can't filter")
continue
}
}
// here means there is no filter or the filter matched
for _, applyExpr := range rule.ApplyExpr {
_, err := exprhelpers.Run(applyExpr, GetPostEvalEnv(w, request), w.Logger, w.Logger.Level >= log.DebugLevel)
o, err := exprhelpers.Run(applyExpr, GetPostEvalEnv(w, request), w.Logger, w.Logger.Level >= log.DebugLevel)
if err != nil {
log.Errorf("unable to apply appsec post_eval expr: %s", err)
w.Logger.Errorf("unable to apply appsec post_eval expr: %s", err)
continue
}
switch t := o.(type) {
case error:
w.Logger.Errorf("unable to apply appsec post_eval expr: %s", t)
continue
default:
}
}
}
@ -527,27 +574,13 @@ func (w *AppsecRuntimeConfig) SetActionByName(name string, action string) error
func (w *AppsecRuntimeConfig) SetAction(action string) error {
//log.Infof("setting to %s", action)
w.Logger.Debugf("setting action to %s", action)
switch action {
case "allow":
w.Response.Action = action
w.Response.HTTPResponseCode = w.Config.PassedHTTPCode
//@tko how should we handle this ? it seems bouncer only understand bans, but it might be misleading ?
case "deny", "ban", "block":
w.Response.Action = "ban"
case "log":
w.Response.Action = action
w.Response.HTTPResponseCode = w.Config.PassedHTTPCode
case "captcha":
w.Response.Action = action
default:
w.Response.Action = action
}
w.Response.Action = action
return nil
}
func (w *AppsecRuntimeConfig) SetHTTPCode(code int) error {
w.Logger.Debugf("setting http code to %d", code)
w.Response.HTTPResponseCode = code
w.Response.UserHTTPResponseCode = code
return nil
}
@ -556,24 +589,23 @@ type BodyResponse struct {
HTTPStatus int `json:"http_status"`
}
func (w *AppsecRuntimeConfig) GenerateResponse(response AppsecTempResponse, logger *log.Entry) BodyResponse {
resp := BodyResponse{}
//if there is no interrupt, we should allow with default code
if !response.InBandInterrupt {
resp.Action = w.Config.DefaultPassAction
resp.HTTPStatus = w.Config.PassedHTTPCode
return resp
}
resp.Action = response.Action
if resp.Action == "" {
resp.Action = w.Config.DefaultRemediation
}
logger.Debugf("action is %s", resp.Action)
func (w *AppsecRuntimeConfig) GenerateResponse(response AppsecTempResponse, logger *log.Entry) (int, BodyResponse) {
var bouncerStatusCode int
resp.HTTPStatus = response.HTTPResponseCode
if resp.HTTPStatus == 0 {
resp.HTTPStatus = w.Config.BlockedHTTPCode
resp := BodyResponse{Action: response.Action}
if response.Action == AllowRemediation {
resp.HTTPStatus = w.Config.UserPassedHTTPCode
bouncerStatusCode = w.Config.BouncerPassedHTTPCode
} else { //ban, captcha and anything else
resp.HTTPStatus = response.UserHTTPResponseCode
if resp.HTTPStatus == 0 {
resp.HTTPStatus = w.Config.UserBlockedHTTPCode
}
bouncerStatusCode = response.BouncerHTTPResponseCode
if bouncerStatusCode == 0 {
bouncerStatusCode = w.Config.BouncerBlockedHTTPCode
}
}
logger.Debugf("http status is %d", resp.HTTPStatus)
return resp
return bouncerStatusCode, resp
}
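GenerateResponse now returns two status codes: one for the remediation component on the wire and one inside the JSON body for the end user. A minimal sketch of a hypothetical caller, assuming an "action" JSON tag on BodyResponse (only "http_status" is confirmed by the hunk above):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// BodyResponse mirrors the reply body from the diff; the "action" tag is an assumption.
type BodyResponse struct {
	Action     string `json:"action"`
	HTTPStatus int    `json:"http_status"`
}

// writeAppsecReply is a hypothetical caller of GenerateResponse: the bouncer sees
// the wire status, while the body carries the user-facing status to apply.
func writeAppsecReply(w http.ResponseWriter, bouncerStatus int, body BodyResponse) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(bouncerStatus)
	_ = json.NewEncoder(w).Encode(body)
}

func main() {
	rec := httptest.NewRecorder()
	// e.g. ban: the bouncer gets 403 on the wire, the end user should also be served 403
	writeAppsecReply(rec, http.StatusForbidden, BodyResponse{Action: "ban", HTTPStatus: http.StatusForbidden})
	fmt.Println(rec.Code, rec.Body.String())
}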

View file

@ -38,7 +38,7 @@ type ParsedRequest struct {
Body []byte `json:"body,omitempty"`
TransferEncoding []string `json:"transfer_encoding,omitempty"`
UUID string `json:"uuid,omitempty"`
Tx ExtendedTransaction `json:"transaction,omitempty"`
Tx ExtendedTransaction `json:"-"`
ResponseChannel chan AppsecTempResponse `json:"-"`
IsInBand bool `json:"-"`
IsOutBand bool `json:"-"`
@ -260,12 +260,17 @@ func (r *ReqDumpFilter) ToJSON() error {
req := r.GetFilteredRequest()
log.Warningf("dumping : %+v", req)
log.Tracef("dumping : %+v", req)
if err := enc.Encode(req); err != nil {
//Don't clobber the temp directory with empty files
err2 := os.Remove(fd.Name())
if err2 != nil {
log.Errorf("while removing temp file %s: %s", fd.Name(), err)
}
return fmt.Errorf("while encoding request: %w", err)
}
log.Warningf("request dumped to %s", fd.Name())
log.Infof("request dumped to %s", fd.Name())
return nil
}
@ -324,7 +329,7 @@ func NewParsedRequestFromRequest(r *http.Request, logger *logrus.Entry) (ParsedR
return ParsedRequest{}, fmt.Errorf("unable to parse url '%s': %s", clientURI, err)
}
remoteAddrNormalized := ""
var remoteAddrNormalized string
host, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
log.Errorf("Invalid appsec remote IP source %v: %s", r.RemoteAddr, err.Error())
@ -332,7 +337,7 @@ func NewParsedRequestFromRequest(r *http.Request, logger *logrus.Entry) (ParsedR
} else {
ip := net.ParseIP(host)
if ip == nil {
log.Errorf("Invalid appsec remote IP address source %v: %s", r.RemoteAddr, err.Error())
log.Errorf("Invalid appsec remote IP address source %v", r.RemoteAddr)
remoteAddrNormalized = r.RemoteAddr
} else {
remoteAddrNormalized = ip.String()

View file

@ -178,6 +178,7 @@ func (l *LocalApiClientCfg) Load() error {
func (lapiCfg *LocalApiServerCfg) GetTrustedIPs() ([]net.IPNet, error) {
trustedIPs := make([]net.IPNet, 0)
for _, ip := range lapiCfg.TrustedIPs {
cidr := toValidCIDR(ip)
@ -236,7 +237,7 @@ type LocalApiServerCfg struct {
CapiWhitelists *CapiWhitelist `yaml:"-"`
}
func (c *Config) LoadAPIServer() error {
func (c *Config) LoadAPIServer(inCli bool) error {
if c.DisableAPI {
log.Warning("crowdsec local API is disabled from flag")
}
@ -265,7 +266,7 @@ func (c *Config) LoadAPIServer() error {
return fmt.Errorf("no listen_uri specified")
}
//inherit log level from common, then api->server
// inherit log level from common, then api->server
var logLevel log.Level
if c.API.Server.LogLevel != nil {
logLevel = *c.API.Server.LogLevel
@ -285,11 +286,11 @@ func (c *Config) LoadAPIServer() error {
}
}
if c.API.Server.OnlineClient == nil || c.API.Server.OnlineClient.Credentials == nil {
if (c.API.Server.OnlineClient == nil || c.API.Server.OnlineClient.Credentials == nil) && !inCli {
log.Printf("push and pull to Central API disabled")
}
if err := c.LoadDBConfig(); err != nil {
if err := c.LoadDBConfig(inCli); err != nil {
return err
}
@ -297,7 +298,7 @@ func (c *Config) LoadAPIServer() error {
return err
}
if c.API.Server.CapiWhitelistsPath != "" {
if c.API.Server.CapiWhitelistsPath != "" && !inCli {
log.Infof("loaded capi whitelist from %s: %d IPs, %d CIDRs", c.API.Server.CapiWhitelistsPath, len(c.API.Server.CapiWhitelists.Ips), len(c.API.Server.CapiWhitelists.Cidrs))
}

View file

@ -240,7 +240,7 @@ func TestLoadAPIServer(t *testing.T) {
for _, tc := range tests {
tc := tc
t.Run(tc.name, func(t *testing.T) {
err := tc.input.LoadAPIServer()
err := tc.input.LoadAPIServer(false)
cstest.RequireErrorContains(t, err, tc.expectedErr)
if tc.expectedErr != "" {
return

View file

@ -25,7 +25,7 @@ var globalConfig = Config{}
// Config contains top-level defaults -> overridden by configuration file -> overridden by CLI flags
type Config struct {
//just a path to ourselves :p
// just a path to ourselves :p
FilePath *string `yaml:"-"`
Self []byte `yaml:"-"`
Common *CommonCfg `yaml:"common,omitempty"`
@ -41,13 +41,15 @@ type Config struct {
Hub *LocalHubCfg `yaml:"-"`
}
func NewConfig(configFile string, disableAgent bool, disableAPI bool, quiet bool) (*Config, string, error) {
func NewConfig(configFile string, disableAgent bool, disableAPI bool, inCli bool) (*Config, string, error) {
patcher := yamlpatch.NewPatcher(configFile, ".local")
patcher.SetQuiet(quiet)
patcher.SetQuiet(inCli)
fcontent, err := patcher.MergedPatchContent()
if err != nil {
return nil, "", err
}
configData := csstring.StrictExpand(string(fcontent), os.LookupEnv)
cfg := Config{
FilePath: &configFile,

View file

@ -9,6 +9,7 @@ type CscliCfg struct {
Output string `yaml:"output,omitempty"`
Color string `yaml:"color,omitempty"`
HubBranch string `yaml:"hub_branch"`
HubURLTemplate string `yaml:"__hub_url_template__,omitempty"`
SimulationConfig *SimulationConfig `yaml:"-"`
DbConfig *DatabaseCfg `yaml:"-"`
@ -16,6 +17,8 @@ type CscliCfg struct {
PrometheusUrl string `yaml:"prometheus_uri"`
}
const defaultHubURLTemplate = "https://hub-cdn.crowdsec.net/%s/%s"
func (c *Config) loadCSCLI() error {
if c.Cscli == nil {
c.Cscli = &CscliCfg{}
@ -25,5 +28,9 @@ func (c *Config) loadCSCLI() error {
c.Cscli.PrometheusUrl = fmt.Sprintf("http://%s:%d/metrics", c.Prometheus.ListenAddr, c.Prometheus.ListenPort)
}
if c.Cscli.HubURLTemplate == "" {
c.Cscli.HubURLTemplate = defaultHubURLTemplate
}
return nil
}
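The default template carries two %s placeholders. A hedged sketch of how it might be expanded; the branch and path arguments (including ".index.json") are assumptions for illustration only:

package main

import "fmt"

const defaultHubURLTemplate = "https://hub-cdn.crowdsec.net/%s/%s"

func main() {
	// Hypothetical expansion: first placeholder is the hub branch, the second a
	// file path inside the hub.
	branch := "master"
	remotePath := ".index.json"
	fmt.Printf(defaultHubURLTemplate+"\n", branch, remotePath)
}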

View file

@ -32,7 +32,8 @@ func TestLoadCSCLI(t *testing.T) {
},
},
expected: &CscliCfg{
PrometheusUrl: "http://127.0.0.1:6060/metrics",
PrometheusUrl: "http://127.0.0.1:6060/metrics",
HubURLTemplate: defaultHubURLTemplate,
},
},
}

View file

@ -50,7 +50,7 @@ type FlushDBCfg struct {
AgentsGC *AuthGCCfg `yaml:"agents_autodelete,omitempty"`
}
func (c *Config) LoadDBConfig() error {
func (c *Config) LoadDBConfig(inCli bool) error {
if c.DbConfig == nil {
return fmt.Errorf("no database configuration provided")
}
@ -77,10 +77,8 @@ func (c *Config) LoadDBConfig() error {
c.DbConfig.DecisionBulkSize = maxDecisionBulkSize
}
if c.DbConfig.Type == "sqlite" {
if c.DbConfig.UseWal == nil {
log.Warning("You are using sqlite without WAL, this can have a performance impact. If you do not store the database in a network share, set db_config.use_wal to true. Set explicitly to false to disable this warning.")
}
if !inCli && c.DbConfig.Type == "sqlite" && c.DbConfig.UseWal == nil {
log.Warning("You are using sqlite without WAL, this can have a performance impact. If you do not store the database in a network share, set db_config.use_wal to true. Set explicitly to false to disable this warning.")
}
return nil

View file

@ -47,7 +47,7 @@ func TestLoadDBConfig(t *testing.T) {
for _, tc := range tests {
tc := tc
t.Run(tc.name, func(t *testing.T) {
err := tc.input.LoadDBConfig()
err := tc.input.LoadDBConfig(false)
cstest.RequireErrorContains(t, err, tc.expectedErr)
if tc.expectedErr != "" {
return

View file

@ -77,14 +77,11 @@ func formatAlertSource(alert *models.Alert) string {
func formatAlertAsString(machineID string, alert *models.Alert) []string {
src := formatAlertSource(alert)
/**/
msg := ""
msg := "empty scenario"
if alert.Scenario != nil && *alert.Scenario != "" {
msg = *alert.Scenario
} else if alert.Message != nil && *alert.Message != "" {
msg = *alert.Message
} else {
msg = "empty scenario"
}
reason := fmt.Sprintf("%s by %s", msg, src)
@ -116,7 +113,7 @@ func formatAlertAsString(machineID string, alert *models.Alert) []string {
reason = fmt.Sprintf("%s for %d/%d decisions", msg, i+1, len(alert.Decisions))
}
machineIDOrigin := ""
var machineIDOrigin string
if machineID == "" {
machineIDOrigin = *decisionItem.Origin
} else {
@ -209,9 +206,9 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert)
//add missing decisions
log.Debugf("Adding %d missing decisions to alert %s", len(missingDecisions), foundAlert.UUID)
decisionBuilders := make([]*ent.DecisionCreate, len(missingDecisions))
decisionBuilders := []*ent.DecisionCreate{}
for i, decisionItem := range missingDecisions {
for _, decisionItem := range missingDecisions {
var start_ip, start_sfx, end_ip, end_sfx int64
var sz int
@ -219,7 +216,8 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert)
if strings.ToLower(*decisionItem.Scope) == "ip" || strings.ToLower(*decisionItem.Scope) == "range" {
sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(*decisionItem.Value)
if err != nil {
return "", errors.Wrapf(InvalidIPOrRange, "invalid addr/range %s : %s", *decisionItem.Value, err)
log.Errorf("invalid addr/range '%s': %s", *decisionItem.Value, err)
continue
}
}
@ -254,7 +252,7 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert)
SetSimulated(*alertItem.Simulated).
SetUUID(decisionItem.UUID)
decisionBuilders[i] = decisionBuilder
decisionBuilders = append(decisionBuilders, decisionBuilder)
}
decisions := []*ent.Decision{}
@ -486,9 +484,9 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in
}
func (c *Client) createDecisionChunk(simulated bool, stopAtTime time.Time, decisions []*models.Decision) ([]*ent.Decision, error) {
decisionCreate := make([]*ent.DecisionCreate, len(decisions))
decisionCreate := []*ent.DecisionCreate{}
for i, decisionItem := range decisions {
for _, decisionItem := range decisions {
var start_ip, start_sfx, end_ip, end_sfx int64
var sz int
@ -501,7 +499,8 @@ func (c *Client) createDecisionChunk(simulated bool, stopAtTime time.Time, decis
if strings.ToLower(*decisionItem.Scope) == "ip" || strings.ToLower(*decisionItem.Scope) == "range" {
sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(*decisionItem.Value)
if err != nil {
return nil, fmt.Errorf("%s: %w", *decisionItem.Value, InvalidIPOrRange)
log.Errorf("invalid addr/range '%s': %s", *decisionItem.Value, err)
continue
}
}
@ -520,7 +519,11 @@ func (c *Client) createDecisionChunk(simulated bool, stopAtTime time.Time, decis
SetSimulated(simulated).
SetUUID(decisionItem.UUID)
decisionCreate[i] = newDecision
decisionCreate = append(decisionCreate, newDecision)
}
if len(decisionCreate) == 0 {
return nil, nil
}
ret, err := c.Ent.Decision.CreateBulk(decisionCreate...).Save(c.CTX)
@ -532,10 +535,10 @@ func (c *Client) createDecisionChunk(simulated bool, stopAtTime time.Time, decis
}
func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts []*models.Alert) ([]string, error) {
alertBuilders := make([]*ent.AlertCreate, len(alerts))
alertDecisions := make([][]*ent.Decision, len(alerts))
alertBuilders := []*ent.AlertCreate{}
alertDecisions := [][]*ent.Decision{}
for i, alertItem := range alerts {
for _, alertItem := range alerts {
var metas []*ent.Meta
var events []*ent.Event
@ -656,6 +659,17 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [
decisions = append(decisions, decisionRet...)
}
discarded := len(alertItem.Decisions) - len(decisions)
if discarded > 0 {
c.Log.Warningf("discarded %d decisions for %s", discarded, alertItem.UUID)
}
// if all decisions were discarded, discard the alert too
if discarded > 0 && len(decisions) == 0 {
c.Log.Warningf("dropping alert %s with invalid decisions", alertItem.UUID)
continue
}
alertBuilder := c.Ent.Alert.
Create().
SetScenario(*alertItem.Scenario).
@ -685,8 +699,13 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [
alertBuilder.SetOwner(owner)
}
alertBuilders[i] = alertBuilder
alertDecisions[i] = decisions
alertBuilders = append(alertBuilders, alertBuilder)
alertDecisions = append(alertDecisions, decisions)
}
if len(alertBuilders) == 0 {
log.Warningf("no alerts to create, discarded?")
return nil, nil
}
alertsCreateBulk, err := c.Ent.Alert.CreateBulk(alertBuilders...).Save(c.CTX)
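Both chunk builders switch from failing the whole batch on one bad address to logging and skipping it, then guard against calling CreateBulk with an empty slice. A self-contained sketch of that pattern, with parseAddr standing in for types.Addr2Ints:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// parseAddr stands in for types.Addr2Ints in this sketch.
func parseAddr(v string) error {
	if !strings.Contains(v, ".") {
		return errors.New("not an address")
	}
	return nil
}

// buildDecisions shows the pattern from the diff: skip invalid entries with a log
// line instead of aborting, and return early if nothing is left to bulk-insert.
func buildDecisions(values []string) ([]string, error) {
	builders := []string{}
	for _, v := range values {
		if err := parseAddr(v); err != nil {
			fmt.Printf("invalid addr/range '%s': %s\n", v, err)
			continue
		}
		builders = append(builders, v)
	}
	if len(builders) == 0 {
		return nil, nil // nothing valid: skip the bulk insert entirely
	}
	return builders, nil
}

func main() {
	out, _ := buildDecisions([]string{"1.2.3.4", "garbage"})
	fmt.Println("decisions to insert:", out)
}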

View file

@ -7,6 +7,7 @@ import (
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
@ -67,6 +68,7 @@ type Alert struct {
// The values are being populated by the AlertQuery when eager-loading is set.
Edges AlertEdges `json:"edges"`
machine_alerts *int
selectValues sql.SelectValues
}
// AlertEdges holds the relations/edges for other nodes in the graph.
@ -142,7 +144,7 @@ func (*Alert) scanValues(columns []string) ([]any, error) {
case alert.ForeignKeys[0]: // machine_alerts
values[i] = new(sql.NullInt64)
default:
return nil, fmt.Errorf("unexpected column %q for type Alert", columns[i])
values[i] = new(sql.UnknownType)
}
}
return values, nil
@ -309,36 +311,44 @@ func (a *Alert) assignValues(columns []string, values []any) error {
a.machine_alerts = new(int)
*a.machine_alerts = int(value.Int64)
}
default:
a.selectValues.Set(columns[i], values[i])
}
}
return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the Alert.
// This includes values selected through modifiers, order, etc.
func (a *Alert) Value(name string) (ent.Value, error) {
return a.selectValues.Get(name)
}
// QueryOwner queries the "owner" edge of the Alert entity.
func (a *Alert) QueryOwner() *MachineQuery {
return (&AlertClient{config: a.config}).QueryOwner(a)
return NewAlertClient(a.config).QueryOwner(a)
}
// QueryDecisions queries the "decisions" edge of the Alert entity.
func (a *Alert) QueryDecisions() *DecisionQuery {
return (&AlertClient{config: a.config}).QueryDecisions(a)
return NewAlertClient(a.config).QueryDecisions(a)
}
// QueryEvents queries the "events" edge of the Alert entity.
func (a *Alert) QueryEvents() *EventQuery {
return (&AlertClient{config: a.config}).QueryEvents(a)
return NewAlertClient(a.config).QueryEvents(a)
}
// QueryMetas queries the "metas" edge of the Alert entity.
func (a *Alert) QueryMetas() *MetaQuery {
return (&AlertClient{config: a.config}).QueryMetas(a)
return NewAlertClient(a.config).QueryMetas(a)
}
// Update returns a builder for updating this Alert.
// Note that you need to call Alert.Unwrap() before calling this method if this Alert
// was returned from a transaction, and the transaction was committed or rolled back.
func (a *Alert) Update() *AlertUpdateOne {
return (&AlertClient{config: a.config}).UpdateOne(a)
return NewAlertClient(a.config).UpdateOne(a)
}
// Unwrap unwraps the Alert entity that was returned from a transaction after it was closed,
@ -435,9 +445,3 @@ func (a *Alert) String() string {
// Alerts is a parsable slice of Alert.
type Alerts []*Alert
func (a Alerts) config(cfg config) {
for _i := range a {
a[_i].config = cfg
}
}

View file

@ -4,6 +4,9 @@ package alert
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
const (
@ -168,3 +171,203 @@ var (
// DefaultSimulated holds the default value on creation for the "simulated" field.
DefaultSimulated bool
)
// OrderOption defines the ordering options for the Alert queries.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc()
}
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}
// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
// ByScenario orders the results by the scenario field.
func ByScenario(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldScenario, opts...).ToFunc()
}
// ByBucketId orders the results by the bucketId field.
func ByBucketId(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldBucketId, opts...).ToFunc()
}
// ByMessage orders the results by the message field.
func ByMessage(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldMessage, opts...).ToFunc()
}
// ByEventsCountField orders the results by the eventsCount field.
func ByEventsCountField(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldEventsCount, opts...).ToFunc()
}
// ByStartedAt orders the results by the startedAt field.
func ByStartedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldStartedAt, opts...).ToFunc()
}
// ByStoppedAt orders the results by the stoppedAt field.
func ByStoppedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldStoppedAt, opts...).ToFunc()
}
// BySourceIp orders the results by the sourceIp field.
func BySourceIp(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSourceIp, opts...).ToFunc()
}
// BySourceRange orders the results by the sourceRange field.
func BySourceRange(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSourceRange, opts...).ToFunc()
}
// BySourceAsNumber orders the results by the sourceAsNumber field.
func BySourceAsNumber(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSourceAsNumber, opts...).ToFunc()
}
// BySourceAsName orders the results by the sourceAsName field.
func BySourceAsName(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSourceAsName, opts...).ToFunc()
}
// BySourceCountry orders the results by the sourceCountry field.
func BySourceCountry(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSourceCountry, opts...).ToFunc()
}
// BySourceLatitude orders the results by the sourceLatitude field.
func BySourceLatitude(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSourceLatitude, opts...).ToFunc()
}
// BySourceLongitude orders the results by the sourceLongitude field.
func BySourceLongitude(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSourceLongitude, opts...).ToFunc()
}
// BySourceScope orders the results by the sourceScope field.
func BySourceScope(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSourceScope, opts...).ToFunc()
}
// BySourceValue orders the results by the sourceValue field.
func BySourceValue(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSourceValue, opts...).ToFunc()
}
// ByCapacity orders the results by the capacity field.
func ByCapacity(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCapacity, opts...).ToFunc()
}
// ByLeakSpeed orders the results by the leakSpeed field.
func ByLeakSpeed(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldLeakSpeed, opts...).ToFunc()
}
// ByScenarioVersion orders the results by the scenarioVersion field.
func ByScenarioVersion(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldScenarioVersion, opts...).ToFunc()
}
// ByScenarioHash orders the results by the scenarioHash field.
func ByScenarioHash(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldScenarioHash, opts...).ToFunc()
}
// BySimulated orders the results by the simulated field.
func BySimulated(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSimulated, opts...).ToFunc()
}
// ByUUID orders the results by the uuid field.
func ByUUID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUUID, opts...).ToFunc()
}
// ByOwnerField orders the results by owner field.
func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...))
}
}
// ByDecisionsCount orders the results by decisions count.
func ByDecisionsCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newDecisionsStep(), opts...)
}
}
// ByDecisions orders the results by decisions terms.
func ByDecisions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newDecisionsStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
// ByEventsCount orders the results by events count.
func ByEventsCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newEventsStep(), opts...)
}
}
// ByEvents orders the results by events terms.
func ByEvents(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newEventsStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
// ByMetasCount orders the results by metas count.
func ByMetasCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newMetasStep(), opts...)
}
}
// ByMetas orders the results by metas terms.
func ByMetas(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newMetasStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
func newOwnerStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(OwnerInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
)
}
func newDecisionsStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(DecisionsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, DecisionsTable, DecisionsColumn),
)
}
func newEventsStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(EventsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, EventsTable, EventsColumn),
)
}
func newMetasStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(MetasInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, MetasTable, MetasColumn),
)
}
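The regenerated alert package exposes typed OrderOption helpers instead of the generic OrderFunc. A minimal sketch of the mechanism only, using a stand-in selector type rather than ent's sql.Selector:

package main

import "fmt"

// selector is a stand-in for sql.Selector in this sketch.
type selector struct{ orderBy []string }

// OrderOption mirrors the generated type: an option is a function that mutates
// the selector, so call sites compose them as variadic arguments.
type OrderOption func(*selector)

func ByCreatedAt(desc bool) OrderOption {
	return func(s *selector) {
		col := "created_at"
		if desc {
			col += " DESC"
		}
		s.orderBy = append(s.orderBy, col)
	}
}

func applyOrder(s *selector, opts ...OrderOption) {
	for _, o := range opts {
		o(s)
	}
}

func main() {
	s := &selector{}
	applyOrder(s, ByCreatedAt(true))
	fmt.Println(s.orderBy) // [created_at DESC]
}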

File diff suppressed because it is too large

View file

@ -409,50 +409,8 @@ func (ac *AlertCreate) Mutation() *AlertMutation {
// Save creates the Alert in the database.
func (ac *AlertCreate) Save(ctx context.Context) (*Alert, error) {
var (
err error
node *Alert
)
ac.defaults()
if len(ac.hooks) == 0 {
if err = ac.check(); err != nil {
return nil, err
}
node, err = ac.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*AlertMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = ac.check(); err != nil {
return nil, err
}
ac.mutation = mutation
if node, err = ac.sqlSave(ctx); err != nil {
return nil, err
}
mutation.id = &node.ID
mutation.done = true
return node, err
})
for i := len(ac.hooks) - 1; i >= 0; i-- {
if ac.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = ac.hooks[i](mut)
}
v, err := mut.Mutate(ctx, ac.mutation)
if err != nil {
return nil, err
}
nv, ok := v.(*Alert)
if !ok {
return nil, fmt.Errorf("unexpected node type %T returned from AlertMutation", v)
}
node = nv
}
return node, err
return withHooks(ctx, ac.sqlSave, ac.mutation, ac.hooks)
}
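Every regenerated builder replaces its hand-rolled hook loop with the shared withHooks helper. A simplified, hedged sketch of what such a helper does (the signatures here are not the exact generated ones); it preserves the wrapping order of the removed loop, so the first registered hook runs outermost:

package main

import (
	"context"
	"fmt"
)

// Mutator and Hook mirror ent's shapes in spirit; simplified for this sketch.
type Mutator func(context.Context) (any, error)
type Hook func(Mutator) Mutator

// withHooks reproduces what the removed per-builder loop did: wrap the terminal
// exec function in the hooks so that hooks[0] runs outermost.
func withHooks(ctx context.Context, exec Mutator, hooks []Hook) (any, error) {
	mut := exec
	for i := len(hooks) - 1; i >= 0; i-- {
		mut = hooks[i](mut)
	}
	return mut(ctx)
}

func main() {
	logHook := func(next Mutator) Mutator {
		return func(ctx context.Context) (any, error) {
			fmt.Println("before save")
			v, err := next(ctx)
			fmt.Println("after save")
			return v, err
		}
	}
	v, _ := withHooks(context.Background(), func(context.Context) (any, error) {
		return "node", nil // stands in for ac.sqlSave
	}, []Hook{logHook})
	fmt.Println(v)
}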
// SaveX calls Save and panics if Save returns an error.
@ -525,6 +483,9 @@ func (ac *AlertCreate) check() error {
}
func (ac *AlertCreate) sqlSave(ctx context.Context) (*Alert, error) {
if err := ac.check(); err != nil {
return nil, err
}
_node, _spec := ac.createSpec()
if err := sqlgraph.CreateNode(ctx, ac.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
@ -534,202 +495,106 @@ func (ac *AlertCreate) sqlSave(ctx context.Context) (*Alert, error) {
}
id := _spec.ID.Value.(int64)
_node.ID = int(id)
ac.mutation.id = &_node.ID
ac.mutation.done = true
return _node, nil
}
func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) {
var (
_node = &Alert{config: ac.config}
_spec = &sqlgraph.CreateSpec{
Table: alert.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
}
_spec = sqlgraph.NewCreateSpec(alert.Table, sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt))
)
if value, ok := ac.mutation.CreatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: alert.FieldCreatedAt,
})
_spec.SetField(alert.FieldCreatedAt, field.TypeTime, value)
_node.CreatedAt = &value
}
if value, ok := ac.mutation.UpdatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: alert.FieldUpdatedAt,
})
_spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = &value
}
if value, ok := ac.mutation.Scenario(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldScenario,
})
_spec.SetField(alert.FieldScenario, field.TypeString, value)
_node.Scenario = value
}
if value, ok := ac.mutation.BucketId(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldBucketId,
})
_spec.SetField(alert.FieldBucketId, field.TypeString, value)
_node.BucketId = value
}
if value, ok := ac.mutation.Message(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldMessage,
})
_spec.SetField(alert.FieldMessage, field.TypeString, value)
_node.Message = value
}
if value, ok := ac.mutation.EventsCount(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeInt32,
Value: value,
Column: alert.FieldEventsCount,
})
_spec.SetField(alert.FieldEventsCount, field.TypeInt32, value)
_node.EventsCount = value
}
if value, ok := ac.mutation.StartedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: alert.FieldStartedAt,
})
_spec.SetField(alert.FieldStartedAt, field.TypeTime, value)
_node.StartedAt = value
}
if value, ok := ac.mutation.StoppedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: alert.FieldStoppedAt,
})
_spec.SetField(alert.FieldStoppedAt, field.TypeTime, value)
_node.StoppedAt = value
}
if value, ok := ac.mutation.SourceIp(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldSourceIp,
})
_spec.SetField(alert.FieldSourceIp, field.TypeString, value)
_node.SourceIp = value
}
if value, ok := ac.mutation.SourceRange(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldSourceRange,
})
_spec.SetField(alert.FieldSourceRange, field.TypeString, value)
_node.SourceRange = value
}
if value, ok := ac.mutation.SourceAsNumber(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldSourceAsNumber,
})
_spec.SetField(alert.FieldSourceAsNumber, field.TypeString, value)
_node.SourceAsNumber = value
}
if value, ok := ac.mutation.SourceAsName(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldSourceAsName,
})
_spec.SetField(alert.FieldSourceAsName, field.TypeString, value)
_node.SourceAsName = value
}
if value, ok := ac.mutation.SourceCountry(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldSourceCountry,
})
_spec.SetField(alert.FieldSourceCountry, field.TypeString, value)
_node.SourceCountry = value
}
if value, ok := ac.mutation.SourceLatitude(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeFloat32,
Value: value,
Column: alert.FieldSourceLatitude,
})
_spec.SetField(alert.FieldSourceLatitude, field.TypeFloat32, value)
_node.SourceLatitude = value
}
if value, ok := ac.mutation.SourceLongitude(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeFloat32,
Value: value,
Column: alert.FieldSourceLongitude,
})
_spec.SetField(alert.FieldSourceLongitude, field.TypeFloat32, value)
_node.SourceLongitude = value
}
if value, ok := ac.mutation.SourceScope(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldSourceScope,
})
_spec.SetField(alert.FieldSourceScope, field.TypeString, value)
_node.SourceScope = value
}
if value, ok := ac.mutation.SourceValue(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldSourceValue,
})
_spec.SetField(alert.FieldSourceValue, field.TypeString, value)
_node.SourceValue = value
}
if value, ok := ac.mutation.Capacity(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeInt32,
Value: value,
Column: alert.FieldCapacity,
})
_spec.SetField(alert.FieldCapacity, field.TypeInt32, value)
_node.Capacity = value
}
if value, ok := ac.mutation.LeakSpeed(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldLeakSpeed,
})
_spec.SetField(alert.FieldLeakSpeed, field.TypeString, value)
_node.LeakSpeed = value
}
if value, ok := ac.mutation.ScenarioVersion(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldScenarioVersion,
})
_spec.SetField(alert.FieldScenarioVersion, field.TypeString, value)
_node.ScenarioVersion = value
}
if value, ok := ac.mutation.ScenarioHash(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldScenarioHash,
})
_spec.SetField(alert.FieldScenarioHash, field.TypeString, value)
_node.ScenarioHash = value
}
if value, ok := ac.mutation.Simulated(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeBool,
Value: value,
Column: alert.FieldSimulated,
})
_spec.SetField(alert.FieldSimulated, field.TypeBool, value)
_node.Simulated = value
}
if value, ok := ac.mutation.UUID(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldUUID,
})
_spec.SetField(alert.FieldUUID, field.TypeString, value)
_node.UUID = value
}
if nodes := ac.mutation.OwnerIDs(); len(nodes) > 0 {
@ -740,10 +605,7 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) {
Columns: []string{alert.OwnerColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: machine.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -760,10 +622,7 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) {
Columns: []string{alert.DecisionsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: decision.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -779,10 +638,7 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) {
Columns: []string{alert.EventsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: event.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -798,10 +654,7 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) {
Columns: []string{alert.MetasColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: meta.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -815,11 +668,15 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) {
// AlertCreateBulk is the builder for creating many Alert entities in bulk.
type AlertCreateBulk struct {
config
err error
builders []*AlertCreate
}
// Save creates the Alert entities in the database.
func (acb *AlertCreateBulk) Save(ctx context.Context) ([]*Alert, error) {
if acb.err != nil {
return nil, acb.err
}
specs := make([]*sqlgraph.CreateSpec, len(acb.builders))
nodes := make([]*Alert, len(acb.builders))
mutators := make([]Mutator, len(acb.builders))
@ -836,8 +693,8 @@ func (acb *AlertCreateBulk) Save(ctx context.Context) ([]*Alert, error) {
return nil, err
}
builder.mutation = mutation
nodes[i], specs[i] = builder.createSpec()
var err error
nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, acb.builders[i+1].mutation)
} else {

View file

@ -4,7 +4,6 @@ package ent
import (
"context"
"fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
@ -28,34 +27,7 @@ func (ad *AlertDelete) Where(ps ...predicate.Alert) *AlertDelete {
// Exec executes the deletion query and returns how many vertices were deleted.
func (ad *AlertDelete) Exec(ctx context.Context) (int, error) {
var (
err error
affected int
)
if len(ad.hooks) == 0 {
affected, err = ad.sqlExec(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*AlertMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
ad.mutation = mutation
affected, err = ad.sqlExec(ctx)
mutation.done = true
return affected, err
})
for i := len(ad.hooks) - 1; i >= 0; i-- {
if ad.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = ad.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, ad.mutation); err != nil {
return 0, err
}
}
return affected, err
return withHooks(ctx, ad.sqlExec, ad.mutation, ad.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
@ -68,15 +40,7 @@ func (ad *AlertDelete) ExecX(ctx context.Context) int {
}
func (ad *AlertDelete) sqlExec(ctx context.Context) (int, error) {
_spec := &sqlgraph.DeleteSpec{
Node: &sqlgraph.NodeSpec{
Table: alert.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
},
}
_spec := sqlgraph.NewDeleteSpec(alert.Table, sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt))
if ps := ad.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -88,6 +52,7 @@ func (ad *AlertDelete) sqlExec(ctx context.Context) (int, error) {
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
ad.mutation.done = true
return affected, err
}
@ -96,6 +61,12 @@ type AlertDeleteOne struct {
ad *AlertDelete
}
// Where appends a list predicates to the AlertDelete builder.
func (ado *AlertDeleteOne) Where(ps ...predicate.Alert) *AlertDeleteOne {
ado.ad.mutation.Where(ps...)
return ado
}
// Exec executes the deletion query.
func (ado *AlertDeleteOne) Exec(ctx context.Context) error {
n, err := ado.ad.Exec(ctx)
@ -111,5 +82,7 @@ func (ado *AlertDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs.
func (ado *AlertDeleteOne) ExecX(ctx context.Context) {
ado.ad.ExecX(ctx)
if err := ado.Exec(ctx); err != nil {
panic(err)
}
}

View file

@ -22,11 +22,9 @@ import (
// AlertQuery is the builder for querying Alert entities.
type AlertQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
ctx *QueryContext
order []alert.OrderOption
inters []Interceptor
predicates []predicate.Alert
withOwner *MachineQuery
withDecisions *DecisionQuery
@ -44,34 +42,34 @@ func (aq *AlertQuery) Where(ps ...predicate.Alert) *AlertQuery {
return aq
}
// Limit adds a limit step to the query.
// Limit the number of records to be returned by this query.
func (aq *AlertQuery) Limit(limit int) *AlertQuery {
aq.limit = &limit
aq.ctx.Limit = &limit
return aq
}
// Offset adds an offset step to the query.
// Offset to start from.
func (aq *AlertQuery) Offset(offset int) *AlertQuery {
aq.offset = &offset
aq.ctx.Offset = &offset
return aq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (aq *AlertQuery) Unique(unique bool) *AlertQuery {
aq.unique = &unique
aq.ctx.Unique = &unique
return aq
}
// Order adds an order step to the query.
func (aq *AlertQuery) Order(o ...OrderFunc) *AlertQuery {
// Order specifies how the records should be ordered.
func (aq *AlertQuery) Order(o ...alert.OrderOption) *AlertQuery {
aq.order = append(aq.order, o...)
return aq
}
// QueryOwner chains the current query on the "owner" edge.
func (aq *AlertQuery) QueryOwner() *MachineQuery {
query := &MachineQuery{config: aq.config}
query := (&MachineClient{config: aq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := aq.prepareQuery(ctx); err != nil {
return nil, err
@ -93,7 +91,7 @@ func (aq *AlertQuery) QueryOwner() *MachineQuery {
// QueryDecisions chains the current query on the "decisions" edge.
func (aq *AlertQuery) QueryDecisions() *DecisionQuery {
query := &DecisionQuery{config: aq.config}
query := (&DecisionClient{config: aq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := aq.prepareQuery(ctx); err != nil {
return nil, err
@ -115,7 +113,7 @@ func (aq *AlertQuery) QueryDecisions() *DecisionQuery {
// QueryEvents chains the current query on the "events" edge.
func (aq *AlertQuery) QueryEvents() *EventQuery {
query := &EventQuery{config: aq.config}
query := (&EventClient{config: aq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := aq.prepareQuery(ctx); err != nil {
return nil, err
@ -137,7 +135,7 @@ func (aq *AlertQuery) QueryEvents() *EventQuery {
// QueryMetas chains the current query on the "metas" edge.
func (aq *AlertQuery) QueryMetas() *MetaQuery {
query := &MetaQuery{config: aq.config}
query := (&MetaClient{config: aq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := aq.prepareQuery(ctx); err != nil {
return nil, err
@ -160,7 +158,7 @@ func (aq *AlertQuery) QueryMetas() *MetaQuery {
// First returns the first Alert entity from the query.
// Returns a *NotFoundError when no Alert was found.
func (aq *AlertQuery) First(ctx context.Context) (*Alert, error) {
nodes, err := aq.Limit(1).All(ctx)
nodes, err := aq.Limit(1).All(setContextOp(ctx, aq.ctx, "First"))
if err != nil {
return nil, err
}
@ -183,7 +181,7 @@ func (aq *AlertQuery) FirstX(ctx context.Context) *Alert {
// Returns a *NotFoundError when no Alert ID was found.
func (aq *AlertQuery) FirstID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = aq.Limit(1).IDs(ctx); err != nil {
if ids, err = aq.Limit(1).IDs(setContextOp(ctx, aq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
@ -206,7 +204,7 @@ func (aq *AlertQuery) FirstIDX(ctx context.Context) int {
// Returns a *NotSingularError when more than one Alert entity is found.
// Returns a *NotFoundError when no Alert entities are found.
func (aq *AlertQuery) Only(ctx context.Context) (*Alert, error) {
nodes, err := aq.Limit(2).All(ctx)
nodes, err := aq.Limit(2).All(setContextOp(ctx, aq.ctx, "Only"))
if err != nil {
return nil, err
}
@ -234,7 +232,7 @@ func (aq *AlertQuery) OnlyX(ctx context.Context) *Alert {
// Returns a *NotFoundError when no entities are found.
func (aq *AlertQuery) OnlyID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = aq.Limit(2).IDs(ctx); err != nil {
if ids, err = aq.Limit(2).IDs(setContextOp(ctx, aq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
@ -259,10 +257,12 @@ func (aq *AlertQuery) OnlyIDX(ctx context.Context) int {
// All executes the query and returns a list of Alerts.
func (aq *AlertQuery) All(ctx context.Context) ([]*Alert, error) {
ctx = setContextOp(ctx, aq.ctx, "All")
if err := aq.prepareQuery(ctx); err != nil {
return nil, err
}
return aq.sqlAll(ctx)
qr := querierAll[[]*Alert, *AlertQuery]()
return withInterceptors[[]*Alert](ctx, aq, qr, aq.inters)
}
// AllX is like All, but panics if an error occurs.
@ -275,9 +275,12 @@ func (aq *AlertQuery) AllX(ctx context.Context) []*Alert {
}
// IDs executes the query and returns a list of Alert IDs.
func (aq *AlertQuery) IDs(ctx context.Context) ([]int, error) {
var ids []int
if err := aq.Select(alert.FieldID).Scan(ctx, &ids); err != nil {
func (aq *AlertQuery) IDs(ctx context.Context) (ids []int, err error) {
if aq.ctx.Unique == nil && aq.path != nil {
aq.Unique(true)
}
ctx = setContextOp(ctx, aq.ctx, "IDs")
if err = aq.Select(alert.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
@ -294,10 +297,11 @@ func (aq *AlertQuery) IDsX(ctx context.Context) []int {
// Count returns the count of the given query.
func (aq *AlertQuery) Count(ctx context.Context) (int, error) {
ctx = setContextOp(ctx, aq.ctx, "Count")
if err := aq.prepareQuery(ctx); err != nil {
return 0, err
}
return aq.sqlCount(ctx)
return withInterceptors[int](ctx, aq, querierCount[*AlertQuery](), aq.inters)
}
// CountX is like Count, but panics if an error occurs.
@ -311,10 +315,15 @@ func (aq *AlertQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph.
func (aq *AlertQuery) Exist(ctx context.Context) (bool, error) {
if err := aq.prepareQuery(ctx); err != nil {
return false, err
ctx = setContextOp(ctx, aq.ctx, "Exist")
switch _, err := aq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
return aq.sqlExist(ctx)
}
// ExistX is like Exist, but panics if an error occurs.
@ -334,25 +343,24 @@ func (aq *AlertQuery) Clone() *AlertQuery {
}
return &AlertQuery{
config: aq.config,
limit: aq.limit,
offset: aq.offset,
order: append([]OrderFunc{}, aq.order...),
ctx: aq.ctx.Clone(),
order: append([]alert.OrderOption{}, aq.order...),
inters: append([]Interceptor{}, aq.inters...),
predicates: append([]predicate.Alert{}, aq.predicates...),
withOwner: aq.withOwner.Clone(),
withDecisions: aq.withDecisions.Clone(),
withEvents: aq.withEvents.Clone(),
withMetas: aq.withMetas.Clone(),
// clone intermediate query.
sql: aq.sql.Clone(),
path: aq.path,
unique: aq.unique,
sql: aq.sql.Clone(),
path: aq.path,
}
}
// WithOwner tells the query-builder to eager-load the nodes that are connected to
// the "owner" edge. The optional arguments are used to configure the query builder of the edge.
func (aq *AlertQuery) WithOwner(opts ...func(*MachineQuery)) *AlertQuery {
query := &MachineQuery{config: aq.config}
query := (&MachineClient{config: aq.config}).Query()
for _, opt := range opts {
opt(query)
}
@ -363,7 +371,7 @@ func (aq *AlertQuery) WithOwner(opts ...func(*MachineQuery)) *AlertQuery {
// WithDecisions tells the query-builder to eager-load the nodes that are connected to
// the "decisions" edge. The optional arguments are used to configure the query builder of the edge.
func (aq *AlertQuery) WithDecisions(opts ...func(*DecisionQuery)) *AlertQuery {
query := &DecisionQuery{config: aq.config}
query := (&DecisionClient{config: aq.config}).Query()
for _, opt := range opts {
opt(query)
}
@ -374,7 +382,7 @@ func (aq *AlertQuery) WithDecisions(opts ...func(*DecisionQuery)) *AlertQuery {
// WithEvents tells the query-builder to eager-load the nodes that are connected to
// the "events" edge. The optional arguments are used to configure the query builder of the edge.
func (aq *AlertQuery) WithEvents(opts ...func(*EventQuery)) *AlertQuery {
query := &EventQuery{config: aq.config}
query := (&EventClient{config: aq.config}).Query()
for _, opt := range opts {
opt(query)
}
@ -385,7 +393,7 @@ func (aq *AlertQuery) WithEvents(opts ...func(*EventQuery)) *AlertQuery {
// WithMetas tells the query-builder to eager-load the nodes that are connected to
// the "metas" edge. The optional arguments are used to configure the query builder of the edge.
func (aq *AlertQuery) WithMetas(opts ...func(*MetaQuery)) *AlertQuery {
query := &MetaQuery{config: aq.config}
query := (&MetaClient{config: aq.config}).Query()
for _, opt := range opts {
opt(query)
}
@ -408,16 +416,11 @@ func (aq *AlertQuery) WithMetas(opts ...func(*MetaQuery)) *AlertQuery {
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (aq *AlertQuery) GroupBy(field string, fields ...string) *AlertGroupBy {
grbuild := &AlertGroupBy{config: aq.config}
grbuild.fields = append([]string{field}, fields...)
grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
if err := aq.prepareQuery(ctx); err != nil {
return nil, err
}
return aq.sqlQuery(ctx), nil
}
aq.ctx.Fields = append([]string{field}, fields...)
grbuild := &AlertGroupBy{build: aq}
grbuild.flds = &aq.ctx.Fields
grbuild.label = alert.Label
grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
grbuild.scan = grbuild.Scan
return grbuild
}
@ -434,15 +437,30 @@ func (aq *AlertQuery) GroupBy(field string, fields ...string) *AlertGroupBy {
// Select(alert.FieldCreatedAt).
// Scan(ctx, &v)
func (aq *AlertQuery) Select(fields ...string) *AlertSelect {
aq.fields = append(aq.fields, fields...)
selbuild := &AlertSelect{AlertQuery: aq}
selbuild.label = alert.Label
selbuild.flds, selbuild.scan = &aq.fields, selbuild.Scan
return selbuild
aq.ctx.Fields = append(aq.ctx.Fields, fields...)
sbuild := &AlertSelect{AlertQuery: aq}
sbuild.label = alert.Label
sbuild.flds, sbuild.scan = &aq.ctx.Fields, sbuild.Scan
return sbuild
}
// Aggregate returns a AlertSelect configured with the given aggregations.
func (aq *AlertQuery) Aggregate(fns ...AggregateFunc) *AlertSelect {
return aq.Select().Aggregate(fns...)
}
func (aq *AlertQuery) prepareQuery(ctx context.Context) error {
for _, f := range aq.fields {
for _, inter := range aq.inters {
if inter == nil {
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
}
if trv, ok := inter.(Traverser); ok {
if err := trv.Traverse(ctx, aq); err != nil {
return err
}
}
}
for _, f := range aq.ctx.Fields {
if !alert.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
@ -536,6 +554,9 @@ func (aq *AlertQuery) loadOwner(ctx context.Context, query *MachineQuery, nodes
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
if len(ids) == 0 {
return nil
}
query.Where(machine.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
@ -562,8 +583,11 @@ func (aq *AlertQuery) loadDecisions(ctx context.Context, query *DecisionQuery, n
init(nodes[i])
}
}
if len(query.ctx.Fields) > 0 {
query.ctx.AppendFieldOnce(decision.FieldAlertDecisions)
}
query.Where(predicate.Decision(func(s *sql.Selector) {
s.Where(sql.InValues(alert.DecisionsColumn, fks...))
s.Where(sql.InValues(s.C(alert.DecisionsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@ -573,7 +597,7 @@ func (aq *AlertQuery) loadDecisions(ctx context.Context, query *DecisionQuery, n
fk := n.AlertDecisions
node, ok := nodeids[fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "alert_decisions" returned %v for node %v`, fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "alert_decisions" returned %v for node %v`, fk, n.ID)
}
assign(node, n)
}
@ -589,8 +613,11 @@ func (aq *AlertQuery) loadEvents(ctx context.Context, query *EventQuery, nodes [
init(nodes[i])
}
}
if len(query.ctx.Fields) > 0 {
query.ctx.AppendFieldOnce(event.FieldAlertEvents)
}
query.Where(predicate.Event(func(s *sql.Selector) {
s.Where(sql.InValues(alert.EventsColumn, fks...))
s.Where(sql.InValues(s.C(alert.EventsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@ -600,7 +627,7 @@ func (aq *AlertQuery) loadEvents(ctx context.Context, query *EventQuery, nodes [
fk := n.AlertEvents
node, ok := nodeids[fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "alert_events" returned %v for node %v`, fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "alert_events" returned %v for node %v`, fk, n.ID)
}
assign(node, n)
}
@ -616,8 +643,11 @@ func (aq *AlertQuery) loadMetas(ctx context.Context, query *MetaQuery, nodes []*
init(nodes[i])
}
}
if len(query.ctx.Fields) > 0 {
query.ctx.AppendFieldOnce(meta.FieldAlertMetas)
}
query.Where(predicate.Meta(func(s *sql.Selector) {
s.Where(sql.InValues(alert.MetasColumn, fks...))
s.Where(sql.InValues(s.C(alert.MetasColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@ -627,7 +657,7 @@ func (aq *AlertQuery) loadMetas(ctx context.Context, query *MetaQuery, nodes []*
fk := n.AlertMetas
node, ok := nodeids[fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "alert_metas" returned %v for node %v`, fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "alert_metas" returned %v for node %v`, fk, n.ID)
}
assign(node, n)
}
@ -636,41 +666,22 @@ func (aq *AlertQuery) loadMetas(ctx context.Context, query *MetaQuery, nodes []*
func (aq *AlertQuery) sqlCount(ctx context.Context) (int, error) {
_spec := aq.querySpec()
_spec.Node.Columns = aq.fields
if len(aq.fields) > 0 {
_spec.Unique = aq.unique != nil && *aq.unique
_spec.Node.Columns = aq.ctx.Fields
if len(aq.ctx.Fields) > 0 {
_spec.Unique = aq.ctx.Unique != nil && *aq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, aq.driver, _spec)
}
func (aq *AlertQuery) sqlExist(ctx context.Context) (bool, error) {
switch _, err := aq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
}
func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec {
_spec := &sqlgraph.QuerySpec{
Node: &sqlgraph.NodeSpec{
Table: alert.Table,
Columns: alert.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
},
From: aq.sql,
Unique: true,
}
if unique := aq.unique; unique != nil {
_spec := sqlgraph.NewQuerySpec(alert.Table, alert.Columns, sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt))
_spec.From = aq.sql
if unique := aq.ctx.Unique; unique != nil {
_spec.Unique = *unique
} else if aq.path != nil {
_spec.Unique = true
}
if fields := aq.fields; len(fields) > 0 {
if fields := aq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, alert.FieldID)
for i := range fields {
@ -686,10 +697,10 @@ func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec {
}
}
}
if limit := aq.limit; limit != nil {
if limit := aq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
if offset := aq.offset; offset != nil {
if offset := aq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := aq.order; len(ps) > 0 {
@ -705,7 +716,7 @@ func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec {
func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(aq.driver.Dialect())
t1 := builder.Table(alert.Table)
columns := aq.fields
columns := aq.ctx.Fields
if len(columns) == 0 {
columns = alert.Columns
}
@ -714,7 +725,7 @@ func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = aq.sql
selector.Select(selector.Columns(columns...)...)
}
if aq.unique != nil && *aq.unique {
if aq.ctx.Unique != nil && *aq.ctx.Unique {
selector.Distinct()
}
for _, p := range aq.predicates {
@ -723,12 +734,12 @@ func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range aq.order {
p(selector)
}
if offset := aq.offset; offset != nil {
if offset := aq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
if limit := aq.limit; limit != nil {
if limit := aq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
@ -736,13 +747,8 @@ func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector {
// AlertGroupBy is the group-by builder for Alert entities.
type AlertGroupBy struct {
config
selector
fields []string
fns []AggregateFunc
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
build *AlertQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
@ -751,74 +757,77 @@ func (agb *AlertGroupBy) Aggregate(fns ...AggregateFunc) *AlertGroupBy {
return agb
}
// Scan applies the group-by query and scans the result into the given value.
// Scan applies the selector query and scans the result into the given value.
func (agb *AlertGroupBy) Scan(ctx context.Context, v any) error {
query, err := agb.path(ctx)
if err != nil {
ctx = setContextOp(ctx, agb.build.ctx, "GroupBy")
if err := agb.build.prepareQuery(ctx); err != nil {
return err
}
agb.sql = query
return agb.sqlScan(ctx, v)
return scanWithInterceptors[*AlertQuery, *AlertGroupBy](ctx, agb.build, agb, agb.build.inters, v)
}
func (agb *AlertGroupBy) sqlScan(ctx context.Context, v any) error {
for _, f := range agb.fields {
if !alert.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
}
}
selector := agb.sqlQuery()
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := agb.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
func (agb *AlertGroupBy) sqlQuery() *sql.Selector {
selector := agb.sql.Select()
func (agb *AlertGroupBy) sqlScan(ctx context.Context, root *AlertQuery, v any) error {
selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(agb.fns))
for _, fn := range agb.fns {
aggregation = append(aggregation, fn(selector))
}
// If no columns were selected in a custom aggregation function, the default
// selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(agb.fields)+len(agb.fns))
for _, f := range agb.fields {
columns := make([]string, 0, len(*agb.flds)+len(agb.fns))
for _, f := range *agb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
return selector.GroupBy(selector.Columns(agb.fields...)...)
selector.GroupBy(selector.Columns(*agb.flds...)...)
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := agb.build.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
// AlertSelect is the builder for selecting fields of Alert entities.
type AlertSelect struct {
*AlertQuery
selector
// intermediate query (i.e. traversal path).
sql *sql.Selector
}
// Aggregate adds the given aggregation functions to the selector query.
func (as *AlertSelect) Aggregate(fns ...AggregateFunc) *AlertSelect {
as.fns = append(as.fns, fns...)
return as
}
// Scan applies the selector query and scans the result into the given value.
func (as *AlertSelect) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, as.ctx, "Select")
if err := as.prepareQuery(ctx); err != nil {
return err
}
as.sql = as.AlertQuery.sqlQuery(ctx)
return as.sqlScan(ctx, v)
return scanWithInterceptors[*AlertQuery, *AlertSelect](ctx, as.AlertQuery, as, as.inters, v)
}
func (as *AlertSelect) sqlScan(ctx context.Context, v any) error {
func (as *AlertSelect) sqlScan(ctx context.Context, root *AlertQuery, v any) error {
selector := root.sqlQuery(ctx)
aggregation := make([]string, 0, len(as.fns))
for _, fn := range as.fns {
aggregation = append(aggregation, fn(selector))
}
switch n := len(*as.selector.flds); {
case n == 0 && len(aggregation) > 0:
selector.Select(aggregation...)
case n != 0 && len(aggregation) > 0:
selector.AppendSelect(aggregation...)
}
rows := &sql.Rows{}
query, args := as.sql.Query()
query, args := selector.Query()
if err := as.driver.Query(ctx, query, args, rows); err != nil {
return err
}
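
For orientation, here is a minimal usage sketch of the regenerated AlertGroupBy builder above. It assumes the generated client from this repository, that the Alert schema exposes a scenario field (alert.FieldScenario), and the package-level ent.Count() aggregate; the function and variable names are illustrative and not part of this diff.

package example

import (
	"context"
	"log"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
)

// countAlertsPerScenario groups alerts by scenario and counts each group,
// exercising the Aggregate/Scan path of the regenerated AlertGroupBy builder.
// alert.FieldScenario is assumed to exist in the generated alert package.
func countAlertsPerScenario(ctx context.Context, client *ent.Client) error {
	var rows []struct {
		Scenario string `json:"scenario"`
		Count    int    `json:"count"`
	}
	if err := client.Alert.Query().
		GroupBy(alert.FieldScenario).
		Aggregate(ent.Count()).
		Scan(ctx, &rows); err != nil {
		return err
	}
	for _, r := range rows {
		log.Printf("%s: %d", r.Scenario, r.Count)
	}
	return nil
}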

File diff suppressed because it is too large

View file

@ -7,6 +7,7 @@ import (
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer"
)
@ -37,7 +38,8 @@ type Bouncer struct {
// LastPull holds the value of the "last_pull" field.
LastPull time.Time `json:"last_pull"`
// AuthType holds the value of the "auth_type" field.
AuthType string `json:"auth_type"`
AuthType string `json:"auth_type"`
selectValues sql.SelectValues
}
// scanValues returns the types for scanning values from sql.Rows.
@ -54,7 +56,7 @@ func (*Bouncer) scanValues(columns []string) ([]any, error) {
case bouncer.FieldCreatedAt, bouncer.FieldUpdatedAt, bouncer.FieldUntil, bouncer.FieldLastPull:
values[i] = new(sql.NullTime)
default:
return nil, fmt.Errorf("unexpected column %q for type Bouncer", columns[i])
values[i] = new(sql.UnknownType)
}
}
return values, nil
@ -142,16 +144,24 @@ func (b *Bouncer) assignValues(columns []string, values []any) error {
} else if value.Valid {
b.AuthType = value.String
}
default:
b.selectValues.Set(columns[i], values[i])
}
}
return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the Bouncer.
// This includes values selected through modifiers, order, etc.
func (b *Bouncer) Value(name string) (ent.Value, error) {
return b.selectValues.Get(name)
}
// Update returns a builder for updating this Bouncer.
// Note that you need to call Bouncer.Unwrap() before calling this method if this Bouncer
// was returned from a transaction, and the transaction was committed or rolled back.
func (b *Bouncer) Update() *BouncerUpdateOne {
return (&BouncerClient{config: b.config}).UpdateOne(b)
return NewBouncerClient(b.config).UpdateOne(b)
}
// Unwrap unwraps the Bouncer entity that was returned from a transaction after it was closed,
@ -212,9 +222,3 @@ func (b *Bouncer) String() string {
// Bouncers is a parsable slice of Bouncer.
type Bouncers []*Bouncer
func (b Bouncers) config(cfg config) {
for _i := range b {
b[_i].config = cfg
}
}

View file

@ -4,6 +4,8 @@ package bouncer
import (
"time"
"entgo.io/ent/dialect/sql"
)
const (
@ -81,3 +83,66 @@ var (
// DefaultAuthType holds the default value on creation for the "auth_type" field.
DefaultAuthType string
)
// OrderOption defines the ordering options for the Bouncer queries.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc()
}
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}
// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldName, opts...).ToFunc()
}
// ByAPIKey orders the results by the api_key field.
func ByAPIKey(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldAPIKey, opts...).ToFunc()
}
// ByRevoked orders the results by the revoked field.
func ByRevoked(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldRevoked, opts...).ToFunc()
}
// ByIPAddress orders the results by the ip_address field.
func ByIPAddress(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldIPAddress, opts...).ToFunc()
}
// ByType orders the results by the type field.
func ByType(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldType, opts...).ToFunc()
}
// ByVersion orders the results by the version field.
func ByVersion(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldVersion, opts...).ToFunc()
}
// ByUntil orders the results by the until field.
func ByUntil(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUntil, opts...).ToFunc()
}
// ByLastPull orders the results by the last_pull field.
func ByLastPull(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldLastPull, opts...).ToFunc()
}
// ByAuthType orders the results by the auth_type field.
func ByAuthType(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldAuthType, opts...).ToFunc()
}
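
The OrderOption helpers above plug straight into the generated query builders. A minimal sketch, assuming the generated client from this repository and ent's sql.OrderDesc term option; the function name is illustrative.

package example

import (
	"context"
	"log"

	"entgo.io/ent/dialect/sql"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer"
)

// listBouncersByLastPull returns bouncers ordered by last_pull, newest first,
// using the ByLastPull OrderOption generated in this diff.
func listBouncersByLastPull(ctx context.Context, client *ent.Client) ([]*ent.Bouncer, error) {
	bouncers, err := client.Bouncer.Query().
		Order(bouncer.ByLastPull(sql.OrderDesc())).
		All(ctx)
	if err != nil {
		return nil, err
	}
	for _, b := range bouncers {
		log.Printf("%s last pulled at %s", b.Name, b.LastPull)
	}
	return bouncers, nil
}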

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff