Browse Source

Merge branch 'master' into matrix_version

Manuel Sabban 2 years ago
parent
commit
c8ce9049f7
100 changed files with 4290 additions and 1425 deletions
  1. 1 0
      .github/workflows/bats-mysql.yml
  2. 13 6
      .github/workflows/ci-windows-build-msi.yml
  3. 18 8
      .github/workflows/dispatch_create_branch_hub.yaml
  4. 18 8
      .github/workflows/dispatch_delete_branch_hub.yaml
  5. 1 0
      .github/workflows/go-tests-windows.yml
  6. 16 7
      .github/workflows/go-tests.yml
  7. 34 33
      .github/workflows/release_publish-package.yml
  8. 4 4
      .github/workflows/release_publish_docker-image-debian.yml
  9. 13 20
      .github/workflows/release_publish_docker-image.yml
  10. 8 65
      Dockerfile
  11. 10 65
      Dockerfile.debian
  12. 1 1
      Makefile
  13. 24 3
      cmd/crowdsec-cli/alerts.go
  14. 110 77
      cmd/crowdsec-cli/bouncers.go
  15. 1 0
      cmd/crowdsec-cli/capi.go
  16. 4 489
      cmd/crowdsec-cli/config.go
  17. 170 0
      cmd/crowdsec-cli/config_backup.go
  18. 225 0
      cmd/crowdsec-cli/config_restore.go
  19. 217 0
      cmd/crowdsec-cli/config_show.go
  20. 27 13
      cmd/crowdsec-cli/console.go
  21. 6 0
      cmd/crowdsec-cli/console_table.go
  22. 485 99
      cmd/crowdsec-cli/lapi.go
  23. 183 137
      cmd/crowdsec-cli/machines.go
  24. 20 4
      cmd/crowdsec-cli/main.go
  25. 14 2
      cmd/crowdsec-cli/metrics.go
  26. 35 0
      cmd/crowdsec-cli/metrics_table.go
  27. 76 48
      cmd/crowdsec-cli/postoverflows.go
  28. 19 4
      cmd/crowdsec-cli/support.go
  29. 25 3
      cmd/crowdsec-cli/utils.go
  30. 1 1
      cmd/crowdsec/crowdsec.go
  31. 47 41
      cmd/crowdsec/main.go
  32. 9 2
      cmd/crowdsec/metrics.go
  33. 1 1
      cmd/crowdsec/run_in_svc.go
  34. 1 1
      cmd/crowdsec/run_in_svc_windows.go
  35. 1 1
      cmd/crowdsec/serve.go
  36. 1 1
      cmd/crowdsec/win_service.go
  37. 1 0
      config/config.yaml
  38. 1 0
      config/config_win.yaml
  39. 1 0
      config/console.yaml
  40. 0 0
      config/context.yaml
  41. 2 0
      config/crowdsec.service
  42. 2 1
      debian/rules
  43. 34 14
      docker/README.md
  44. 3 5
      docker/config.yaml
  45. 93 53
      docker/docker_start.sh
  46. 3 0
      go.mod
  47. 8 1
      go.sum
  48. 1 1
      pkg/acquisition/acquisition_test.go
  49. 2 2
      pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go
  50. 157 0
      pkg/alertcontext/alertcontext.go
  51. 201 0
      pkg/alertcontext/alertcontext_test.go
  52. 120 20
      pkg/apiclient/auth.go
  53. 1 1
      pkg/apiclient/auth_service_test.go
  54. 2 2
      pkg/apiclient/client.go
  55. 15 4
      pkg/apiserver/apic.go
  56. 1 0
      pkg/apiserver/apic_test.go
  57. 119 0
      pkg/cache/cache.go
  58. 30 0
      pkg/cache/cache_test.go
  59. 11 8
      pkg/csconfig/api.go
  60. 1 0
      pkg/csconfig/api_test.go
  61. 2 1
      pkg/csconfig/config.go
  62. 3 3
      pkg/csconfig/config_test.go
  63. 10 1
      pkg/csconfig/console.go
  64. 67 19
      pkg/csconfig/crowdsec_service.go
  65. 64 39
      pkg/csconfig/crowdsec_service_test.go
  66. 2 0
      pkg/csconfig/tests/context.yaml
  67. 50 0
      pkg/cstest/utils.go
  68. 24 2
      pkg/exprhelpers/exprlib.go
  69. 45 0
      pkg/exprhelpers/exprlib_test.go
  70. 19 0
      pkg/fflag/crowdsec.go
  71. 264 0
      pkg/fflag/features.go
  72. 397 0
      pkg/fflag/features_test.go
  73. 27 0
      pkg/hubtest/parser_assert.go
  74. 25 11
      pkg/leakybucket/bucket.go
  75. 2 2
      pkg/leakybucket/buckets_test.go
  76. 61 0
      pkg/leakybucket/conditional.go
  77. 76 42
      pkg/leakybucket/manager_load.go
  78. 10 4
      pkg/leakybucket/overflows.go
  79. 7 0
      pkg/leakybucket/processor.go
  80. 6 0
      pkg/leakybucket/reset_filter.go
  81. 11 0
      pkg/leakybucket/tests/conditional-bucket/bucket.yaml
  82. 1 0
      pkg/leakybucket/tests/conditional-bucket/scenarios.yaml
  83. 50 0
      pkg/leakybucket/tests/conditional-bucket/test.json
  84. 5 2
      pkg/leakybucket/timemachine.go
  85. 6 0
      pkg/leakybucket/uniq.go
  86. 102 15
      pkg/models/add_signals_request_item.go
  87. 16 10
      pkg/parser/enrich_date.go
  88. 103 4
      pkg/parser/node.go
  89. 4 3
      pkg/parser/parsing_test.go
  90. BIN
      pkg/parser/test_data/GeoLite2-ASN.mmdb
  91. 31 0
      pkg/parser/tests/base-grok-stash/base-grok-stash.yaml
  92. 2 0
      pkg/parser/tests/base-grok-stash/parsers.yaml
  93. 63 0
      pkg/parser/tests/base-grok-stash/test.yaml
  94. 1 0
      pkg/parser/tests/geoip-enrich/base-grok.yaml
  95. 3 4
      pkg/parser/tests/geoip-enrich/test.yaml
  96. 41 0
      pkg/parser/unix_parser.go
  97. 14 1
      pkg/types/grok_pattern.go
  98. 24 5
      pkg/yamlpatch/patcher.go
  99. 1 1
      rpm/SOURCES/crowdsec.unit.patch
  100. 3 0
      rpm/SPECS/crowdsec.spec

+ 1 - 0
.github/workflows/bats-mysql.yml

@@ -10,6 +10,7 @@ on:
 env:
 env:
   PREFIX_TEST_NAMES_WITH_FILE: true
   PREFIX_TEST_NAMES_WITH_FILE: true
 
 
+
 jobs:
 jobs:
 
 
   build:
   build:

+ 13 - 6
.github/workflows/ci-windows-build-msi.yml

@@ -11,6 +11,18 @@ on:
       - 'README.md'
       - 'README.md'
 
 
 jobs:
 jobs:
+  get_latest_release:
+    name: get_latest_release
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+    steps:
+    - id: get_latest_release
+      uses: cardinalby/git-get-release-action@cedef2faf69cb7c55b285bad07688d04430b7ada
+      env:
+        GITHUB_TOKEN: ${{ github.token }}
+      with:
+        latest: true
 
 
   build:
   build:
     name: Build
     name: Build
@@ -23,13 +35,8 @@ jobs:
       id: go
       id: go
     - name: Check out code into the Go module directory
     - name: Check out code into the Go module directory
       uses: actions/checkout@v2
       uses: actions/checkout@v2
-    - id: get_latest_release
-      uses: pozetroninc/github-action-get-latest-release@master
-      with:
-        repository: crowdsecurity/crowdsec
-        excludes: draft
     - id: set_release_in_env
     - id: set_release_in_env
-      run: echo "BUILD_VERSION=${{ steps.get_latest_release.outputs.release }}" >> $env:GITHUB_ENV
+      run: echo "BUILD_VERSION=${{ jobs.get_latest_release.outputs.tag_name }}" >> $env:GITHUB_ENV
     - name: Build
     - name: Build
       run: make windows_installer
       run: make windows_installer
     - name: Upload MSI
     - name: Upload MSI

+ 18 - 8
.github/workflows/dispatch_create_branch_hub.yaml

@@ -2,23 +2,33 @@ name: Dispatch to hub when creating pre-release
 
 
 on:
 on:
   release:
   release:
-    types: prereleased
+    types:
+      - prereleased
 
 
 jobs:
 jobs:
+  get_latest_release:
+    name: get_latest_release
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+    steps:
+    - id: get_latest_release
+      uses: cardinalby/git-get-release-action@cedef2faf69cb7c55b285bad07688d04430b7ada
+      env:
+        GITHUB_TOKEN: ${{ github.token }}
+      with:
+        latest: true
+        draft: false
+        prerelease: false
+
   dispatch:
   dispatch:
     name: dispatch to hub-tests
     name: dispatch to hub-tests
     runs-on: ubuntu-latest
     runs-on: ubuntu-latest
     steps:
     steps:
-      - id: keydb
-        uses: pozetroninc/github-action-get-latest-release@master
-        with:
-          owner: crowdsecurity
-          repo: crowdsec
-          excludes: prerelease, draft
       - name: Repository Dispatch
       - name: Repository Dispatch
         uses: peter-evans/repository-dispatch@v1
         uses: peter-evans/repository-dispatch@v1
         with:
         with:
           token: ${{ secrets.DISPATCH_TOKEN }}
           token: ${{ secrets.DISPATCH_TOKEN }}
           event-type: create_branch
           event-type: create_branch
           repository: crowdsecurity/hub
           repository: crowdsecurity/hub
-          client-payload: '{"version": "${{ steps.keydb.outputs.release }}"}'
+          client-payload: '{"version": "${{ jobs.get_latest_release.outputs.tag_name }}"}'

+ 18 - 8
.github/workflows/dispatch_delete_branch_hub.yaml

@@ -2,23 +2,33 @@ name: Dispatch to hub when deleting pre-release
 
 
 on:
 on:
   release:
   release:
-    types: deleted
+    types:
+      - deleted
 
 
 jobs:
 jobs:
+  get_latest_release:
+    name: get_latest_release
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+    steps:
+      - id: get_latest_release
+        uses: cardinalby/git-get-release-action@cedef2faf69cb7c55b285bad07688d04430b7ada
+        env:
+          GITHUB_TOKEN: ${{ github.token }}
+        with:
+          latest: true
+          draft: false
+          prerelease: false
+
   dispatch:
   dispatch:
     name: dispatch to hub-tests
     name: dispatch to hub-tests
     runs-on: ubuntu-latest
     runs-on: ubuntu-latest
     steps:
     steps:
-      - id: keydb
-        uses: pozetroninc/github-action-get-latest-release@master
-        with:
-          owner: crowdsecurity
-          repo: crowdsec
-          excludes: prerelease, draft
       - name: Repository Dispatch
       - name: Repository Dispatch
         uses: peter-evans/repository-dispatch@v1
         uses: peter-evans/repository-dispatch@v1
         with:
         with:
           token: ${{ secrets.DISPATCH_TOKEN }}
           token: ${{ secrets.DISPATCH_TOKEN }}
           event-type: delete_branch
           event-type: delete_branch
           repository: crowdsecurity/hub
           repository: crowdsecurity/hub
-          client-payload: '{"version": "${{ steps.keydb.outputs.release }}"}'
+          client-payload: '{"version": "${{ jobs.get_latest_release.outputs.tag_name }}"}'

+ 1 - 0
.github/workflows/go-tests-windows.yml

@@ -16,6 +16,7 @@ on:
 
 
 env:
 env:
   RICHGO_FORCE_COLOR: 1
   RICHGO_FORCE_COLOR: 1
+  CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF: true
 
 
 jobs:
 jobs:
 
 

+ 16 - 7
.github/workflows/go-tests.yml

@@ -23,7 +23,6 @@ on:
 env:
 env:
   RICHGO_FORCE_COLOR: 1
   RICHGO_FORCE_COLOR: 1
   AWS_HOST: localstack
   AWS_HOST: localstack
-  SERVICES: cloudwatch,logs,kinesis
   # these are to mimic aws config
   # these are to mimic aws config
   AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
   AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
   AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
   AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
@@ -31,29 +30,27 @@ env:
   # and to override our endpoint in aws sdk
   # and to override our endpoint in aws sdk
   AWS_ENDPOINT_FORCE: http://localhost:4566
   AWS_ENDPOINT_FORCE: http://localhost:4566
   KINESIS_INITIALIZE_STREAMS: "stream-1-shard:1,stream-2-shards:2"
   KINESIS_INITIALIZE_STREAMS: "stream-1-shard:1,stream-2-shards:2"
+  CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF: true
 
 
 jobs:
 jobs:
 
 
   build:
   build:
     strategy:
     strategy:
       matrix:
       matrix:
-        go-version: [1.18, 1.x]
+        go-version: ["1.19", "1.20.0-rc.1"]
 
 
     name: "Build + tests"
     name: "Build + tests"
     runs-on: ubuntu-latest
     runs-on: ubuntu-latest
     services:
     services:
       localstack:
       localstack:
-        image: localstack/localstack:0.13.3
+        image: localstack/localstack:1.3.0
         ports:
         ports:
         - 4566:4566  # Localstack exposes all services on the same port
         - 4566:4566  # Localstack exposes all services on the same port
         env:
         env:
-          SERVICES: ${{ env.SERVICES }}
           DEBUG: ""
           DEBUG: ""
-          DATA_DIR: ""
           LAMBDA_EXECUTOR: ""
           LAMBDA_EXECUTOR: ""
           KINESIS_ERROR_PROBABILITY: ""
           KINESIS_ERROR_PROBABILITY: ""
           DOCKER_HOST: unix:///var/run/docker.sock
           DOCKER_HOST: unix:///var/run/docker.sock
-          HOST_TMP_FOLDER: "/tmp"
           KINESIS_INITIALIZE_STREAMS: ${{ env.KINESIS_INITIALIZE_STREAMS }}
           KINESIS_INITIALIZE_STREAMS: ${{ env.KINESIS_INITIALIZE_STREAMS }}
           HOSTNAME_EXTERNAL: ${{ env.AWS_HOST }}  # Required so that resource urls are provided properly
           HOSTNAME_EXTERNAL: ${{ env.AWS_HOST }}  # Required so that resource urls are provided properly
           # e.g sqs url will get localhost if we don't set this env to map our service
           # e.g sqs url will get localhost if we don't set this env to map our service
@@ -64,7 +61,7 @@ jobs:
           --health-timeout=5s
           --health-timeout=5s
           --health-retries=3
           --health-retries=3
       zoo1:
       zoo1:
-        image: confluentinc/cp-zookeeper:7.1.1
+        image: confluentinc/cp-zookeeper:7.3.0
         ports:
         ports:
           - "2181:2181"
           - "2181:2181"
         env:
         env:
@@ -128,6 +125,18 @@ jobs:
         fetch-depth: 0
         fetch-depth: 0
         submodules: false
         submodules: false
 
 
+    - name: Cache Go modules
+      uses: actions/cache@v2
+      with:
+        path: |
+          ~/go/pkg/mod
+          ~/.cache/go-build
+          ~/Library/Caches/go-build
+          %LocalAppData%\go-build
+        key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
+        restore-keys: |
+          ${{ runner.os }}-${{ matrix.go-version }}-go-
+
     - name: Build and run tests
     - name: Build and run tests
       run: |
       run: |
         go install github.com/ory/go-acc@v0.2.8
         go install github.com/ory/go-acc@v0.2.8

+ 34 - 33
.github/workflows/release_publish-package.yml

@@ -3,46 +3,47 @@ name: build
 
 
 on:
 on:
   release:
   release:
-    types: prereleased
+    types:
+      - prereleased
 
 
 jobs:
 jobs:
   build:
   build:
     name: Build and upload binary package
     name: Build and upload binary package
     runs-on: ubuntu-latest
     runs-on: ubuntu-latest
     steps:
     steps:
-    - name: Set up Go 1.19
-      uses: actions/setup-go@v3
-      with:
-        go-version: 1.19
-      id: go
-    - name: Check out code into the Go module directory
-      uses: actions/checkout@v3
-    - name: Build the binaries
-      run: make release
-    - name: Upload to release
-      uses: JasonEtco/upload-to-release@master
-      with:
-        args: crowdsec-release.tgz application/x-gzip
-      env:
-        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: Set up Go 1.19
+        uses: actions/setup-go@v3
+        with:
+          go-version: 1.19
+        id: go
+      - name: Check out code into the Go module directory
+        uses: actions/checkout@v3
+      - name: Build the binaries
+        run: make release
+      - name: Upload to release
+        uses: JasonEtco/upload-to-release@master
+        with:
+          args: crowdsec-release.tgz application/x-gzip
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
   build_static:
   build_static:
     name: Build and upload binary package
     name: Build and upload binary package
     runs-on: ubuntu-latest
     runs-on: ubuntu-latest
     steps:
     steps:
-    - name: Set up Go 1.19
-      uses: actions/setup-go@v3
-      with:
-        go-version: 1.19
-      id: go
-    - name: Check out code into the Go module directory
-      uses: actions/checkout@v3
-    - name: Build the binaries
-      run: |
-        make release BUILD_STATIC=yes
-        mv crowdsec-release.tgz crowdsec-release-static.tgz
-    - name: Upload to release
-      uses: JasonEtco/upload-to-release@master
-      with:
-        args: crowdsec-release-static.tgz application/x-gzip
-      env:
-        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: Set up Go 1.19
+        uses: actions/setup-go@v3
+        with:
+          go-version: 1.19
+        id: go
+      - name: Check out code into the Go module directory
+        uses: actions/checkout@v3
+      - name: Build the binaries
+        run: |
+          make release BUILD_STATIC=yes
+          mv crowdsec-release.tgz crowdsec-release-static.tgz
+      - name: Upload to release
+        uses: JasonEtco/upload-to-release@master
+        with:
+          args: crowdsec-release-static.tgz application/x-gzip
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

+ 4 - 4
.github/workflows/release_publish_docker-image-debian.yml

@@ -37,19 +37,19 @@ jobs:
           echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
           echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
       -
       -
         name: Set up QEMU
         name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v2
       -
       -
         name: Set up Docker Buildx
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
       -
       -
         name: Login to DockerHub
         name: Login to DockerHub
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
         with:
           username: ${{ secrets.DOCKER_USERNAME }}
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}
           password: ${{ secrets.DOCKER_PASSWORD }}
       -
       -
         name: Build and push
         name: Build and push
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
         with:
           context: .
           context: .
           file: ./Dockerfile.debian
           file: ./Dockerfile.debian

+ 13 - 20
.github/workflows/release_publish_docker-image.yml

@@ -11,11 +11,9 @@ jobs:
     name: Push Docker image to Docker Hub
     name: Push Docker image to Docker Hub
     runs-on: ubuntu-latest
     runs-on: ubuntu-latest
     steps:
     steps:
-      -
-        name: Check out the repo
+      - name: Check out the repo
         uses: actions/checkout@v3
         uses: actions/checkout@v3
-      -
-        name: Prepare
+      - name: Prepare
         id: prep
         id: prep
         run: |
         run: |
           DOCKER_IMAGE=crowdsecurity/crowdsec
           DOCKER_IMAGE=crowdsecurity/crowdsec
@@ -32,21 +30,18 @@ jobs:
           TAGS_SLIM="${DOCKER_IMAGE}:${VERSION}-slim"
           TAGS_SLIM="${DOCKER_IMAGE}:${VERSION}-slim"
           if [[ ${{ github.event.action }} == released ]]; then
           if [[ ${{ github.event.action }} == released ]]; then
             TAGS=$TAGS,${DOCKER_IMAGE}:latest,${GHCR_IMAGE}:latest
             TAGS=$TAGS,${DOCKER_IMAGE}:latest,${GHCR_IMAGE}:latest
-            TAGS_SLIM=$TAGS,${DOCKER_IMAGE}:slim
+            TAGS_SLIM=$TAGS_SLIM,${DOCKER_IMAGE}:slim
           fi
           fi
           echo ::set-output name=version::${VERSION}
           echo ::set-output name=version::${VERSION}
           echo ::set-output name=tags::${TAGS}
           echo ::set-output name=tags::${TAGS}
           echo ::set-output name=tags_slim::${TAGS_SLIM}
           echo ::set-output name=tags_slim::${TAGS_SLIM}
           echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
           echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
-      -
-        name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
-      -
-        name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-      -
-        name: Login to DockerHub
-        uses: docker/login-action@v1
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to DockerHub
+        uses: docker/login-action@v2
         with:
         with:
           username: ${{ secrets.DOCKER_USERNAME }}
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}
           password: ${{ secrets.DOCKER_PASSWORD }}
@@ -58,9 +53,8 @@ jobs:
           username: ${{ github.repository_owner }}
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
           password: ${{ secrets.GITHUB_TOKEN }}
 
 
-      -
-        name: Build and push slim image
-        uses: docker/build-push-action@v2
+      - name: Build and push slim image
+        uses: docker/build-push-action@v3
         with:
         with:
           context: .
           context: .
           file: ./Dockerfile
           file: ./Dockerfile
@@ -74,9 +68,8 @@ jobs:
             org.opencontainers.image.created=${{ steps.prep.outputs.created }}
             org.opencontainers.image.created=${{ steps.prep.outputs.created }}
             org.opencontainers.image.revision=${{ github.sha }}
             org.opencontainers.image.revision=${{ github.sha }}
 
 
-      -
-        name: Build and push full image
-        uses: docker/build-push-action@v2
+      - name: Build and push full image
+        uses: docker/build-push-action@v3
         with:
         with:
           context: .
           context: .
           file: ./Dockerfile
           file: ./Dockerfile

+ 8 - 65
Dockerfile

@@ -10,87 +10,30 @@ COPY . .
 
 
 # wizard.sh requires GNU coreutils
 # wizard.sh requires GNU coreutils
 RUN apk add --no-cache git gcc libc-dev make bash gettext binutils-gold coreutils && \
 RUN apk add --no-cache git gcc libc-dev make bash gettext binutils-gold coreutils && \
+    echo "githubciXXXXXXXXXXXXXXXXXXXXXXXX" > /etc/machine-id && \
     SYSTEM="docker" make clean release && \
     SYSTEM="docker" make clean release && \
     cd crowdsec-v* && \
     cd crowdsec-v* && \
     ./wizard.sh --docker-mode && \
     ./wizard.sh --docker-mode && \
-    cd - && \
+    cd - >/dev/null && \
     cscli hub update && \
     cscli hub update && \
     cscli collections install crowdsecurity/linux && \
     cscli collections install crowdsecurity/linux && \
-    cscli parsers install crowdsecurity/whitelists
+    cscli parsers install crowdsecurity/whitelists && \
+    go install github.com/mikefarah/yq/v4@v4.30.6
 
 
 FROM alpine:latest as build-slim
 FROM alpine:latest as build-slim
 
 
-RUN apk add --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/community tzdata yq bash && \
+RUN apk add --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/community tzdata bash && \
     mkdir -p /staging/etc/crowdsec && \
     mkdir -p /staging/etc/crowdsec && \
     mkdir -p /staging/var/lib/crowdsec && \
     mkdir -p /staging/var/lib/crowdsec && \
-    mkdir -p /var/lib/crowdsec/data \
-    yq -n '.url="http://0.0.0.0:8080"' | install -m 0600 /dev/stdin /staging/etc/crowdsec/local_api_credentials.yaml
+    mkdir -p /var/lib/crowdsec/data
 
 
+COPY --from=build /go/bin/yq /usr/local/bin/yq
 COPY --from=build /etc/crowdsec /staging/etc/crowdsec
 COPY --from=build /etc/crowdsec /staging/etc/crowdsec
 COPY --from=build /usr/local/bin/crowdsec /usr/local/bin/crowdsec
 COPY --from=build /usr/local/bin/crowdsec /usr/local/bin/crowdsec
 COPY --from=build /usr/local/bin/cscli /usr/local/bin/cscli
 COPY --from=build /usr/local/bin/cscli /usr/local/bin/cscli
 COPY --from=build /go/src/crowdsec/docker/docker_start.sh /
 COPY --from=build /go/src/crowdsec/docker/docker_start.sh /
 COPY --from=build /go/src/crowdsec/docker/config.yaml /staging/etc/crowdsec/config.yaml
 COPY --from=build /go/src/crowdsec/docker/config.yaml /staging/etc/crowdsec/config.yaml
-
-# NOTE: setting default values here will overwrite the ones set in config.yaml
-#       every time the container is started. We set the default in docker/config.yaml
-#       and document them in docker/README.md, but keep the variables empty here.
-
-ENV CONFIG_FILE=/etc/crowdsec/config.yaml
-ENV LOCAL_API_URL=
-ENV CUSTOM_HOSTNAME=localhost
-ENV PLUGIN_DIR=
-ENV DISABLE_AGENT=false
-ENV DISABLE_LOCAL_API=false
-ENV DISABLE_ONLINE_API=false
-ENV DSN=
-ENV TYPE=
-ENV TEST_MODE=false
-ENV USE_WAL=
-
-# register to app.crowdsec.net
-
-ENV ENROLL_INSTANCE_NAME=
-ENV ENROLL_KEY=
-ENV ENROLL_TAGS=
-
-# log verbosity
-
-ENV LEVEL_TRACE=
-ENV LEVEL_DEBUG=
-ENV LEVEL_INFO=
-
-# TLS setup ----------------------------------- #
-
-ENV AGENT_USERNAME=
-ENV AGENT_PASSWORD=
-
-# TLS setup ----------------------------------- #
-
-ENV USE_TLS=false
-ENV CACERT_FILE=
-ENV CERT_FILE=
-ENV KEY_FILE=
-# comma-separated list of allowed OU values for TLS bouncer certificates
-ENV BOUNCERS_ALLOWED_OU=
-# comma-separated list of allowed OU values for TLS agent certificates
-ENV AGENTS_ALLOWED_OU=
-
-# Install the following hub items --------------#
-
-ENV COLLECTIONS=
-ENV PARSERS=
-ENV SCENARIOS=
-ENV POSTOVERFLOWS=
-
-# Uninstall the following hub items ------------#
-
-ENV DISABLE_COLLECTIONS=
-ENV DISABLE_PARSERS=
-ENV DISABLE_SCENARIOS=
-ENV DISABLE_POSTOVERFLOWS=
-
-ENV METRICS_PORT=
+RUN yq -n '.url="http://0.0.0.0:8080"' | install -m 0600 /dev/stdin /staging/etc/crowdsec/local_api_credentials.yaml
 
 
 ENTRYPOINT /bin/bash docker_start.sh
 ENTRYPOINT /bin/bash docker_start.sh
 
 

+ 10 - 65
Dockerfile.debian

@@ -14,17 +14,21 @@ ENV DEBCONF_NOWARNINGS="yes"
 # wizard.sh requires GNU coreutils
 # wizard.sh requires GNU coreutils
 RUN apt-get update && \
 RUN apt-get update && \
     apt-get install -y -q git gcc libc-dev make bash gettext binutils-gold coreutils tzdata && \
     apt-get install -y -q git gcc libc-dev make bash gettext binutils-gold coreutils tzdata && \
-    SYSTEM="docker" make release && \
+    echo "githubciXXXXXXXXXXXXXXXXXXXXXXXX" > /etc/machine-id && \
+    SYSTEM="docker" make clean release && \
     cd crowdsec-v* && \
     cd crowdsec-v* && \
     ./wizard.sh --docker-mode && \
     ./wizard.sh --docker-mode && \
-    cd - && \
+    cd - >/dev/null && \
     cscli hub update && \
     cscli hub update && \
     cscli collections install crowdsecurity/linux && \
     cscli collections install crowdsecurity/linux && \
     cscli parsers install crowdsecurity/whitelists && \
     cscli parsers install crowdsecurity/whitelists && \
-    go install github.com/mikefarah/yq/v4@v4.30.5
+    go install github.com/mikefarah/yq/v4@v4.30.6
 
 
 FROM debian:bullseye-slim as build-slim
 FROM debian:bullseye-slim as build-slim
 
 
+ENV DEBIAN_FRONTEND=noninteractive
+ENV DEBCONF_NOWARNINGS="yes"
+
 RUN apt-get update && \
 RUN apt-get update && \
     apt-get install -y -q --install-recommends --no-install-suggests \
     apt-get install -y -q --install-recommends --no-install-suggests \
     procps \
     procps \
@@ -35,8 +39,7 @@ RUN apt-get update && \
     tzdata && \
     tzdata && \
     mkdir -p /staging/etc/crowdsec && \
     mkdir -p /staging/etc/crowdsec && \
     mkdir -p /staging/var/lib/crowdsec && \
     mkdir -p /staging/var/lib/crowdsec && \
-    mkdir -p /var/lib/crowdsec/data \
-    yq -n '.url="http://0.0.0.0:8080"' | install -m 0600 /dev/stdin /staging/etc/crowdsec/local_api_credentials.yaml
+    mkdir -p /var/lib/crowdsec/data
 
 
 COPY --from=build /go/bin/yq /usr/local/bin/yq
 COPY --from=build /go/bin/yq /usr/local/bin/yq
 COPY --from=build /etc/crowdsec /staging/etc/crowdsec
 COPY --from=build /etc/crowdsec /staging/etc/crowdsec
@@ -44,67 +47,9 @@ COPY --from=build /usr/local/bin/crowdsec /usr/local/bin/crowdsec
 COPY --from=build /usr/local/bin/cscli /usr/local/bin/cscli
 COPY --from=build /usr/local/bin/cscli /usr/local/bin/cscli
 COPY --from=build /go/src/crowdsec/docker/docker_start.sh /
 COPY --from=build /go/src/crowdsec/docker/docker_start.sh /
 COPY --from=build /go/src/crowdsec/docker/config.yaml /staging/etc/crowdsec/config.yaml
 COPY --from=build /go/src/crowdsec/docker/config.yaml /staging/etc/crowdsec/config.yaml
-RUN yq eval -i ".plugin_config.group = \"nogroup\"" /staging/etc/crowdsec/config.yaml
-
-# NOTE: setting default values here will overwrite the ones set in config.yaml
-#       every time the container is started. We set the default in docker/config.yaml
-#       and document them in docker/README.md, but keep the variables empty here.
-
-ENV CONFIG_FILE=/etc/crowdsec/config.yaml
-ENV LOCAL_API_URL=
-ENV CUSTOM_HOSTNAME=localhost
-ENV PLUGIN_DIR=
-ENV DISABLE_AGENT=false
-ENV DISABLE_LOCAL_API=false
-ENV DISABLE_ONLINE_API=false
-ENV DSN=
-ENV TYPE=
-ENV TEST_MODE=false
-ENV USE_WAL=
-
-# register to app.crowdsec.net
-
-ENV ENROLL_INSTANCE_NAME=
-ENV ENROLL_KEY=
-ENV ENROLL_TAGS=
-
-# log verbosity
-
-ENV LEVEL_TRACE=
-ENV LEVEL_DEBUG=
-ENV LEVEL_INFO=
-
-# TLS setup ----------------------------------- #
-
-ENV AGENT_USERNAME=
-ENV AGENT_PASSWORD=
-
-# TLS setup ----------------------------------- #
-
-ENV USE_TLS=false
-ENV CACERT_FILE=
-ENV CERT_FILE=
-ENV KEY_FILE=
-# comma-separated list of allowed OU values for TLS bouncer certificates
-ENV BOUNCERS_ALLOWED_OU=
-# comma-separated list of allowed OU values for TLS agent certificates
-ENV AGENTS_ALLOWED_OU=
-
-# Install the following hub items --------------#
-
-ENV COLLECTIONS=
-ENV PARSERS=
-ENV SCENARIOS=
-ENV POSTOVERFLOWS=
-
-# Uninstall the following hub items ------------#
-
-ENV DISABLE_COLLECTIONS=
-ENV DISABLE_PARSERS=
-ENV DISABLE_SCENARIOS=
-ENV DISABLE_POSTOVERFLOWS=
+RUN yq -n '.url="http://0.0.0.0:8080"' | install -m 0600 /dev/stdin /staging/etc/crowdsec/local_api_credentials.yaml && \
+    yq eval -i ".plugin_config.group = \"nogroup\"" /staging/etc/crowdsec/config.yaml
 
 
-ENV METRICS_PORT=
 
 
 ENTRYPOINT /bin/bash docker_start.sh
 ENTRYPOINT /bin/bash docker_start.sh
 
 

+ 1 - 1
Makefile

@@ -45,7 +45,7 @@ CSCLI_BIN = cscli$(EXT)
 BUILD_CMD = build
 BUILD_CMD = build
 
 
 MINIMUM_SUPPORTED_GO_MAJOR_VERSION = 1
 MINIMUM_SUPPORTED_GO_MAJOR_VERSION = 1
-MINIMUM_SUPPORTED_GO_MINOR_VERSION = 18
+MINIMUM_SUPPORTED_GO_MINOR_VERSION = 19
 
 
 go_major_minor = $(subst ., ,$(BUILD_GOVERSION))
 go_major_minor = $(subst ., ,$(BUILD_GOVERSION))
 GO_MAJOR_VERSION = $(word 1, $(go_major_minor))
 GO_MAJOR_VERSION = $(word 1, $(go_major_minor))

+ 24 - 3
cmd/crowdsec-cli/alerts.go

@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"fmt"
 	"net/url"
 	"net/url"
 	"os"
 	"os"
+	"sort"
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
 
 
@@ -112,6 +113,29 @@ func DisplayOneAlert(alert *models.Alert, withDetail bool) error {
 
 
 		alertDecisionsTable(color.Output, alert)
 		alertDecisionsTable(color.Output, alert)
 
 
+		if len(alert.Meta) > 0 {
+			fmt.Printf("\n - Context  :\n")
+			sort.Slice(alert.Meta, func(i, j int) bool {
+				return alert.Meta[i].Key < alert.Meta[j].Key
+			})
+			table := newTable(color.Output)
+			table.SetRowLines(false)
+			table.SetHeaders("Key", "Value")
+			for _, meta := range alert.Meta {
+				var valSlice []string
+				if err := json.Unmarshal([]byte(meta.Value), &valSlice); err != nil {
+					return fmt.Errorf("unknown context value type '%s' : %s", meta.Value, err)
+				}
+				for _, value := range valSlice {
+					table.AddRow(
+						meta.Key,
+						value,
+					)
+				}
+			}
+			table.Render()
+		}
+
 		if withDetail {
 		if withDetail {
 			fmt.Printf("\n - Events  :\n")
 			fmt.Printf("\n - Events  :\n")
 			for _, event := range alert.Events {
 			for _, event := range alert.Events {
@@ -419,9 +443,6 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`,
 			if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI {
 			if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI {
 				log.Fatal("Local API is disabled, please run this command on the local API machine")
 				log.Fatal("Local API is disabled, please run this command on the local API machine")
 			}
 			}
-			if err := csConfig.LoadDBConfig(); err != nil {
-				log.Fatal(err)
-			}
 			dbClient, err = database.NewClient(csConfig.DbConfig)
 			dbClient, err = database.NewClient(csConfig.DbConfig)
 			if err != nil {
 			if err != nil {
 				log.Fatalf("unable to create new database client: %s", err)
 				log.Fatalf("unable to create new database client: %s", err)

+ 110 - 77
cmd/crowdsec-cli/bouncers.go

@@ -18,10 +18,6 @@ import (
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 )
 )
 
 
-var keyIP string
-var keyLength int
-var key string
-
 func getBouncers(out io.Writer, dbClient *database.Client) error {
 func getBouncers(out io.Writer, dbClient *database.Client) error {
 	bouncers, err := dbClient.ListBouncers()
 	bouncers, err := dbClient.ListBouncers()
 	if err != nil {
 	if err != nil {
@@ -59,33 +55,8 @@ func getBouncers(out io.Writer, dbClient *database.Client) error {
 	return nil
 	return nil
 }
 }
 
 
-func NewBouncersCmd() *cobra.Command {
-	/* ---- DECISIONS COMMAND */
-	var cmdBouncers = &cobra.Command{
-		Use:   "bouncers [action]",
-		Short: "Manage bouncers [requires local API]",
-		Long: `To list/add/delete bouncers.
-Note: This command requires database direct access, so is intended to be run on Local API/master.
-`,
-		Args:              cobra.MinimumNArgs(1),
-		Aliases:           []string{"bouncer"},
-		DisableAutoGenTag: true,
-		PersistentPreRun: func(cmd *cobra.Command, args []string) {
-			var err error
-			if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI {
-				log.Fatal("Local API is disabled, please run this command on the local API machine")
-			}
-			if err := csConfig.LoadDBConfig(); err != nil {
-				log.Fatal(err)
-			}
-			dbClient, err = database.NewClient(csConfig.DbConfig)
-			if err != nil {
-				log.Fatalf("unable to create new database client: %s", err)
-			}
-		},
-	}
-
-	var cmdBouncersList = &cobra.Command{
+func NewBouncersListCmd() *cobra.Command {
+	cmdBouncersList := &cobra.Command{
 		Use:               "list",
 		Use:               "list",
 		Short:             "List bouncers",
 		Short:             "List bouncers",
 		Long:              `List bouncers`,
 		Long:              `List bouncers`,
@@ -99,9 +70,61 @@ Note: This command requires database direct access, so is intended to be run on
 			}
 			}
 		},
 		},
 	}
 	}
-	cmdBouncers.AddCommand(cmdBouncersList)
 
 
-	var cmdBouncersAdd = &cobra.Command{
+	return cmdBouncersList
+}
+
+func runBouncersAdd(cmd *cobra.Command, args []string) error {
+	flags := cmd.Flags()
+
+	keyLength, err := flags.GetInt("length")
+	if err != nil {
+		return err
+	}
+
+	key, err := flags.GetString("key")
+	if err != nil {
+		return err
+	}
+
+	keyName := args[0]
+	var apiKey string
+
+	if keyName == "" {
+		log.Fatalf("Please provide a name for the api key")
+	}
+	apiKey = key
+	if key == "" {
+		apiKey, err = middlewares.GenerateAPIKey(keyLength)
+	}
+	if err != nil {
+		log.Fatalf("unable to generate api key: %s", err)
+	}
+	_, err = dbClient.CreateBouncer(keyName, "", middlewares.HashSHA512(apiKey), types.ApiKeyAuthType)
+	if err != nil {
+		log.Fatalf("unable to create bouncer: %s", err)
+	}
+
+	if csConfig.Cscli.Output == "human" {
+		fmt.Printf("Api key for '%s':\n\n", keyName)
+		fmt.Printf("   %s\n\n", apiKey)
+		fmt.Print("Please keep this key since you will not be able to retrieve it!\n")
+	} else if csConfig.Cscli.Output == "raw" {
+		fmt.Printf("%s", apiKey)
+	} else if csConfig.Cscli.Output == "json" {
+		j, err := json.Marshal(apiKey)
+		if err != nil {
+			log.Fatalf("unable to marshal api key")
+		}
+		fmt.Printf("%s", string(j))
+	}
+
+	return nil
+}
+
+
+func NewBouncersAddCmd() *cobra.Command {
+	cmdBouncersAdd := &cobra.Command{
 		Use:   "add MyBouncerName [--length 16]",
 		Use:   "add MyBouncerName [--length 16]",
 		Short: "add bouncer",
 		Short: "add bouncer",
 		Long:  `add bouncer`,
 		Long:  `add bouncer`,
@@ -110,45 +133,33 @@ cscli bouncers add MyBouncerName -l 24
 cscli bouncers add MyBouncerName -k %s`, generatePassword(32)),
 cscli bouncers add MyBouncerName -k %s`, generatePassword(32)),
 		Args:              cobra.ExactArgs(1),
 		Args:              cobra.ExactArgs(1),
 		DisableAutoGenTag: true,
 		DisableAutoGenTag: true,
-		Run: func(cmd *cobra.Command, arg []string) {
-			keyName := arg[0]
-			var apiKey string
-			var err error
-			if keyName == "" {
-				log.Fatalf("Please provide a name for the api key")
-			}
-			apiKey = key
-			if key == "" {
-				apiKey, err = middlewares.GenerateAPIKey(keyLength)
-			}
-			if err != nil {
-				log.Fatalf("unable to generate api key: %s", err)
-			}
-			_, err = dbClient.CreateBouncer(keyName, keyIP, middlewares.HashSHA512(apiKey), types.ApiKeyAuthType)
-			if err != nil {
-				log.Fatalf("unable to create bouncer: %s", err)
-			}
+		RunE: runBouncersAdd,
+	}
 
 
-			if csConfig.Cscli.Output == "human" {
-				fmt.Printf("Api key for '%s':\n\n", keyName)
-				fmt.Printf("   %s\n\n", apiKey)
-				fmt.Print("Please keep this key since you will not be able to retrieve it!\n")
-			} else if csConfig.Cscli.Output == "raw" {
-				fmt.Printf("%s", apiKey)
-			} else if csConfig.Cscli.Output == "json" {
-				j, err := json.Marshal(apiKey)
-				if err != nil {
-					log.Fatalf("unable to marshal api key")
-				}
-				fmt.Printf("%s", string(j))
-			}
-		},
+	flags := cmdBouncersAdd.Flags()
+
+	flags.IntP("length", "l", 16, "length of the api key")
+	flags.StringP("key", "k", "", "api key for the bouncer")
+
+	return cmdBouncersAdd
+}
+
+
+func runBouncersDelete(cmd *cobra.Command, args []string) error {
+	for _, bouncerID := range args {
+		err := dbClient.DeleteBouncer(bouncerID)
+		if err != nil {
+			log.Fatalf("unable to delete bouncer '%s': %s", bouncerID, err)
+		}
+		log.Infof("bouncer '%s' deleted successfully", bouncerID)
 	}
 	}
-	cmdBouncersAdd.Flags().IntVarP(&keyLength, "length", "l", 16, "length of the api key")
-	cmdBouncersAdd.Flags().StringVarP(&key, "key", "k", "", "api key for the bouncer")
-	cmdBouncers.AddCommand(cmdBouncersAdd)
 
 
-	var cmdBouncersDelete = &cobra.Command{
+	return nil
+}
+
+
+func NewBouncersDeleteCmd() *cobra.Command {
+	cmdBouncersDelete := &cobra.Command{
 		Use:               "delete MyBouncerName",
 		Use:               "delete MyBouncerName",
 		Short:             "delete bouncer",
 		Short:             "delete bouncer",
 		Args:              cobra.MinimumNArgs(1),
 		Args:              cobra.MinimumNArgs(1),
@@ -173,16 +184,38 @@ cscli bouncers add MyBouncerName -k %s`, generatePassword(32)),
 			}
 			}
 			return ret, cobra.ShellCompDirectiveNoFileComp
 			return ret, cobra.ShellCompDirectiveNoFileComp
 		},
 		},
-		Run: func(cmd *cobra.Command, args []string) {
-			for _, bouncerID := range args {
-				err := dbClient.DeleteBouncer(bouncerID)
-				if err != nil {
-					log.Fatalf("unable to delete bouncer '%s': %s", bouncerID, err)
-				}
-				log.Infof("bouncer '%s' deleted successfully", bouncerID)
+		RunE: runBouncersDelete,
+	}
+
+	return cmdBouncersDelete
+}
+
+func NewBouncersCmd() *cobra.Command {
+	/* ---- DECISIONS COMMAND */
+	var cmdBouncers = &cobra.Command{
+		Use:   "bouncers [action]",
+		Short: "Manage bouncers [requires local API]",
+		Long: `To list/add/delete bouncers.
+Note: This command requires database direct access, so is intended to be run on Local API/master.
+`,
+		Args:              cobra.MinimumNArgs(1),
+		Aliases:           []string{"bouncer"},
+		DisableAutoGenTag: true,
+		PersistentPreRun: func(cmd *cobra.Command, args []string) {
+			var err error
+			if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI {
+				log.Fatal("Local API is disabled, please run this command on the local API machine")
+			}
+			dbClient, err = database.NewClient(csConfig.DbConfig)
+			if err != nil {
+				log.Fatalf("unable to create new database client: %s", err)
 			}
 			}
 		},
 		},
 	}
 	}
-	cmdBouncers.AddCommand(cmdBouncersDelete)
+
+	cmdBouncers.AddCommand(NewBouncersListCmd())
+	cmdBouncers.AddCommand(NewBouncersAddCmd())
+	cmdBouncers.AddCommand(NewBouncersDeleteCmd())
+
 	return cmdBouncers
 	return cmdBouncers
 }
 }

+ 1 - 0
cmd/crowdsec-cli/capi.go

@@ -22,6 +22,7 @@ import (
 var CAPIURLPrefix string = "v2"
 var CAPIURLPrefix string = "v2"
 var CAPIBaseURL string = "https://api.crowdsec.net/"
 var CAPIBaseURL string = "https://api.crowdsec.net/"
 var capiUserPrefix string
 var capiUserPrefix string
+var outputFile string
 
 
 func NewCapiCmd() *cobra.Command {
 func NewCapiCmd() *cobra.Command {
 	var cmdCapi = &cobra.Command{
 	var cmdCapi = &cobra.Command{

+ 4 - 489
cmd/crowdsec-cli/config.go

@@ -1,506 +1,21 @@
 package main
 package main
 
 
 import (
 import (
-	"encoding/json"
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-
-	"github.com/antonmedv/expr"
-	"github.com/pkg/errors"
-	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"github.com/spf13/cobra"
-	"gopkg.in/yaml.v2"
-
-	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
-	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
-	"github.com/crowdsecurity/crowdsec/pkg/types"
 )
 )
 
 
-type OldAPICfg struct {
-	MachineID string `json:"machine_id"`
-	Password  string `json:"password"`
-}
-
-/* Backup crowdsec configurations to directory <dirPath> :
-
-- Main config (config.yaml)
-- Profiles config (profiles.yaml)
-- Simulation config (simulation.yaml)
-- Backup of API credentials (local API and online API)
-- List of scenarios, parsers, postoverflows and collections that are up-to-date
-- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections
-*/
-func backupConfigToDirectory(dirPath string) error {
-	var err error
-
-	if dirPath == "" {
-		return fmt.Errorf("directory path can't be empty")
-	}
-	log.Infof("Starting configuration backup")
-	/*if parent directory doesn't exist, bail out. create final dir with Mkdir*/
-	parentDir := filepath.Dir(dirPath)
-	if _, err := os.Stat(parentDir); err != nil {
-		return errors.Wrapf(err, "while checking parent directory %s existence", parentDir)
-	}
-
-	if err = os.Mkdir(dirPath, 0700); err != nil {
-		return errors.Wrapf(err, "while creating %s", dirPath)
-	}
-
-	if csConfig.ConfigPaths.SimulationFilePath != "" {
-		backupSimulation := filepath.Join(dirPath, "simulation.yaml")
-		if err = types.CopyFile(csConfig.ConfigPaths.SimulationFilePath, backupSimulation); err != nil {
-			return errors.Wrapf(err, "failed copy %s to %s", csConfig.ConfigPaths.SimulationFilePath, backupSimulation)
-		}
-		log.Infof("Saved simulation to %s", backupSimulation)
-	}
-
-	/*
-	   - backup AcquisitionFilePath
-	   - backup the other files of acquisition directory
-	*/
-	if csConfig.Crowdsec != nil && csConfig.Crowdsec.AcquisitionFilePath != "" {
-		backupAcquisition := filepath.Join(dirPath, "acquis.yaml")
-		if err = types.CopyFile(csConfig.Crowdsec.AcquisitionFilePath, backupAcquisition); err != nil {
-			return fmt.Errorf("failed copy %s to %s : %s", csConfig.Crowdsec.AcquisitionFilePath, backupAcquisition, err)
-		}
-	}
-
-	acquisBackupDir := filepath.Join(dirPath, "acquis")
-	if err = os.Mkdir(acquisBackupDir, 0700); err != nil {
-		return fmt.Errorf("error while creating %s : %s", acquisBackupDir, err)
-	}
-
-	if csConfig.Crowdsec != nil && len(csConfig.Crowdsec.AcquisitionFiles) > 0 {
-		for _, acquisFile := range csConfig.Crowdsec.AcquisitionFiles {
-			/*if it was the default one, it was already backup'ed*/
-			if csConfig.Crowdsec.AcquisitionFilePath == acquisFile {
-				continue
-			}
-			targetFname, err := filepath.Abs(filepath.Join(acquisBackupDir, filepath.Base(acquisFile)))
-			if err != nil {
-				return errors.Wrapf(err, "while saving %s to %s", acquisFile, acquisBackupDir)
-			}
-			if err = types.CopyFile(acquisFile, targetFname); err != nil {
-				return fmt.Errorf("failed copy %s to %s : %s", acquisFile, targetFname, err)
-			}
-			log.Infof("Saved acquis %s to %s", acquisFile, targetFname)
-		}
-	}
-
-	if ConfigFilePath != "" {
-		backupMain := fmt.Sprintf("%s/config.yaml", dirPath)
-		if err = types.CopyFile(ConfigFilePath, backupMain); err != nil {
-			return fmt.Errorf("failed copy %s to %s : %s", ConfigFilePath, backupMain, err)
-		}
-		log.Infof("Saved default yaml to %s", backupMain)
-	}
-	if csConfig.API != nil && csConfig.API.Server != nil && csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.CredentialsFilePath != "" {
-		backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath)
-		if err = types.CopyFile(csConfig.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds); err != nil {
-			return fmt.Errorf("failed copy %s to %s : %s", csConfig.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds, err)
-		}
-		log.Infof("Saved online API credentials to %s", backupCAPICreds)
-	}
-	if csConfig.API != nil && csConfig.API.Client != nil && csConfig.API.Client.CredentialsFilePath != "" {
-		backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath)
-		if err = types.CopyFile(csConfig.API.Client.CredentialsFilePath, backupLAPICreds); err != nil {
-			return fmt.Errorf("failed copy %s to %s : %s", csConfig.API.Client.CredentialsFilePath, backupLAPICreds, err)
-		}
-		log.Infof("Saved local API credentials to %s", backupLAPICreds)
-	}
-	if csConfig.API != nil && csConfig.API.Server != nil && csConfig.API.Server.ProfilesPath != "" {
-		backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath)
-		if err = types.CopyFile(csConfig.API.Server.ProfilesPath, backupProfiles); err != nil {
-			return fmt.Errorf("failed copy %s to %s : %s", csConfig.API.Server.ProfilesPath, backupProfiles, err)
-		}
-		log.Infof("Saved profiles to %s", backupProfiles)
-	}
-
-	if err = BackupHub(dirPath); err != nil {
-		return fmt.Errorf("failed to backup hub config : %s", err)
-	}
-
-	return nil
-}
-
-/* Restore crowdsec configurations to directory <dirPath> :
-
-- Main config (config.yaml)
-- Profiles config (profiles.yaml)
-- Simulation config (simulation.yaml)
-- Backup of API credentials (local API and online API)
-- List of scenarios, parsers, postoverflows and collections that are up-to-date
-- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections
-*/
-func restoreConfigFromDirectory(dirPath string) error {
-	var err error
-
-	if !restoreOldBackup {
-		backupMain := fmt.Sprintf("%s/config.yaml", dirPath)
-		if _, err = os.Stat(backupMain); err == nil {
-			if csConfig.ConfigPaths != nil && csConfig.ConfigPaths.ConfigDir != "" {
-				if err = types.CopyFile(backupMain, fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir)); err != nil {
-					return fmt.Errorf("failed copy %s to %s : %s", backupMain, csConfig.ConfigPaths.ConfigDir, err)
-				}
-			}
-		}
-
-		// Now we have config.yaml, we should regenerate config struct to have rights paths etc
-		ConfigFilePath = fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir)
-		initConfig()
-
-		backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath)
-		if _, err = os.Stat(backupCAPICreds); err == nil {
-			if err = types.CopyFile(backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath); err != nil {
-				return fmt.Errorf("failed copy %s to %s : %s", backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath, err)
-			}
-		}
-
-		backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath)
-		if _, err = os.Stat(backupLAPICreds); err == nil {
-			if err = types.CopyFile(backupLAPICreds, csConfig.API.Client.CredentialsFilePath); err != nil {
-				return fmt.Errorf("failed copy %s to %s : %s", backupLAPICreds, csConfig.API.Client.CredentialsFilePath, err)
-			}
-		}
-
-		backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath)
-		if _, err = os.Stat(backupProfiles); err == nil {
-			if err = types.CopyFile(backupProfiles, csConfig.API.Server.ProfilesPath); err != nil {
-				return fmt.Errorf("failed copy %s to %s : %s", backupProfiles, csConfig.API.Server.ProfilesPath, err)
-			}
-		}
-	} else {
-		var oldAPICfg OldAPICfg
-		backupOldAPICfg := fmt.Sprintf("%s/api_creds.json", dirPath)
-
-		jsonFile, err := os.Open(backupOldAPICfg)
-		if err != nil {
-			log.Warningf("failed to open %s : %s", backupOldAPICfg, err)
-		} else {
-			byteValue, _ := io.ReadAll(jsonFile)
-			err = json.Unmarshal(byteValue, &oldAPICfg)
-			if err != nil {
-				return fmt.Errorf("failed to load json file %s : %s", backupOldAPICfg, err)
-			}
-
-			apiCfg := csconfig.ApiCredentialsCfg{
-				Login:    oldAPICfg.MachineID,
-				Password: oldAPICfg.Password,
-				URL:      CAPIBaseURL,
-			}
-			apiConfigDump, err := yaml.Marshal(apiCfg)
-			if err != nil {
-				return fmt.Errorf("unable to dump api credentials: %s", err)
-			}
-			apiConfigDumpFile := fmt.Sprintf("%s/online_api_credentials.yaml", csConfig.ConfigPaths.ConfigDir)
-			if csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.CredentialsFilePath != "" {
-				apiConfigDumpFile = csConfig.API.Server.OnlineClient.CredentialsFilePath
-			}
-			err = os.WriteFile(apiConfigDumpFile, apiConfigDump, 0644)
-			if err != nil {
-				return fmt.Errorf("write api credentials in '%s' failed: %s", apiConfigDumpFile, err)
-			}
-			log.Infof("Saved API credentials to %s", apiConfigDumpFile)
-		}
-	}
-
-	backupSimulation := fmt.Sprintf("%s/simulation.yaml", dirPath)
-	if _, err = os.Stat(backupSimulation); err == nil {
-		if err = types.CopyFile(backupSimulation, csConfig.ConfigPaths.SimulationFilePath); err != nil {
-			return fmt.Errorf("failed copy %s to %s : %s", backupSimulation, csConfig.ConfigPaths.SimulationFilePath, err)
-		}
-	}
-
-	/*if there is a acquisition dir, restore its content*/
-	if csConfig.Crowdsec.AcquisitionDirPath != "" {
-		if err = os.Mkdir(csConfig.Crowdsec.AcquisitionDirPath, 0700); err != nil {
-			return fmt.Errorf("error while creating %s : %s", csConfig.Crowdsec.AcquisitionDirPath, err)
-		}
-
-	}
-
-	//if there was a single one
-	backupAcquisition := fmt.Sprintf("%s/acquis.yaml", dirPath)
-	if _, err = os.Stat(backupAcquisition); err == nil {
-		log.Debugf("restoring backup'ed %s", backupAcquisition)
-		if err = types.CopyFile(backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath); err != nil {
-			return fmt.Errorf("failed copy %s to %s : %s", backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath, err)
-		}
-	}
-
-	//if there is files in the acquis backup dir, restore them
-	acquisBackupDir := filepath.Join(dirPath, "acquis", "*.yaml")
-	if acquisFiles, err := filepath.Glob(acquisBackupDir); err == nil {
-		for _, acquisFile := range acquisFiles {
-			targetFname, err := filepath.Abs(csConfig.Crowdsec.AcquisitionDirPath + "/" + filepath.Base(acquisFile))
-			if err != nil {
-				return errors.Wrapf(err, "while saving %s to %s", acquisFile, targetFname)
-			}
-			log.Debugf("restoring %s to %s", acquisFile, targetFname)
-			if err = types.CopyFile(acquisFile, targetFname); err != nil {
-				return fmt.Errorf("failed copy %s to %s : %s", acquisFile, targetFname, err)
-			}
-		}
-	}
-
-	if csConfig.Crowdsec != nil && len(csConfig.Crowdsec.AcquisitionFiles) > 0 {
-		for _, acquisFile := range csConfig.Crowdsec.AcquisitionFiles {
-			log.Infof("backup filepath from dir -> %s", acquisFile)
-			/*if it was the default one, it was already backup'ed*/
-			if csConfig.Crowdsec.AcquisitionFilePath == acquisFile {
-				log.Infof("skip this one")
-				continue
-			}
-			targetFname, err := filepath.Abs(filepath.Join(acquisBackupDir, filepath.Base(acquisFile)))
-			if err != nil {
-				return errors.Wrapf(err, "while saving %s to %s", acquisFile, acquisBackupDir)
-			}
-			if err = types.CopyFile(acquisFile, targetFname); err != nil {
-				return fmt.Errorf("failed copy %s to %s : %s", acquisFile, targetFname, err)
-			}
-			log.Infof("Saved acquis %s to %s", acquisFile, targetFname)
-		}
-	}
-
-	if err = RestoreHub(dirPath); err != nil {
-		return fmt.Errorf("failed to restore hub config : %s", err)
-	}
-
-	return nil
-}
 
 
 func NewConfigCmd() *cobra.Command {
 func NewConfigCmd() *cobra.Command {
-
-	var cmdConfig = &cobra.Command{
+	cmdConfig := &cobra.Command{
 		Use:               "config [command]",
 		Use:               "config [command]",
 		Short:             "Allows to view current config",
 		Short:             "Allows to view current config",
 		Args:              cobra.ExactArgs(0),
 		Args:              cobra.ExactArgs(0),
 		DisableAutoGenTag: true,
 		DisableAutoGenTag: true,
 	}
 	}
-	var key string
-	type Env struct {
-		Config *csconfig.Config
-	}
-	var cmdConfigShow = &cobra.Command{
-		Use:               "show",
-		Short:             "Displays current config",
-		Long:              `Displays the current cli configuration.`,
-		Args:              cobra.ExactArgs(0),
-		DisableAutoGenTag: true,
-		Run: func(cmd *cobra.Command, args []string) {
-
-			if key != "" {
-				program, err := expr.Compile(key, expr.Env(Env{}))
-				if err != nil {
-					log.Fatal(err)
-				}
-				output, err := expr.Run(program, Env{Config: csConfig})
-				if err != nil {
-					log.Fatal(err)
-				}
-				switch csConfig.Cscli.Output {
-				case "human", "raw":
-					switch output.(type) {
-					case string:
-						fmt.Printf("%s\n", output)
-					case int:
-						fmt.Printf("%d\n", output)
-					default:
-						fmt.Printf("%v\n", output)
-					}
-				case "json":
-					data, err := json.MarshalIndent(output, "", "  ")
-					if err != nil {
-						log.Fatalf("failed to marshal configuration: %s", err)
-					}
-					fmt.Printf("%s\n", string(data))
-				}
-				return
-			}
-
-			switch csConfig.Cscli.Output {
-			case "human":
-				fmt.Printf("Global:\n")
-				if csConfig.ConfigPaths != nil {
-					fmt.Printf("   - Configuration Folder   : %s\n", csConfig.ConfigPaths.ConfigDir)
-					fmt.Printf("   - Data Folder            : %s\n", csConfig.ConfigPaths.DataDir)
-					fmt.Printf("   - Hub Folder             : %s\n", csConfig.ConfigPaths.HubDir)
-					fmt.Printf("   - Simulation File        : %s\n", csConfig.ConfigPaths.SimulationFilePath)
-				}
-				if csConfig.Common != nil {
-					fmt.Printf("   - Log Folder             : %s\n", csConfig.Common.LogDir)
-					fmt.Printf("   - Log level              : %s\n", csConfig.Common.LogLevel)
-					fmt.Printf("   - Log Media              : %s\n", csConfig.Common.LogMedia)
-				}
-				if csConfig.Crowdsec != nil {
-					fmt.Printf("Crowdsec:\n")
-					fmt.Printf("  - Acquisition File        : %s\n", csConfig.Crowdsec.AcquisitionFilePath)
-					fmt.Printf("  - Parsers routines        : %d\n", csConfig.Crowdsec.ParserRoutinesCount)
-					if csConfig.Crowdsec.AcquisitionDirPath != "" {
-						fmt.Printf("  - Acquisition Folder      : %s\n", csConfig.Crowdsec.AcquisitionDirPath)
-					}
-				}
-				if csConfig.Cscli != nil {
-					fmt.Printf("cscli:\n")
-					fmt.Printf("  - Output                  : %s\n", csConfig.Cscli.Output)
-					fmt.Printf("  - Hub Branch              : %s\n", csConfig.Cscli.HubBranch)
-					fmt.Printf("  - Hub Folder              : %s\n", csConfig.Cscli.HubDir)
-				}
-				if csConfig.API != nil {
-					if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil {
-						fmt.Printf("API Client:\n")
-						fmt.Printf("  - URL                     : %s\n", csConfig.API.Client.Credentials.URL)
-						fmt.Printf("  - Login                   : %s\n", csConfig.API.Client.Credentials.Login)
-						fmt.Printf("  - Credentials File        : %s\n", csConfig.API.Client.CredentialsFilePath)
-					}
-					if csConfig.API.Server != nil {
-						fmt.Printf("Local API Server:\n")
-						fmt.Printf("  - Listen URL              : %s\n", csConfig.API.Server.ListenURI)
-						fmt.Printf("  - Profile File            : %s\n", csConfig.API.Server.ProfilesPath)
-						if csConfig.API.Server.TLS != nil {
-							if csConfig.API.Server.TLS.CertFilePath != "" {
-								fmt.Printf("  - Cert File : %s\n", csConfig.API.Server.TLS.CertFilePath)
-							}
-							if csConfig.API.Server.TLS.KeyFilePath != "" {
-								fmt.Printf("  - Key File  : %s\n", csConfig.API.Server.TLS.KeyFilePath)
-							}
-							if csConfig.API.Server.TLS.CACertPath != "" {
-								fmt.Printf("  - CA Cert   : %s\n", csConfig.API.Server.TLS.CACertPath)
-							}
-							if csConfig.API.Server.TLS.CRLPath != "" {
-								fmt.Printf("  - CRL       : %s\n", csConfig.API.Server.TLS.CRLPath)
-							}
-							if csConfig.API.Server.TLS.CacheExpiration != nil {
-								fmt.Printf("  - Cache Expiration : %s\n", csConfig.API.Server.TLS.CacheExpiration)
-							}
-							if csConfig.API.Server.TLS.ClientVerification != "" {
-								fmt.Printf("  - Client Verification : %s\n", csConfig.API.Server.TLS.ClientVerification)
-							}
-							if csConfig.API.Server.TLS.AllowedAgentsOU != nil {
-								for _, ou := range csConfig.API.Server.TLS.AllowedAgentsOU {
-									fmt.Printf("      - Allowed Agents OU       : %s\n", ou)
-								}
-							}
-							if csConfig.API.Server.TLS.AllowedBouncersOU != nil {
-								for _, ou := range csConfig.API.Server.TLS.AllowedBouncersOU {
-									fmt.Printf("      - Allowed Bouncers OU       : %s\n", ou)
-								}
-							}
-
-						}
-						fmt.Printf("  - Trusted IPs: \n")
-						for _, ip := range csConfig.API.Server.TrustedIPs {
-							fmt.Printf("      - %s\n", ip)
-						}
-						if csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.Credentials != nil {
-							fmt.Printf("Central API:\n")
-							fmt.Printf("  - URL                     : %s\n", csConfig.API.Server.OnlineClient.Credentials.URL)
-							fmt.Printf("  - Login                   : %s\n", csConfig.API.Server.OnlineClient.Credentials.Login)
-							fmt.Printf("  - Credentials File        : %s\n", csConfig.API.Server.OnlineClient.CredentialsFilePath)
-						}
-					}
-				}
-				if csConfig.DbConfig != nil {
-					fmt.Printf("  - Database:\n")
-					fmt.Printf("      - Type                : %s\n", csConfig.DbConfig.Type)
-					switch csConfig.DbConfig.Type {
-					case "sqlite":
-						fmt.Printf("      - Path                : %s\n", csConfig.DbConfig.DbPath)
-					default:
-						fmt.Printf("      - Host                : %s\n", csConfig.DbConfig.Host)
-						fmt.Printf("      - Port                : %d\n", csConfig.DbConfig.Port)
-						fmt.Printf("      - User                : %s\n", csConfig.DbConfig.User)
-						fmt.Printf("      - DB Name             : %s\n", csConfig.DbConfig.DbName)
-					}
-					if csConfig.DbConfig.Flush != nil {
-						if *csConfig.DbConfig.Flush.MaxAge != "" {
-							fmt.Printf("      - Flush age           : %s\n", *csConfig.DbConfig.Flush.MaxAge)
-						}
-						if *csConfig.DbConfig.Flush.MaxItems != 0 {
-							fmt.Printf("      - Flush size          : %d\n", *csConfig.DbConfig.Flush.MaxItems)
-						}
-					}
-				}
-			case "json":
-				data, err := json.MarshalIndent(csConfig, "", "  ")
-				if err != nil {
-					log.Fatalf("failed to marshal configuration: %s", err)
-				}
-				fmt.Printf("%s\n", string(data))
-			case "raw":
-				data, err := yaml.Marshal(csConfig)
-				if err != nil {
-					log.Fatalf("failed to marshal configuration: %s", err)
-				}
-				fmt.Printf("%s\n", string(data))
-			}
-		},
-	}
-	cmdConfigShow.Flags().StringVar(&key, "key", "", "Display only this value (Config.API.Server.ListenURI)")
-	cmdConfig.AddCommand(cmdConfigShow)
-
-	var cmdConfigBackup = &cobra.Command{
-		Use:   `backup "directory"`,
-		Short: "Backup current config",
-		Long: `Backup the current crowdsec configuration including :
-
-- Main config (config.yaml)
-- Simulation config (simulation.yaml)
-- Profiles config (profiles.yaml)
-- List of scenarios, parsers, postoverflows and collections that are up-to-date
-- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections
-- Backup of API credentials (local API and online API)`,
-		Example:           `cscli config backup ./my-backup`,
-		Args:              cobra.ExactArgs(1),
-		DisableAutoGenTag: true,
-		Run: func(cmd *cobra.Command, args []string) {
-			var err error
-			if err := csConfig.LoadHub(); err != nil {
-				log.Fatal(err)
-			}
-			if err = cwhub.GetHubIdx(csConfig.Hub); err != nil {
-				log.Info("Run 'sudo cscli hub update' to get the hub index")
-				log.Fatalf("Failed to get Hub index : %v", err)
-			}
-			if err = backupConfigToDirectory(args[0]); err != nil {
-				log.Fatalf("Failed to backup configurations: %s", err)
-			}
-		},
-	}
-	cmdConfig.AddCommand(cmdConfigBackup)
-
-	var cmdConfigRestore = &cobra.Command{
-		Use:   `restore "directory"`,
-		Short: `Restore config in backup "directory"`,
-		Long: `Restore the crowdsec configuration from specified backup "directory" including:
 
 
-- Main config (config.yaml)
-- Simulation config (simulation.yaml)
-- Profiles config (profiles.yaml)
-- List of scenarios, parsers, postoverflows and collections that are up-to-date
-- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections
-- Backup of API credentials (local API and online API)`,
-		Args:              cobra.ExactArgs(1),
-		DisableAutoGenTag: true,
-		Run: func(cmd *cobra.Command, args []string) {
-			var err error
-			if err := csConfig.LoadHub(); err != nil {
-				log.Fatal(err)
-			}
-			if err = cwhub.GetHubIdx(csConfig.Hub); err != nil {
-				log.Info("Run 'sudo cscli hub update' to get the hub index")
-				log.Fatalf("Failed to get Hub index : %v", err)
-			}
-			if err := restoreConfigFromDirectory(args[0]); err != nil {
-				log.Fatalf("failed restoring configurations from %s : %s", args[0], err)
-			}
-		},
-	}
-	cmdConfigRestore.PersistentFlags().BoolVar(&restoreOldBackup, "old-backup", false, "To use when you are upgrading crowdsec v0.X to v1.X and you need to restore backup from v0.X")
-	cmdConfig.AddCommand(cmdConfigRestore)
+	cmdConfig.AddCommand(NewConfigShowCmd())
+	cmdConfig.AddCommand(NewConfigBackupCmd())
+	cmdConfig.AddCommand(NewConfigRestoreCmd())
 
 
 	return cmdConfig
 	return cmdConfig
 }
 }

+ 170 - 0
cmd/crowdsec-cli/config_backup.go

@@ -0,0 +1,170 @@
+package main
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+	log "github.com/sirupsen/logrus"
+	"github.com/spf13/cobra"
+
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
+)
+
+/* Backup crowdsec configurations to directory <dirPath> :
+
+- Main config (config.yaml)
+- Profiles config (profiles.yaml)
+- Simulation config (simulation.yaml)
+- Backup of API credentials (local API and online API)
+- List of scenarios, parsers, postoverflows and collections that are up-to-date
+- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections
+*/
+// backupConfigToDirectory writes a backup of the current crowdsec
+// configuration into dirPath (see the list in the header comment above).
+// dirPath itself is created here and must not already exist; its parent
+// directory must exist. Returns on the first file that cannot be copied.
+func backupConfigToDirectory(dirPath string) error {
+	var err error
+
+	if dirPath == "" {
+		return fmt.Errorf("directory path can't be empty")
+	}
+
+	log.Infof("Starting configuration backup")
+
+	/*if parent directory doesn't exist, bail out. create final dir with Mkdir*/
+	parentDir := filepath.Dir(dirPath)
+	if _, err := os.Stat(parentDir); err != nil {
+		return errors.Wrapf(err, "while checking parent directory %s existence", parentDir)
+	}
+
+	// 0700: the backup contains API credentials (copied below), keep it owner-only.
+	if err = os.Mkdir(dirPath, 0o700); err != nil {
+		return errors.Wrapf(err, "while creating %s", dirPath)
+	}
+
+	if csConfig.ConfigPaths.SimulationFilePath != "" {
+		backupSimulation := filepath.Join(dirPath, "simulation.yaml")
+		if err = types.CopyFile(csConfig.ConfigPaths.SimulationFilePath, backupSimulation); err != nil {
+			return errors.Wrapf(err, "failed copy %s to %s", csConfig.ConfigPaths.SimulationFilePath, backupSimulation)
+		}
+
+		log.Infof("Saved simulation to %s", backupSimulation)
+	}
+
+	/*
+	   - backup AcquisitionFilePath
+	   - backup the other files of acquisition directory
+	*/
+	if csConfig.Crowdsec != nil && csConfig.Crowdsec.AcquisitionFilePath != "" {
+		backupAcquisition := filepath.Join(dirPath, "acquis.yaml")
+		if err = types.CopyFile(csConfig.Crowdsec.AcquisitionFilePath, backupAcquisition); err != nil {
+			return fmt.Errorf("failed copy %s to %s : %s", csConfig.Crowdsec.AcquisitionFilePath, backupAcquisition, err)
+		}
+	}
+
+	// NOTE(review): the acquis/ subdirectory is created even when there are
+	// no extra acquisition files to store in it — confirm that is intended.
+	acquisBackupDir := filepath.Join(dirPath, "acquis")
+	if err = os.Mkdir(acquisBackupDir, 0o700); err != nil {
+		return fmt.Errorf("error while creating %s : %s", acquisBackupDir, err)
+	}
+
+	if csConfig.Crowdsec != nil && len(csConfig.Crowdsec.AcquisitionFiles) > 0 {
+		for _, acquisFile := range csConfig.Crowdsec.AcquisitionFiles {
+			/*if it was the default one, it was already backup'ed*/
+			if csConfig.Crowdsec.AcquisitionFilePath == acquisFile {
+				continue
+			}
+
+			targetFname, err := filepath.Abs(filepath.Join(acquisBackupDir, filepath.Base(acquisFile)))
+			if err != nil {
+				return errors.Wrapf(err, "while saving %s to %s", acquisFile, acquisBackupDir)
+			}
+
+			if err = types.CopyFile(acquisFile, targetFname); err != nil {
+				return fmt.Errorf("failed copy %s to %s : %s", acquisFile, targetFname, err)
+			}
+
+			log.Infof("Saved acquis %s to %s", acquisFile, targetFname)
+		}
+	}
+
+	// ConfigFilePath is the package-level path to the main config.yaml
+	// (set at startup elsewhere in this package).
+	if ConfigFilePath != "" {
+		backupMain := fmt.Sprintf("%s/config.yaml", dirPath)
+		if err = types.CopyFile(ConfigFilePath, backupMain); err != nil {
+			return fmt.Errorf("failed copy %s to %s : %s", ConfigFilePath, backupMain, err)
+		}
+
+		log.Infof("Saved default yaml to %s", backupMain)
+	}
+
+	// Central API (CAPI) credentials, if the server-side online client is configured.
+	if csConfig.API != nil && csConfig.API.Server != nil && csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.CredentialsFilePath != "" {
+		backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath)
+		if err = types.CopyFile(csConfig.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds); err != nil {
+			return fmt.Errorf("failed copy %s to %s : %s", csConfig.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds, err)
+		}
+
+		log.Infof("Saved online API credentials to %s", backupCAPICreds)
+	}
+
+	// Local API (LAPI) client credentials.
+	if csConfig.API != nil && csConfig.API.Client != nil && csConfig.API.Client.CredentialsFilePath != "" {
+		backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath)
+		if err = types.CopyFile(csConfig.API.Client.CredentialsFilePath, backupLAPICreds); err != nil {
+			return fmt.Errorf("failed copy %s to %s : %s", csConfig.API.Client.CredentialsFilePath, backupLAPICreds, err)
+		}
+
+		log.Infof("Saved local API credentials to %s", backupLAPICreds)
+	}
+
+	if csConfig.API != nil && csConfig.API.Server != nil && csConfig.API.Server.ProfilesPath != "" {
+		backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath)
+		if err = types.CopyFile(csConfig.API.Server.ProfilesPath, backupProfiles); err != nil {
+			return fmt.Errorf("failed copy %s to %s : %s", csConfig.API.Server.ProfilesPath, backupProfiles, err)
+		}
+
+		log.Infof("Saved profiles to %s", backupProfiles)
+	}
+
+	// Hub items (scenarios/parsers/postoverflows/collections) — helper defined
+	// elsewhere in this package.
+	if err = BackupHub(dirPath); err != nil {
+		return fmt.Errorf("failed to backup hub config : %s", err)
+	}
+
+	return nil
+}
+
+
+// runConfigBackup is the RunE handler for `cscli config backup <directory>`:
+// it loads the hub config and index (needed to classify installed items as
+// up-to-date vs tainted/local), then backs everything up into args[0].
+func runConfigBackup(cmd *cobra.Command, args []string) error {
+	if err := csConfig.LoadHub(); err != nil {
+		return err
+	}
+
+	if err := cwhub.GetHubIdx(csConfig.Hub); err != nil {
+		log.Info("Run 'sudo cscli hub update' to get the hub index")
+		return fmt.Errorf("failed to get Hub index: %w", err)
+	}
+
+	// args[0] is guaranteed present by cobra.ExactArgs(1) on the command.
+	if err := backupConfigToDirectory(args[0]); err != nil {
+		return fmt.Errorf("failed to backup config: %w", err)
+	}
+
+	return nil
+}
+
+
+// NewConfigBackupCmd builds the `cscli config backup "directory"` subcommand.
+func NewConfigBackupCmd() *cobra.Command {
+	cmdConfigBackup := &cobra.Command{
+		Use:   `backup "directory"`,
+		Short: "Backup current config",
+		Long: `Backup the current crowdsec configuration including :
+
+- Main config (config.yaml)
+- Simulation config (simulation.yaml)
+- Profiles config (profiles.yaml)
+- List of scenarios, parsers, postoverflows and collections that are up-to-date
+- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections
+- Backup of API credentials (local API and online API)`,
+		Example:           `cscli config backup ./my-backup`,
+		Args:              cobra.ExactArgs(1),
+		DisableAutoGenTag: true,
+		RunE:              runConfigBackup,
+	}
+
+	return cmdConfigBackup
+}

+ 225 - 0
cmd/crowdsec-cli/config_restore.go

@@ -0,0 +1,225 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+	log "github.com/sirupsen/logrus"
+	"github.com/spf13/cobra"
+	"gopkg.in/yaml.v2"
+
+	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
+)
+
+// OldAPICfg is the shape of the crowdsec v0.x `api_creds.json` credentials
+// file, read back when restoring with the --old-backup flag.
+type OldAPICfg struct {
+	MachineID string `json:"machine_id"`
+	Password  string `json:"password"`
+}
+
+/* Restore crowdsec configurations from backup directory <dirPath> :
+
+- Main config (config.yaml)
+- Profiles config (profiles.yaml)
+- Simulation config (simulation.yaml)
+- Backup of API credentials (local API and online API)
+- List of scenarios, parsers, postoverflows and collections that are up-to-date
+- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections
+*/
+// restoreConfigFromDirectory restores a configuration backup taken with
+// `cscli config backup` from dirPath back into the live config locations.
+// When oldBackup is true, it instead converts a v0.x `api_creds.json` into
+// v1.x online API credentials. Hub items are restored last via RestoreHub.
+func restoreConfigFromDirectory(dirPath string, oldBackup bool) error {
+	var err error
+
+	if !oldBackup {
+		backupMain := fmt.Sprintf("%s/config.yaml", dirPath)
+		if _, err = os.Stat(backupMain); err == nil {
+			if csConfig.ConfigPaths != nil && csConfig.ConfigPaths.ConfigDir != "" {
+				if err = types.CopyFile(backupMain, fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir)); err != nil {
+					return fmt.Errorf("failed copy %s to %s : %s", backupMain, csConfig.ConfigPaths.ConfigDir, err)
+				}
+			}
+		}
+
+		// Now we have config.yaml, we should regenerate config struct to have rights paths etc
+		ConfigFilePath = fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir)
+
+		// Re-parses the freshly restored config.yaml into the package-level
+		// csConfig; everything below uses the restored paths.
+		initConfig()
+
+		backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath)
+		if _, err = os.Stat(backupCAPICreds); err == nil {
+			if err = types.CopyFile(backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath); err != nil {
+				return fmt.Errorf("failed copy %s to %s : %s", backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath, err)
+			}
+		}
+
+		backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath)
+		if _, err = os.Stat(backupLAPICreds); err == nil {
+			if err = types.CopyFile(backupLAPICreds, csConfig.API.Client.CredentialsFilePath); err != nil {
+				return fmt.Errorf("failed copy %s to %s : %s", backupLAPICreds, csConfig.API.Client.CredentialsFilePath, err)
+			}
+		}
+
+		backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath)
+		if _, err = os.Stat(backupProfiles); err == nil {
+			if err = types.CopyFile(backupProfiles, csConfig.API.Server.ProfilesPath); err != nil {
+				return fmt.Errorf("failed copy %s to %s : %s", backupProfiles, csConfig.API.Server.ProfilesPath, err)
+			}
+		}
+	} else {
+		// --old-backup path: convert v0.x json credentials to v1.x yaml.
+		var oldAPICfg OldAPICfg
+		backupOldAPICfg := fmt.Sprintf("%s/api_creds.json", dirPath)
+
+		// NOTE(review): jsonFile is never closed — file handle leak on this path.
+		jsonFile, err := os.Open(backupOldAPICfg)
+		if err != nil {
+			// Best-effort: a missing old credentials file is only a warning.
+			log.Warningf("failed to open %s : %s", backupOldAPICfg, err)
+		} else {
+			byteValue, _ := io.ReadAll(jsonFile)
+			err = json.Unmarshal(byteValue, &oldAPICfg)
+			if err != nil {
+				return fmt.Errorf("failed to load json file %s : %s", backupOldAPICfg, err)
+			}
+
+			// CAPIBaseURL is a package-level constant/variable — the old file
+			// has no URL, so the central API default is used.
+			apiCfg := csconfig.ApiCredentialsCfg{
+				Login:    oldAPICfg.MachineID,
+				Password: oldAPICfg.Password,
+				URL:      CAPIBaseURL,
+			}
+			apiConfigDump, err := yaml.Marshal(apiCfg)
+			if err != nil {
+				return fmt.Errorf("unable to dump api credentials: %s", err)
+			}
+			apiConfigDumpFile := fmt.Sprintf("%s/online_api_credentials.yaml", csConfig.ConfigPaths.ConfigDir)
+			if csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.CredentialsFilePath != "" {
+				apiConfigDumpFile = csConfig.API.Server.OnlineClient.CredentialsFilePath
+			}
+			// NOTE(review): 0644 leaves API credentials world-readable —
+			// confirm 0600 wouldn't be more appropriate here.
+			err = os.WriteFile(apiConfigDumpFile, apiConfigDump, 0o644)
+			if err != nil {
+				return fmt.Errorf("write api credentials in '%s' failed: %s", apiConfigDumpFile, err)
+			}
+			log.Infof("Saved API credentials to %s", apiConfigDumpFile)
+		}
+	}
+
+	backupSimulation := fmt.Sprintf("%s/simulation.yaml", dirPath)
+	if _, err = os.Stat(backupSimulation); err == nil {
+		if err = types.CopyFile(backupSimulation, csConfig.ConfigPaths.SimulationFilePath); err != nil {
+			return fmt.Errorf("failed copy %s to %s : %s", backupSimulation, csConfig.ConfigPaths.SimulationFilePath, err)
+		}
+	}
+
+	/*if there is a acquisition dir, restore its content*/
+	// NOTE(review): os.Mkdir fails with EEXIST, so restoring over an already
+	// existing acquisition directory aborts here — confirm this is intended.
+	if csConfig.Crowdsec.AcquisitionDirPath != "" {
+		if err = os.Mkdir(csConfig.Crowdsec.AcquisitionDirPath, 0o700); err != nil {
+			return fmt.Errorf("error while creating %s : %s", csConfig.Crowdsec.AcquisitionDirPath, err)
+		}
+	}
+
+	// if there was a single one
+	backupAcquisition := fmt.Sprintf("%s/acquis.yaml", dirPath)
+	if _, err = os.Stat(backupAcquisition); err == nil {
+		log.Debugf("restoring backup'ed %s", backupAcquisition)
+
+		if err = types.CopyFile(backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath); err != nil {
+			return fmt.Errorf("failed copy %s to %s : %s", backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath, err)
+		}
+	}
+
+	// if there is files in the acquis backup dir, restore them
+	// (acquisBackupDir is a glob pattern here, not a plain directory path)
+	acquisBackupDir := filepath.Join(dirPath, "acquis", "*.yaml")
+	if acquisFiles, err := filepath.Glob(acquisBackupDir); err == nil {
+		for _, acquisFile := range acquisFiles {
+			// NOTE(review): string concatenation instead of filepath.Join.
+			targetFname, err := filepath.Abs(csConfig.Crowdsec.AcquisitionDirPath + "/" + filepath.Base(acquisFile))
+			if err != nil {
+				return errors.Wrapf(err, "while saving %s to %s", acquisFile, targetFname)
+			}
+
+			log.Debugf("restoring %s to %s", acquisFile, targetFname)
+
+			if err = types.CopyFile(acquisFile, targetFname); err != nil {
+				return fmt.Errorf("failed copy %s to %s : %s", acquisFile, targetFname, err)
+			}
+		}
+	}
+
+	// NOTE(review): this loop looks like backup logic left in the restore
+	// path: it copies the *live* acquisition files into acquisBackupDir,
+	// which still ends with the literal glob component "*.yaml" — so the
+	// target path contains a directory literally named "*.yaml". Confirm
+	// intent; this block probably should not exist in restore.
+	if csConfig.Crowdsec != nil && len(csConfig.Crowdsec.AcquisitionFiles) > 0 {
+		for _, acquisFile := range csConfig.Crowdsec.AcquisitionFiles {
+			log.Infof("backup filepath from dir -> %s", acquisFile)
+
+			// if it was the default one, it has already been backed up
+			if csConfig.Crowdsec.AcquisitionFilePath == acquisFile {
+				log.Infof("skip this one")
+				continue
+			}
+
+			targetFname, err := filepath.Abs(filepath.Join(acquisBackupDir, filepath.Base(acquisFile)))
+			if err != nil {
+				return errors.Wrapf(err, "while saving %s to %s", acquisFile, acquisBackupDir)
+			}
+
+			if err = types.CopyFile(acquisFile, targetFname); err != nil {
+				return fmt.Errorf("failed copy %s to %s : %s", acquisFile, targetFname, err)
+			}
+
+			log.Infof("Saved acquis %s to %s", acquisFile, targetFname)
+		}
+	}
+
+	// Hub items restore — helper defined elsewhere in this package.
+	if err = RestoreHub(dirPath); err != nil {
+		return fmt.Errorf("failed to restore hub config : %s", err)
+	}
+
+	return nil
+}
+
+
+// runConfigRestore is the RunE handler for `cscli config restore <directory>`:
+// it reads the --old-backup flag, loads the hub config and index, then
+// restores the configuration from args[0].
+func runConfigRestore(cmd *cobra.Command, args []string) error {
+	flags := cmd.Flags()
+
+	// --old-backup: restore a crowdsec v0.x backup (see NewConfigRestoreCmd).
+	oldBackup, err := flags.GetBool("old-backup")
+	if err != nil {
+		return err
+	}
+
+	if err := csConfig.LoadHub(); err != nil {
+		return err
+	}
+
+	if err := cwhub.GetHubIdx(csConfig.Hub); err != nil {
+		log.Info("Run 'sudo cscli hub update' to get the hub index")
+		return fmt.Errorf("failed to get Hub index: %w", err)
+	}
+
+	// args[0] is guaranteed present by cobra.ExactArgs(1) on the command.
+	if err := restoreConfigFromDirectory(args[0], oldBackup); err != nil {
+		return fmt.Errorf("failed to restore config from %s: %w", args[0], err)
+	}
+
+	return nil
+}
+
+
+// NewConfigRestoreCmd builds the `cscli config restore "directory"` subcommand.
+func NewConfigRestoreCmd() *cobra.Command {
+	cmdConfigRestore := &cobra.Command{
+		Use:   `restore "directory"`,
+		Short: `Restore config in backup "directory"`,
+		Long: `Restore the crowdsec configuration from specified backup "directory" including:
+
+- Main config (config.yaml)
+- Simulation config (simulation.yaml)
+- Profiles config (profiles.yaml)
+- List of scenarios, parsers, postoverflows and collections that are up-to-date
+- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections
+- Backup of API credentials (local API and online API)`,
+		Args:              cobra.ExactArgs(1),
+		DisableAutoGenTag: true,
+		RunE:              runConfigRestore,
+	}
+
+	flags := cmdConfigRestore.Flags()
+	// NOTE(review): BoolP with an empty shorthand is equivalent to Bool.
+	flags.BoolP("old-backup", "", false, "To use when you are upgrading crowdsec v0.X to v1.X and you need to restore backup from v0.X")
+
+	return cmdConfigRestore
+}

+ 217 - 0
cmd/crowdsec-cli/config_show.go

@@ -0,0 +1,217 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/antonmedv/expr"
+	"github.com/spf13/cobra"
+	"gopkg.in/yaml.v2"
+
+	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
+)
+
+// showConfigKey evaluates key as an expr expression against the loaded
+// configuration (e.g. "Config.API.Server.ListenURI") and prints the result
+// in the current output format: "human"/"raw" print the bare value, "json"
+// marshals it with indentation.
+func showConfigKey(key string) error {
+	// Env is the expression environment: the whole configuration is exposed
+	// to the expression under the top-level name "Config".
+	type Env struct {
+		Config *csconfig.Config
+	}
+
+	program, err := expr.Compile(key, expr.Env(Env{}))
+	if err != nil {
+		return err
+	}
+
+	output, err := expr.Run(program, Env{Config: csConfig})
+	if err != nil {
+		return err
+	}
+
+	switch csConfig.Cscli.Output {
+	case "human", "raw":
+		// Print scalars without quoting; anything else falls back to %v.
+		switch output.(type) {
+		case string:
+			fmt.Printf("%s\n", output)
+		case int:
+			fmt.Printf("%d\n", output)
+		default:
+			fmt.Printf("%v\n", output)
+		}
+	case "json":
+		data, err := json.MarshalIndent(output, "", "  ")
+		if err != nil {
+			return fmt.Errorf("failed to marshal configuration: %w", err)
+		}
+
+		fmt.Printf("%s\n", string(data))
+	}
+	return nil
+}
+
+// runConfigShow is the RunE handler for `cscli config show`. With --key it
+// prints a single configuration value via showConfigKey; otherwise it dumps
+// the whole configuration in the selected output format (human / json / raw).
+func runConfigShow(cmd *cobra.Command, args []string) error {
+	flags := cmd.Flags()
+
+	key, err := flags.GetString("key")
+	if err != nil {
+		return err
+	}
+
+	// --key short-circuits the full dump below.
+	if key != "" {
+		return showConfigKey(key)
+	}
+
+	switch csConfig.Cscli.Output {
+	case "human":
+		// Hand-formatted, section-by-section dump; each section is skipped
+		// when its config struct is nil.
+		fmt.Printf("Global:\n")
+
+		if csConfig.ConfigPaths != nil {
+			fmt.Printf("   - Configuration Folder   : %s\n", csConfig.ConfigPaths.ConfigDir)
+			fmt.Printf("   - Data Folder            : %s\n", csConfig.ConfigPaths.DataDir)
+			fmt.Printf("   - Hub Folder             : %s\n", csConfig.ConfigPaths.HubDir)
+			fmt.Printf("   - Simulation File        : %s\n", csConfig.ConfigPaths.SimulationFilePath)
+		}
+
+		if csConfig.Common != nil {
+			fmt.Printf("   - Log Folder             : %s\n", csConfig.Common.LogDir)
+			fmt.Printf("   - Log level              : %s\n", csConfig.Common.LogLevel)
+			fmt.Printf("   - Log Media              : %s\n", csConfig.Common.LogMedia)
+		}
+
+		if csConfig.Crowdsec != nil {
+			fmt.Printf("Crowdsec:\n")
+			fmt.Printf("  - Acquisition File        : %s\n", csConfig.Crowdsec.AcquisitionFilePath)
+			fmt.Printf("  - Parsers routines        : %d\n", csConfig.Crowdsec.ParserRoutinesCount)
+			if csConfig.Crowdsec.AcquisitionDirPath != "" {
+				fmt.Printf("  - Acquisition Folder      : %s\n", csConfig.Crowdsec.AcquisitionDirPath)
+			}
+		}
+
+		if csConfig.Cscli != nil {
+			fmt.Printf("cscli:\n")
+			fmt.Printf("  - Output                  : %s\n", csConfig.Cscli.Output)
+			fmt.Printf("  - Hub Branch              : %s\n", csConfig.Cscli.HubBranch)
+			fmt.Printf("  - Hub Folder              : %s\n", csConfig.Cscli.HubDir)
+		}
+
+		if csConfig.API != nil {
+			if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil {
+				fmt.Printf("API Client:\n")
+				fmt.Printf("  - URL                     : %s\n", csConfig.API.Client.Credentials.URL)
+				fmt.Printf("  - Login                   : %s\n", csConfig.API.Client.Credentials.Login)
+				fmt.Printf("  - Credentials File        : %s\n", csConfig.API.Client.CredentialsFilePath)
+			}
+
+			if csConfig.API.Server != nil {
+				fmt.Printf("Local API Server:\n")
+				fmt.Printf("  - Listen URL              : %s\n", csConfig.API.Server.ListenURI)
+				fmt.Printf("  - Profile File            : %s\n", csConfig.API.Server.ProfilesPath)
+
+				// TLS settings: each field printed only when set.
+				if csConfig.API.Server.TLS != nil {
+					if csConfig.API.Server.TLS.CertFilePath != "" {
+						fmt.Printf("  - Cert File : %s\n", csConfig.API.Server.TLS.CertFilePath)
+					}
+
+					if csConfig.API.Server.TLS.KeyFilePath != "" {
+						fmt.Printf("  - Key File  : %s\n", csConfig.API.Server.TLS.KeyFilePath)
+					}
+
+					if csConfig.API.Server.TLS.CACertPath != "" {
+						fmt.Printf("  - CA Cert   : %s\n", csConfig.API.Server.TLS.CACertPath)
+					}
+
+					if csConfig.API.Server.TLS.CRLPath != "" {
+						fmt.Printf("  - CRL       : %s\n", csConfig.API.Server.TLS.CRLPath)
+					}
+
+					if csConfig.API.Server.TLS.CacheExpiration != nil {
+						fmt.Printf("  - Cache Expiration : %s\n", csConfig.API.Server.TLS.CacheExpiration)
+					}
+
+					if csConfig.API.Server.TLS.ClientVerification != "" {
+						fmt.Printf("  - Client Verification : %s\n", csConfig.API.Server.TLS.ClientVerification)
+					}
+
+					if csConfig.API.Server.TLS.AllowedAgentsOU != nil {
+						for _, ou := range csConfig.API.Server.TLS.AllowedAgentsOU {
+							fmt.Printf("      - Allowed Agents OU       : %s\n", ou)
+						}
+					}
+
+					if csConfig.API.Server.TLS.AllowedBouncersOU != nil {
+						for _, ou := range csConfig.API.Server.TLS.AllowedBouncersOU {
+							fmt.Printf("      - Allowed Bouncers OU       : %s\n", ou)
+						}
+					}
+				}
+
+				fmt.Printf("  - Trusted IPs: \n")
+
+				for _, ip := range csConfig.API.Server.TrustedIPs {
+					fmt.Printf("      - %s\n", ip)
+				}
+
+				if csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.Credentials != nil {
+					fmt.Printf("Central API:\n")
+					fmt.Printf("  - URL                     : %s\n", csConfig.API.Server.OnlineClient.Credentials.URL)
+					fmt.Printf("  - Login                   : %s\n", csConfig.API.Server.OnlineClient.Credentials.Login)
+					fmt.Printf("  - Credentials File        : %s\n", csConfig.API.Server.OnlineClient.CredentialsFilePath)
+				}
+			}
+		}
+
+		if csConfig.DbConfig != nil {
+			fmt.Printf("  - Database:\n")
+			fmt.Printf("      - Type                : %s\n", csConfig.DbConfig.Type)
+
+			switch csConfig.DbConfig.Type {
+			case "sqlite":
+				fmt.Printf("      - Path                : %s\n", csConfig.DbConfig.DbPath)
+			default:
+				fmt.Printf("      - Host                : %s\n", csConfig.DbConfig.Host)
+				fmt.Printf("      - Port                : %d\n", csConfig.DbConfig.Port)
+				fmt.Printf("      - User                : %s\n", csConfig.DbConfig.User)
+				fmt.Printf("      - DB Name             : %s\n", csConfig.DbConfig.DbName)
+			}
+
+			// NOTE(review): MaxAge/MaxItems are dereferenced without their own
+			// nil checks — panics if Flush is set but a field is nil; confirm
+			// the config loader always fills these defaults.
+			if csConfig.DbConfig.Flush != nil {
+				if *csConfig.DbConfig.Flush.MaxAge != "" {
+					fmt.Printf("      - Flush age           : %s\n", *csConfig.DbConfig.Flush.MaxAge)
+				}
+				if *csConfig.DbConfig.Flush.MaxItems != 0 {
+					fmt.Printf("      - Flush size          : %d\n", *csConfig.DbConfig.Flush.MaxItems)
+				}
+			}
+		}
+	case "json":
+		data, err := json.MarshalIndent(csConfig, "", "  ")
+		if err != nil {
+			return fmt.Errorf("failed to marshal configuration: %w", err)
+		}
+
+		fmt.Printf("%s\n", string(data))
+	case "raw":
+		// "raw" dumps the configuration as YAML.
+		data, err := yaml.Marshal(csConfig)
+		if err != nil {
+			return fmt.Errorf("failed to marshal configuration: %w", err)
+		}
+
+		fmt.Printf("%s\n", string(data))
+	}
+	return nil
+}
+
+
+// NewConfigShowCmd builds the `cscli config show` subcommand.
+func NewConfigShowCmd() *cobra.Command {
+	cmdConfigShow := &cobra.Command{
+		Use:               "show",
+		Short:             "Displays current config",
+		Long:              `Displays the current cli configuration.`,
+		Args:              cobra.ExactArgs(0),
+		DisableAutoGenTag: true,
+		RunE:              runConfigShow,
+	}
+
+	flags := cmdConfigShow.Flags()
+	// NOTE(review): StringP with an empty shorthand is equivalent to String.
+	flags.StringP("key", "", "", "Display only this value (Config.API.Server.ListenURI)")
+
+	return cmdConfigShow
+}

+ 27 - 13
cmd/crowdsec-cli/console.go

@@ -46,7 +46,7 @@ func NewConsoleCmd() *cobra.Command {
 				log.Fatalf("No configuration for Central API (CAPI) in '%s'", *csConfig.FilePath)
 				log.Fatalf("No configuration for Central API (CAPI) in '%s'", *csConfig.FilePath)
 			}
 			}
 			if csConfig.API.Server.OnlineClient.Credentials == nil {
 			if csConfig.API.Server.OnlineClient.Credentials == nil {
-				log.Fatal("You must configure Central API (CAPI) with `cscli capi register` before enrolling your instance")
+				log.Fatal("You must configure Central API (CAPI) with `cscli capi register` before accessing console features.")
 			}
 			}
 			return nil
 			return nil
 		},
 		},
@@ -129,9 +129,9 @@ After running this command your will need to validate the enrollment in the weba
 	var enableAll, disableAll bool
 	var enableAll, disableAll bool
 
 
 	cmdEnable := &cobra.Command{
 	cmdEnable := &cobra.Command{
-		Use:     "enable [feature-flag]",
-		Short:   "Enable a feature flag",
-		Example: "enable tainted",
+		Use:     "enable [option]",
+		Short:   "Enable a console option",
+		Example: "sudo cscli console enable tainted",
 		Long: `
 		Long: `
 Enable given information push to the central API. Allows to empower the console`,
 Enable given information push to the central API. Allows to empower the console`,
 		ValidArgs:         csconfig.CONSOLE_CONFIGS,
 		ValidArgs:         csconfig.CONSOLE_CONFIGS,
@@ -153,13 +153,13 @@ Enable given information push to the central API. Allows to empower the console`
 			log.Infof(ReloadMessage())
 			log.Infof(ReloadMessage())
 		},
 		},
 	}
 	}
-	cmdEnable.Flags().BoolVarP(&enableAll, "all", "a", false, "Enable all feature flags")
+	cmdEnable.Flags().BoolVarP(&enableAll, "all", "a", false, "Enable all console options")
 	cmdConsole.AddCommand(cmdEnable)
 	cmdConsole.AddCommand(cmdEnable)
 
 
 	cmdDisable := &cobra.Command{
 	cmdDisable := &cobra.Command{
-		Use:     "disable [feature-flag]",
-		Short:   "Disable a feature flag",
-		Example: "disable tainted",
+		Use:     "disable [option]",
+		Short:   "Disable a console option",
+		Example: "sudo cscli console disable tainted",
 		Long: `
 		Long: `
 Disable given information push to the central API.`,
 Disable given information push to the central API.`,
 		ValidArgs:         csconfig.CONSOLE_CONFIGS,
 		ValidArgs:         csconfig.CONSOLE_CONFIGS,
@@ -183,13 +183,13 @@ Disable given information push to the central API.`,
 			log.Infof(ReloadMessage())
 			log.Infof(ReloadMessage())
 		},
 		},
 	}
 	}
-	cmdDisable.Flags().BoolVarP(&disableAll, "all", "a", false, "Enable all feature flags")
+	cmdDisable.Flags().BoolVarP(&disableAll, "all", "a", false, "Disable all console options")
 	cmdConsole.AddCommand(cmdDisable)
 	cmdConsole.AddCommand(cmdDisable)
 
 
 	cmdConsoleStatus := &cobra.Command{
 	cmdConsoleStatus := &cobra.Command{
-		Use:               "status [feature-flag]",
-		Short:             "Shows status of one or all feature flags",
-		Example:           "status tainted",
+		Use:               "status [option]",
+		Short:             "Shows status of one or all console options",
+		Example:           `sudo cscli console status`,
 		DisableAutoGenTag: true,
 		DisableAutoGenTag: true,
 		Run: func(cmd *cobra.Command, args []string) {
 		Run: func(cmd *cobra.Command, args []string) {
 			switch csConfig.Cscli.Output {
 			switch csConfig.Cscli.Output {
@@ -212,6 +212,7 @@ Disable given information push to the central API.`,
 					{"share_manual_decisions", fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareManualDecisions)},
 					{"share_manual_decisions", fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareManualDecisions)},
 					{"share_custom", fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios)},
 					{"share_custom", fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios)},
 					{"share_tainted", fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios)},
 					{"share_tainted", fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios)},
+					{"share_context", fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareContext)},
 				}
 				}
 				for _, row := range rows {
 				for _, row := range rows {
 					err = csvwriter.Write(row)
 					err = csvwriter.Write(row)
@@ -223,8 +224,8 @@ Disable given information push to the central API.`,
 			}
 			}
 		},
 		},
 	}
 	}
-
 	cmdConsole.AddCommand(cmdConsoleStatus)
 	cmdConsole.AddCommand(cmdConsoleStatus)
+
 	return cmdConsole
 	return cmdConsole
 }
 }
 
 
@@ -270,6 +271,19 @@ func SetConsoleOpts(args []string, wanted bool) {
 				log.Infof("%s set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted)
 				log.Infof("%s set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted)
 				csConfig.API.Server.ConsoleConfig.ShareManualDecisions = types.BoolPtr(wanted)
 				csConfig.API.Server.ConsoleConfig.ShareManualDecisions = types.BoolPtr(wanted)
 			}
 			}
+		case csconfig.SEND_CONTEXT:
+			/*for each flag check if it's already set before setting it*/
+			if csConfig.API.Server.ConsoleConfig.ShareContext != nil {
+				if *csConfig.API.Server.ConsoleConfig.ShareContext == wanted {
+					log.Infof("%s already set to %t", csconfig.SEND_CONTEXT, wanted)
+				} else {
+					log.Infof("%s set to %t", csconfig.SEND_CONTEXT, wanted)
+					*csConfig.API.Server.ConsoleConfig.ShareContext = wanted
+				}
+			} else {
+				log.Infof("%s set to %t", csconfig.SEND_CONTEXT, wanted)
+				csConfig.API.Server.ConsoleConfig.ShareContext = types.BoolPtr(wanted)
+			}
 		default:
 		default:
 			log.Fatalf("unknown flag %s", arg)
 			log.Fatalf("unknown flag %s", arg)
 		}
 		}

+ 6 - 0
cmd/crowdsec-cli/console_table.go

@@ -41,6 +41,12 @@ func cmdConsoleStatusTable(out io.Writer, csConfig csconfig.Config) {
 			}
 			}
 
 
 			t.AddRow(option, activated, "Send alerts from tainted scenarios to the console")
 			t.AddRow(option, activated, "Send alerts from tainted scenarios to the console")
+		case csconfig.SEND_CONTEXT:
+			activated := string(emoji.CrossMark)
+			if *csConfig.API.Server.ConsoleConfig.ShareContext {
+				activated = string(emoji.CheckMarkButton)
+			}
+			t.AddRow(option, activated, "Send context with alerts to the console")
 		}
 		}
 	}
 	}
 
 

+ 485 - 99
cmd/crowdsec-cli/lapi.go

@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"fmt"
 	"net/url"
 	"net/url"
 	"os"
 	"os"
+	"sort"
 	"strings"
 	"strings"
 
 
 	"github.com/go-openapi/strfmt"
 	"github.com/go-openapi/strfmt"
@@ -13,15 +14,186 @@ import (
 	"github.com/spf13/cobra"
 	"github.com/spf13/cobra"
 	"gopkg.in/yaml.v2"
 	"gopkg.in/yaml.v2"
 
 
+	"github.com/crowdsecurity/crowdsec/pkg/alertcontext"
 	"github.com/crowdsecurity/crowdsec/pkg/apiclient"
 	"github.com/crowdsecurity/crowdsec/pkg/apiclient"
 	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
 	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
 	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
 	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
 	"github.com/crowdsecurity/crowdsec/pkg/cwversion"
 	"github.com/crowdsecurity/crowdsec/pkg/cwversion"
+	"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
 	"github.com/crowdsecurity/crowdsec/pkg/models"
 	"github.com/crowdsecurity/crowdsec/pkg/models"
+	"github.com/crowdsecurity/crowdsec/pkg/parser"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
 )
 )
 
 
 var LAPIURLPrefix string = "v1"
 var LAPIURLPrefix string = "v1"
-var lapiUser string
+
+// runLapiStatus is the RunE handler for `cscli lapi status`: it authenticates
+// against the Local API with the configured credentials (sending the list of
+// installed scenarios, as the watcher auth request requires) and reports the
+// outcome.
+// NOTE(review): every failure path calls log.Fatalf (process exit) instead of
+// returning the error, so this RunE always returns nil — consider returning
+// errors to let cobra handle them.
+func runLapiStatus(cmd *cobra.Command, args []string) error {
+	var err error
+
+	password := strfmt.Password(csConfig.API.Client.Credentials.Password)
+	apiurl, err := url.Parse(csConfig.API.Client.Credentials.URL)
+	login := csConfig.API.Client.Credentials.Login
+	// NOTE(review): the url.Parse error is only checked after the extra
+	// assignment above — harmless, but reads better right after Parse.
+	if err != nil {
+		log.Fatalf("parsing api url ('%s'): %s", apiurl, err)
+	}
+	if err := csConfig.LoadHub(); err != nil {
+		log.Fatal(err)
+	}
+
+	if err := cwhub.GetHubIdx(csConfig.Hub); err != nil {
+		log.Info("Run 'sudo cscli hub update' to get the hub index")
+		log.Fatalf("Failed to load hub index : %s", err)
+	}
+	scenarios, err := cwhub.GetInstalledScenariosAsString()
+	if err != nil {
+		log.Fatalf("failed to get scenarios : %s", err)
+	}
+
+	// Client is a package-level apiclient handle (declared elsewhere in
+	// this package).
+	Client, err = apiclient.NewDefaultClient(apiurl,
+		LAPIURLPrefix,
+		fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()),
+		nil)
+	if err != nil {
+		log.Fatalf("init default client: %s", err)
+	}
+	t := models.WatcherAuthRequest{
+		MachineID: &login,
+		Password:  &password,
+		Scenarios: scenarios,
+	}
+	log.Infof("Loaded credentials from %s", csConfig.API.Client.CredentialsFilePath)
+	log.Infof("Trying to authenticate with username %s on %s", login, apiurl)
+	_, err = Client.Auth.AuthenticateWatcher(context.Background(), t)
+	if err != nil {
+		log.Fatalf("Failed to authenticate to Local API (LAPI) : %s", err)
+	} else {
+		log.Infof("You can successfully interact with Local API (LAPI)")
+	}
+
+	return nil
+}
+
+// runLapiRegister is the RunE handler for `cscli lapi register`: it registers
+// a machine (name from --machine, or a generated id) with a random password
+// against the Local API at --url (or the configured URL), then writes the
+// resulting credentials to --file, the configured credentials path, or stdout.
+// NOTE(review): failure paths use log.Fatalf instead of returning the error,
+// so this RunE always returns nil on the paths it reaches.
+func runLapiRegister(cmd *cobra.Command, args []string) error {
+	var err error
+
+	flags := cmd.Flags()
+
+	apiURL, err := flags.GetString("url")
+	if err != nil {
+		return err
+	}
+
+	outputFile, err := flags.GetString("file")
+	if err != nil {
+		return err
+	}
+
+	lapiUser, err := flags.GetString("machine")
+	if err != nil {
+		return err
+	}
+
+	// No --machine given: derive a machine id (helper defined elsewhere
+	// in this package).
+	if lapiUser == "" {
+		lapiUser, err = generateID("")
+		if err != nil {
+			log.Fatalf("unable to generate machine id: %s", err)
+		}
+	}
+	password := strfmt.Password(generatePassword(passwordLength))
+	if apiURL == "" {
+		if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil && csConfig.API.Client.Credentials.URL != "" {
+			apiURL = csConfig.API.Client.Credentials.URL
+		} else {
+			log.Fatalf("No Local API URL. Please provide it in your configuration or with the -u parameter")
+		}
+	}
+	/*URL needs to end with /, but user doesn't care*/
+	if !strings.HasSuffix(apiURL, "/") {
+		apiURL += "/"
+	}
+	/*URL needs to start with http://, but user doesn't care*/
+	if !strings.HasPrefix(apiURL, "http://") && !strings.HasPrefix(apiURL, "https://") {
+		apiURL = "http://" + apiURL
+	}
+	apiurl, err := url.Parse(apiURL)
+	if err != nil {
+		log.Fatalf("parsing api url: %s", err)
+	}
+	_, err = apiclient.RegisterClient(&apiclient.Config{
+		MachineID:     lapiUser,
+		Password:      password,
+		UserAgent:     fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()),
+		URL:           apiurl,
+		VersionPrefix: LAPIURLPrefix,
+	}, nil)
+
+	if err != nil {
+		log.Fatalf("api client register: %s", err)
+	}
+
+	log.Printf("Successfully registered to Local API (LAPI)")
+
+	// Destination precedence: --file, then configured credentials path,
+	// else dump to stdout.
+	var dumpFile string
+	if outputFile != "" {
+		dumpFile = outputFile
+	} else if csConfig.API.Client.CredentialsFilePath != "" {
+		dumpFile = csConfig.API.Client.CredentialsFilePath
+	} else {
+		dumpFile = ""
+	}
+	apiCfg := csconfig.ApiCredentialsCfg{
+		Login:    lapiUser,
+		Password: password.String(),
+		URL:      apiURL,
+	}
+	apiConfigDump, err := yaml.Marshal(apiCfg)
+	if err != nil {
+		log.Fatalf("unable to marshal api credentials: %s", err)
+	}
+	if dumpFile != "" {
+		// NOTE(review): 0644 leaves the credentials file world-readable —
+		// confirm 0600 wouldn't be more appropriate.
+		err = os.WriteFile(dumpFile, apiConfigDump, 0644)
+		if err != nil {
+			log.Fatalf("write api credentials in '%s' failed: %s", dumpFile, err)
+		}
+		log.Printf("Local API credentials dumped to '%s'", dumpFile)
+	} else {
+		fmt.Printf("%s\n", string(apiConfigDump))
+	}
+	log.Warning(ReloadMessage())
+
+	return nil
+}
+
+// NewLapiStatusCmd builds the `cscli lapi status` subcommand.
+func NewLapiStatusCmd() *cobra.Command {
+	cmdLapiStatus := &cobra.Command{
+		Use:               "status",
+		Short:             "Check authentication to Local API (LAPI)",
+		Args:              cobra.MinimumNArgs(0),
+		DisableAutoGenTag: true,
+		RunE:              runLapiStatus,
+	}
+
+	return cmdLapiStatus
+}
+
+// NewLapiRegisterCmd builds the `cscli lapi register` subcommand.
+// NOTE(review): "Register you machine" in the Long text should read
+// "Register your machine" (user-facing string, not altered here).
+func NewLapiRegisterCmd() *cobra.Command {
+	cmdLapiRegister := &cobra.Command{
+		Use:   "register",
+		Short: "Register a machine to Local API (LAPI)",
+		Long: `Register you machine to the Local API (LAPI).
+Keep in mind the machine needs to be validated by an administrator on LAPI side to be effective.`,
+		Args:              cobra.MinimumNArgs(0),
+		DisableAutoGenTag: true,
+		RunE:              runLapiRegister,
+	}
+
+	flags := cmdLapiRegister.Flags()
+	flags.StringP("url", "u", "", "URL of the API (ie. http://127.0.0.1)")
+	flags.StringP("file", "f", "", "output file destination")
+	flags.String("machine", "", "Name of the machine to register with")
+
+	return cmdLapiRegister
+}
 
 
 func NewLapiCmd() *cobra.Command {
 func NewLapiCmd() *cobra.Command {
 	var cmdLapi = &cobra.Command{
 	var cmdLapi = &cobra.Command{
@@ -37,138 +209,352 @@ func NewLapiCmd() *cobra.Command {
 		},
 		},
 	}
 	}
 
 
-	var cmdLapiRegister = &cobra.Command{
-		Use:   "register",
-		Short: "Register a machine to Local API (LAPI)",
-		Long: `Register you machine to the Local API (LAPI).
-Keep in mind the machine needs to be validated by an administrator on LAPI side to be effective.`,
-		Args:              cobra.MinimumNArgs(0),
+	cmdLapi.AddCommand(NewLapiRegisterCmd())
+	cmdLapi.AddCommand(NewLapiStatusCmd())
+	cmdLapi.AddCommand(NewLapiContextCmd())
+
+	return cmdLapi
+}
+
+func NewLapiContextCmd() *cobra.Command {
+	cmdContext := &cobra.Command{
+		Use:               "context [command]",
+		Short:             "Manage context to send with alerts",
 		DisableAutoGenTag: true,
 		DisableAutoGenTag: true,
-		Run: func(cmd *cobra.Command, args []string) {
-			var err error
-			if lapiUser == "" {
-				lapiUser, err = generateID("")
-				if err != nil {
-					log.Fatalf("unable to generate machine id: %s", err)
+		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
+			if err := csConfig.LoadCrowdsec(); err != nil {
+				fileNotFoundMessage := fmt.Sprintf("failed to open context file: open %s: no such file or directory", csConfig.Crowdsec.ConsoleContextPath)
+				if err.Error() != fileNotFoundMessage {
+					log.Fatalf("Unable to load CrowdSec Agent: %s", err)
 				}
 				}
 			}
 			}
-			password := strfmt.Password(generatePassword(passwordLength))
-			if apiURL == "" {
-				if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil && csConfig.API.Client.Credentials.URL != "" {
-					apiURL = csConfig.API.Client.Credentials.URL
-				} else {
-					log.Fatalf("No Local API URL. Please provide it in your configuration or with the -u parameter")
+			if csConfig.DisableAgent {
+				log.Fatalf("Agent is disabled and lapi context can only be used on the agent")
+			}
+
+			return nil
+		},
+		Run: func(cmd *cobra.Command, args []string) {
+			printHelp(cmd)
+		},
+	}
+
+	var keyToAdd string
+	var valuesToAdd []string
+	cmdContextAdd := &cobra.Command{
+		Use:   "add",
+		Short: "Add context to send with alerts. You must specify the output key with the expr value you want",
+		Example: `cscli lapi context add --key source_ip --value evt.Meta.source_ip
+cscli lapi context add --key file_source --value evt.Line.Src
+		`,
+		DisableAutoGenTag: true,
+		Run: func(cmd *cobra.Command, args []string) {
+			if err := alertcontext.ValidateContextExpr(keyToAdd, valuesToAdd); err != nil {
+				log.Fatalf("invalid context configuration :%s", err)
+			}
+			if _, ok := csConfig.Crowdsec.ContextToSend[keyToAdd]; !ok {
+				csConfig.Crowdsec.ContextToSend[keyToAdd] = make([]string, 0)
+				log.Infof("key '%s' added", keyToAdd)
+			}
+			data := csConfig.Crowdsec.ContextToSend[keyToAdd]
+			for _, val := range valuesToAdd {
+				if !inSlice(val, data) {
+					log.Infof("value '%s' added to key '%s'", val, keyToAdd)
+					data = append(data, val)
 				}
 				}
+				csConfig.Crowdsec.ContextToSend[keyToAdd] = data
 			}
 			}
-			/*URL needs to end with /, but user doesn't care*/
-			if !strings.HasSuffix(apiURL, "/") {
-				apiURL += "/"
+			if err := csConfig.Crowdsec.DumpContextConfigFile(); err != nil {
+				log.Fatalf(err.Error())
 			}
 			}
-			/*URL needs to start with http://, but user doesn't care*/
-			if !strings.HasPrefix(apiURL, "http://") && !strings.HasPrefix(apiURL, "https://") {
-				apiURL = "http://" + apiURL
+		},
+	}
+	cmdContextAdd.Flags().StringVarP(&keyToAdd, "key", "k", "", "The key of the different values to send")
+	cmdContextAdd.Flags().StringSliceVar(&valuesToAdd, "value", []string{}, "The expr fields to associate with the key")
+	cmdContextAdd.MarkFlagRequired("key")
+	cmdContextAdd.MarkFlagRequired("value")
+	cmdContext.AddCommand(cmdContextAdd)
+
+	cmdContextStatus := &cobra.Command{
+		Use:               "status",
+		Short:             "List context to send with alerts",
+		DisableAutoGenTag: true,
+		Run: func(cmd *cobra.Command, args []string) {
+			if len(csConfig.Crowdsec.ContextToSend) == 0 {
+				fmt.Println("No context found on this agent. You can use 'cscli lapi context add' to add context to your alerts.")
+				return
 			}
 			}
-			apiurl, err := url.Parse(apiURL)
+
+			dump, err := yaml.Marshal(csConfig.Crowdsec.ContextToSend)
 			if err != nil {
 			if err != nil {
-				log.Fatalf("parsing api url: %s", err)
+				log.Fatalf("unable to show context status: %s", err)
 			}
 			}
-			_, err = apiclient.RegisterClient(&apiclient.Config{
-				MachineID:     lapiUser,
-				Password:      password,
-				UserAgent:     fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()),
-				URL:           apiurl,
-				VersionPrefix: LAPIURLPrefix,
-			}, nil)
 
 
+			fmt.Println(string(dump))
+
+		},
+	}
+	cmdContext.AddCommand(cmdContextStatus)
+
+	var detectAll bool
+	cmdContextDetect := &cobra.Command{
+		Use:   "detect",
+		Short: "Detect available fields from the installed parsers",
+		Example: `cscli lapi context detect --all
+cscli lapi context detect crowdsecurity/sshd-logs
+		`,
+		DisableAutoGenTag: true,
+		Run: func(cmd *cobra.Command, args []string) {
+			var err error
+
+			if !detectAll && len(args) == 0 {
+				log.Infof("Please provide parsers to detect or --all flag.")
+				printHelp(cmd)
+			}
+
+			// to avoid all the log.Info from the loaders functions
+			log.SetLevel(log.ErrorLevel)
+
+			err = exprhelpers.Init(nil)
 			if err != nil {
 			if err != nil {
-				log.Fatalf("api client register: %s", err)
+				log.Fatalf("Failed to init expr helpers : %s", err)
+			}
+
+			// Populate cwhub package tools
+			if err := cwhub.GetHubIdx(csConfig.Hub); err != nil {
+				log.Fatalf("Failed to load hub index : %s", err)
 			}
 			}
 
 
-			log.Printf("Successfully registered to Local API (LAPI)")
+			csParsers := parser.NewParsers()
+			if csParsers, err = parser.LoadParsers(csConfig, csParsers); err != nil {
+				log.Fatalf("unable to load parsers: %s", err)
+			}
+
+			fieldByParsers := make(map[string][]string)
+			for _, node := range csParsers.Nodes {
+				if !detectAll && !inSlice(node.Name, args) {
+					continue
+				}
+				if !detectAll {
+					args = removeFromSlice(node.Name, args)
+				}
+				fieldByParsers[node.Name] = make([]string, 0)
+				fieldByParsers[node.Name] = detectNode(node, *csParsers.Ctx)
+
+				subNodeFields := detectSubNode(node, *csParsers.Ctx)
+				for _, field := range subNodeFields {
+					if !inSlice(field, fieldByParsers[node.Name]) {
+						fieldByParsers[node.Name] = append(fieldByParsers[node.Name], field)
+					}
+				}
 
 
-			var dumpFile string
-			if outputFile != "" {
-				dumpFile = outputFile
-			} else if csConfig.API.Client.CredentialsFilePath != "" {
-				dumpFile = csConfig.API.Client.CredentialsFilePath
-			} else {
-				dumpFile = ""
 			}
 			}
-			apiCfg := csconfig.ApiCredentialsCfg{
-				Login:    lapiUser,
-				Password: password.String(),
-				URL:      apiURL,
+
+			fmt.Printf("Acquisition :\n\n")
+			fmt.Printf("  - evt.Line.Module\n")
+			fmt.Printf("  - evt.Line.Raw\n")
+			fmt.Printf("  - evt.Line.Src\n")
+			fmt.Println()
+
+			parsersKey := make([]string, 0)
+			for k := range fieldByParsers {
+				parsersKey = append(parsersKey, k)
 			}
 			}
-			apiConfigDump, err := yaml.Marshal(apiCfg)
-			if err != nil {
-				log.Fatalf("unable to marshal api credentials: %s", err)
+			sort.Strings(parsersKey)
+
+			for _, k := range parsersKey {
+				if len(fieldByParsers[k]) == 0 {
+					continue
+				}
+				fmt.Printf("%s :\n\n", k)
+				values := fieldByParsers[k]
+				sort.Strings(values)
+				for _, value := range values {
+					fmt.Printf("  - %s\n", value)
+				}
+				fmt.Println()
 			}
 			}
-			if dumpFile != "" {
-				err = os.WriteFile(dumpFile, apiConfigDump, 0644)
-				if err != nil {
-					log.Fatalf("write api credentials in '%s' failed: %s", dumpFile, err)
+
+			if len(args) > 0 {
+				for _, parserNotFound := range args {
+					log.Errorf("parser '%s' not found, can't detect fields", parserNotFound)
 				}
 				}
-				log.Printf("Local API credentials dumped to '%s'", dumpFile)
-			} else {
-				fmt.Printf("%s\n", string(apiConfigDump))
 			}
 			}
-			log.Warning(ReloadMessage())
 		},
 		},
 	}
 	}
-	cmdLapiRegister.Flags().StringVarP(&apiURL, "url", "u", "", "URL of the API (ie. http://127.0.0.1)")
-	cmdLapiRegister.Flags().StringVarP(&outputFile, "file", "f", "", "output file destination")
-	cmdLapiRegister.Flags().StringVar(&lapiUser, "machine", "", "Name of the machine to register with")
-	cmdLapi.AddCommand(cmdLapiRegister)
+	cmdContextDetect.Flags().BoolVarP(&detectAll, "all", "a", false, "Detect evt field for all installed parser")
+	cmdContext.AddCommand(cmdContextDetect)
 
 
-	var cmdLapiStatus = &cobra.Command{
-		Use:               "status",
-		Short:             "Check authentication to Local API (LAPI)",
-		Args:              cobra.MinimumNArgs(0),
+	var keysToDelete []string
+	var valuesToDelete []string
+	cmdContextDelete := &cobra.Command{
+		Use:   "delete",
+		Short: "Delete context to send with alerts",
+		Example: `cscli lapi context delete --key source_ip
+cscli lapi context delete --value evt.Line.Src
+		`,
 		DisableAutoGenTag: true,
 		DisableAutoGenTag: true,
 		Run: func(cmd *cobra.Command, args []string) {
 		Run: func(cmd *cobra.Command, args []string) {
-			var err error
+			if len(keysToDelete) == 0 && len(valuesToDelete) == 0 {
+				log.Fatalf("please provide at least a key or a value to delete")
+			}
 
 
-			password := strfmt.Password(csConfig.API.Client.Credentials.Password)
-			apiurl, err := url.Parse(csConfig.API.Client.Credentials.URL)
-			login := csConfig.API.Client.Credentials.Login
-			if err != nil {
-				log.Fatalf("parsing api url ('%s'): %s", apiurl, err)
+			for _, key := range keysToDelete {
+				if _, ok := csConfig.Crowdsec.ContextToSend[key]; ok {
+					delete(csConfig.Crowdsec.ContextToSend, key)
+					log.Infof("key '%s' has been removed", key)
+				} else {
+					log.Warningf("key '%s' doesn't exist", key)
+				}
+			}
+
+			for _, value := range valuesToDelete {
+				valueFound := false
+				for key, context := range csConfig.Crowdsec.ContextToSend {
+					if inSlice(value, context) {
+						valueFound = true
+						csConfig.Crowdsec.ContextToSend[key] = removeFromSlice(value, context)
+						log.Infof("value '%s' has been removed from key '%s'", value, key)
+					}
+					if len(csConfig.Crowdsec.ContextToSend[key]) == 0 {
+						delete(csConfig.Crowdsec.ContextToSend, key)
+					}
+				}
+				if !valueFound {
+					log.Warningf("value '%s' not found", value)
+				}
 			}
 			}
-			if err := csConfig.LoadHub(); err != nil {
-				log.Fatal(err)
+
+			if err := csConfig.Crowdsec.DumpContextConfigFile(); err != nil {
+				log.Fatalf(err.Error())
 			}
 			}
 
 
-			if err := cwhub.GetHubIdx(csConfig.Hub); err != nil {
-				log.Info("Run 'sudo cscli hub update' to get the hub index")
-				log.Fatalf("Failed to load hub index : %s", err)
+		},
+	}
+	cmdContextDelete.Flags().StringSliceVarP(&keysToDelete, "key", "k", []string{}, "The keys to delete")
+	cmdContextDelete.Flags().StringSliceVar(&valuesToDelete, "value", []string{}, "The expr fields to delete")
+	cmdContext.AddCommand(cmdContextDelete)
+
+	return cmdContext
+}
+
+func detectStaticField(GrokStatics []types.ExtraField) []string {
+	ret := make([]string, 0)
+	for _, static := range GrokStatics {
+		if static.Parsed != "" {
+			fieldName := fmt.Sprintf("evt.Parsed.%s", static.Parsed)
+			if !inSlice(fieldName, ret) {
+				ret = append(ret, fieldName)
 			}
 			}
-			scenarios, err := cwhub.GetInstalledScenariosAsString()
-			if err != nil {
-				log.Fatalf("failed to get scenarios : %s", err)
+		}
+		if static.Meta != "" {
+			fieldName := fmt.Sprintf("evt.Meta.%s", static.Meta)
+			if !inSlice(fieldName, ret) {
+				ret = append(ret, fieldName)
+			}
+		}
+		if static.TargetByName != "" {
+			fieldName := static.TargetByName
+			if !strings.HasPrefix(fieldName, "evt.") {
+				fieldName = "evt." + fieldName
 			}
 			}
+			if !inSlice(fieldName, ret) {
+				ret = append(ret, fieldName)
+			}
+		}
+	}
 
 
-			Client, err = apiclient.NewDefaultClient(apiurl,
-				LAPIURLPrefix,
-				fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()),
-				nil)
-			if err != nil {
-				log.Fatalf("init default client: %s", err)
+	return ret
+}
+
+func detectNode(node parser.Node, parserCTX parser.UnixParserCtx) []string {
+	var ret = make([]string, 0)
+	if node.Grok.RunTimeRegexp != nil {
+		for _, capturedField := range node.Grok.RunTimeRegexp.Names() {
+			fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField)
+			if !inSlice(fieldName, ret) {
+				ret = append(ret, fieldName)
+			}
+		}
+	}
+
+	if node.Grok.RegexpName != "" {
+		grokCompiled, err := parserCTX.Grok.Get(node.Grok.RegexpName)
+		if err != nil {
+			log.Warningf("Can't get subgrok: %s", err)
+		}
+		for _, capturedField := range grokCompiled.Names() {
+			fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField)
+			if !inSlice(fieldName, ret) {
+				ret = append(ret, fieldName)
+			}
+		}
+	}
+
+	if len(node.Grok.Statics) > 0 {
+		staticsField := detectStaticField(node.Grok.Statics)
+		for _, staticField := range staticsField {
+			if !inSlice(staticField, ret) {
+				ret = append(ret, staticField)
 			}
 			}
-			t := models.WatcherAuthRequest{
-				MachineID: &login,
-				Password:  &password,
-				Scenarios: scenarios,
+		}
+	}
+
+	if len(node.Statics) > 0 {
+		staticsField := detectStaticField(node.Statics)
+		for _, staticField := range staticsField {
+			if !inSlice(staticField, ret) {
+				ret = append(ret, staticField)
 			}
 			}
-			log.Infof("Loaded credentials from %s", csConfig.API.Client.CredentialsFilePath)
-			log.Infof("Trying to authenticate with username %s on %s", login, apiurl)
-			_, err = Client.Auth.AuthenticateWatcher(context.Background(), t)
+		}
+	}
+
+	return ret
+}
+
+func detectSubNode(node parser.Node, parserCTX parser.UnixParserCtx) []string {
+	var ret = make([]string, 0)
+
+	for _, subnode := range node.LeavesNodes {
+		if subnode.Grok.RunTimeRegexp != nil {
+			for _, capturedField := range subnode.Grok.RunTimeRegexp.Names() {
+				fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField)
+				if !inSlice(fieldName, ret) {
+					ret = append(ret, fieldName)
+				}
+			}
+		}
+		if subnode.Grok.RegexpName != "" {
+			grokCompiled, err := parserCTX.Grok.Get(subnode.Grok.RegexpName)
 			if err != nil {
 			if err != nil {
-				log.Fatalf("Failed to authenticate to Local API (LAPI) : %s", err)
-			} else {
-				log.Infof("You can successfully interact with Local API (LAPI)")
+				log.Warningf("Can't get subgrok: %s", err)
 			}
 			}
-		},
+			for _, capturedField := range grokCompiled.Names() {
+				fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField)
+				if !inSlice(fieldName, ret) {
+					ret = append(ret, fieldName)
+				}
+			}
+		}
+
+		if len(subnode.Grok.Statics) > 0 {
+			staticsField := detectStaticField(subnode.Grok.Statics)
+			for _, staticField := range staticsField {
+				if !inSlice(staticField, ret) {
+					ret = append(ret, staticField)
+				}
+			}
+		}
+
+		if len(subnode.Statics) > 0 {
+			staticsField := detectStaticField(subnode.Statics)
+			for _, staticField := range staticsField {
+				if !inSlice(staticField, ret) {
+					ret = append(ret, staticField)
+				}
+			}
+		}
 	}
 	}
-	cmdLapi.AddCommand(cmdLapiStatus)
-	return cmdLapi
+
+	return ret
 }
 }

+ 183 - 137
cmd/crowdsec-cli/machines.go

@@ -29,22 +29,15 @@ import (
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 )
 )
 
 
-var machineID string
-var machinePassword string
-var interactive bool
-var apiURL string
-var outputFile string
-var forceAdd bool
-var autoAdd bool
-
 var (
 var (
 	passwordLength = 64
 	passwordLength = 64
-	upper          = "ABCDEFGHIJKLMNOPQRSTUVWXY"
-	lower          = "abcdefghijklmnopqrstuvwxyz"
-	digits         = "0123456789"
 )
 )
 
 
 func generatePassword(length int) string {
 func generatePassword(length int) string {
+	upper  := "ABCDEFGHIJKLMNOPQRSTUVWXY"
+	lower  := "abcdefghijklmnopqrstuvwxyz"
+	digits := "0123456789"
+
 	charset := upper + lower + digits
 	charset := upper + lower + digits
 	charsetLength := len(charset)
 	charsetLength := len(charset)
 
 
@@ -149,32 +142,8 @@ func getAgents(out io.Writer, dbClient *database.Client) error {
 	return nil
 	return nil
 }
 }
 
 
-func NewMachinesCmd() *cobra.Command {
-	/* ---- DECISIONS COMMAND */
-	var cmdMachines = &cobra.Command{
-		Use:   "machines [action]",
-		Short: "Manage local API machines [requires local API]",
-		Long: `To list/add/delete/validate machines.
-Note: This command requires database direct access, so is intended to be run on the local API machine.
-`,
-		Example:           `cscli machines [action]`,
-		DisableAutoGenTag: true,
-		Aliases:           []string{"machine"},
-		PersistentPreRun: func(cmd *cobra.Command, args []string) {
-			if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI {
-				if err != nil {
-					log.Errorf("local api : %s", err)
-				}
-				log.Fatal("Local API is disabled, please run this command on the local API machine")
-			}
-			if err := csConfig.LoadDBConfig(); err != nil {
-				log.Errorf("This command requires direct database access (must be run on the local API machine)")
-				log.Fatal(err)
-			}
-		},
-	}
-
-	var cmdMachinesList = &cobra.Command{
+func NewMachinesListCmd() *cobra.Command {
+	cmdMachinesList := &cobra.Command{
 		Use:               "list",
 		Use:               "list",
 		Short:             "List machines",
 		Short:             "List machines",
 		Long:              `List `,
 		Long:              `List `,
@@ -195,9 +164,12 @@ Note: This command requires database direct access, so is intended to be run on
 			}
 			}
 		},
 		},
 	}
 	}
-	cmdMachines.AddCommand(cmdMachinesList)
 
 
-	var cmdMachinesAdd = &cobra.Command{
+	return cmdMachinesList
+}
+
+func NewMachinesAddCmd() *cobra.Command {
+	cmdMachinesAdd := &cobra.Command{
 		Use:               "add",
 		Use:               "add",
 		Short:             "add machine to the database.",
 		Short:             "add machine to the database.",
 		DisableAutoGenTag: true,
 		DisableAutoGenTag: true,
@@ -214,93 +186,135 @@ cscli machines add MyTestMachine --password MyPassword
 				log.Fatalf("unable to create new database client: %s", err)
 				log.Fatalf("unable to create new database client: %s", err)
 			}
 			}
 		},
 		},
-		Run: func(cmd *cobra.Command, args []string) {
-			var dumpFile string
-			var err error
+		RunE: runMachinesAdd,
+	}
 
 
-			// create machineID if not specified by user
-			if len(args) == 0 {
-				if !autoAdd {
-					printHelp(cmd)
-					return
-				}
-				machineID, err = generateID("")
-				if err != nil {
-					log.Fatalf("unable to generate machine id : %s", err)
-				}
-			} else {
-				machineID = args[0]
-			}
+	flags := cmdMachinesAdd.Flags()
+	flags.StringP("password", "p", "", "machine password to login to the API")
+	flags.StringP("file", "f", "", "output file destination (defaults to "+csconfig.DefaultConfigPath("local_api_credentials.yaml"))
+	flags.StringP("url", "u", "", "URL of the local API")
+	flags.BoolP("interactive", "i", false, "interfactive mode to enter the password")
+	flags.BoolP("auto", "a", false, "automatically generate password (and username if not provided)")
+	flags.Bool("force", false, "will force add the machine if it already exist")
 
 
-			/*check if file already exists*/
-			if outputFile != "" {
-				dumpFile = outputFile
-			} else if csConfig.API.Client != nil && csConfig.API.Client.CredentialsFilePath != "" {
-				dumpFile = csConfig.API.Client.CredentialsFilePath
-			}
+	return cmdMachinesAdd
+}
 
 
-			// create a password if it's not specified by user
-			if machinePassword == "" && !interactive {
-				if !autoAdd {
-					printHelp(cmd)
-					return
-				}
-				machinePassword = generatePassword(passwordLength)
-			} else if machinePassword == "" && interactive {
-				qs := &survey.Password{
-					Message: "Please provide a password for the machine",
-				}
-				survey.AskOne(qs, &machinePassword)
-			}
-			password := strfmt.Password(machinePassword)
-			_, err = dbClient.CreateMachine(&machineID, &password, "", true, forceAdd, types.PasswordAuthType)
-			if err != nil {
-				log.Fatalf("unable to create machine: %s", err)
-			}
-			log.Infof("Machine '%s' successfully added to the local API", machineID)
-
-			if apiURL == "" {
-				if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil && csConfig.API.Client.Credentials.URL != "" {
-					apiURL = csConfig.API.Client.Credentials.URL
-				} else if csConfig.API.Server != nil && csConfig.API.Server.ListenURI != "" {
-					apiURL = "http://" + csConfig.API.Server.ListenURI
-				} else {
-					log.Fatalf("unable to dump an api URL. Please provide it in your configuration or with the -u parameter")
-				}
-			}
-			apiCfg := csconfig.ApiCredentialsCfg{
-				Login:    machineID,
-				Password: password.String(),
-				URL:      apiURL,
-			}
-			apiConfigDump, err := yaml.Marshal(apiCfg)
-			if err != nil {
-				log.Fatalf("unable to marshal api credentials: %s", err)
-			}
-			if dumpFile != "" && dumpFile != "-" {
-				err = os.WriteFile(dumpFile, apiConfigDump, 0644)
-				if err != nil {
-					log.Fatalf("write api credentials in '%s' failed: %s", dumpFile, err)
-				}
-				log.Printf("API credentials dumped to '%s'", dumpFile)
-			} else {
-				fmt.Printf("%s\n", string(apiConfigDump))
-			}
-		},
+func runMachinesAdd(cmd *cobra.Command, args []string) error {
+	var dumpFile string
+	var err error
+
+	flags := cmd.Flags()
+
+	machinePassword, err := flags.GetString("password")
+	if err != nil {
+		return err
+	}
+
+	outputFile, err := flags.GetString("file")
+	if err != nil {
+		return err
+	}
+
+	apiURL, err := flags.GetString("url")
+	if err != nil {
+		return err
 	}
 	}
-	cmdMachinesAdd.Flags().StringVarP(&machinePassword, "password", "p", "", "machine password to login to the API")
-	cmdMachinesAdd.Flags().StringVarP(&outputFile, "file", "f", "",
-		"output file destination (defaults to "+csconfig.DefaultConfigPath("local_api_credentials.yaml"))
-	cmdMachinesAdd.Flags().StringVarP(&apiURL, "url", "u", "", "URL of the local API")
-	cmdMachinesAdd.Flags().BoolVarP(&interactive, "interactive", "i", false, "interfactive mode to enter the password")
-	cmdMachinesAdd.Flags().BoolVarP(&autoAdd, "auto", "a", false, "automatically generate password (and username if not provided)")
-	cmdMachinesAdd.Flags().BoolVar(&forceAdd, "force", false, "will force add the machine if it already exist")
-	cmdMachines.AddCommand(cmdMachinesAdd)
-
-	var cmdMachinesDelete = &cobra.Command{
-		Use:               "delete --machine MyTestMachine",
+
+	interactive, err := flags.GetBool("interactive")
+	if err != nil {
+		return err
+	}
+
+	autoAdd, err := flags.GetBool("auto")
+	if err != nil {
+		return err
+	}
+
+	forceAdd, err := flags.GetBool("force")
+	if err != nil {
+		return err
+	}
+
+	var machineID string
+
+	// create machineID if not specified by user
+	if len(args) == 0 {
+		if !autoAdd {
+			printHelp(cmd)
+			return nil
+		}
+		machineID, err = generateID("")
+		if err != nil {
+			log.Fatalf("unable to generate machine id : %s", err)
+		}
+	} else {
+		machineID = args[0]
+	}
+
+	/*check if file already exists*/
+	if outputFile != "" {
+		dumpFile = outputFile
+	} else if csConfig.API.Client != nil && csConfig.API.Client.CredentialsFilePath != "" {
+		dumpFile = csConfig.API.Client.CredentialsFilePath
+	}
+
+	// create a password if it's not specified by user
+	if machinePassword == "" && !interactive {
+		if !autoAdd {
+			printHelp(cmd)
+			return nil
+		}
+		machinePassword = generatePassword(passwordLength)
+	} else if machinePassword == "" && interactive {
+		qs := &survey.Password{
+			Message: "Please provide a password for the machine",
+		}
+		survey.AskOne(qs, &machinePassword)
+	}
+	password := strfmt.Password(machinePassword)
+	_, err = dbClient.CreateMachine(&machineID, &password, "", true, forceAdd, types.PasswordAuthType)
+	if err != nil {
+		log.Fatalf("unable to create machine: %s", err)
+	}
+	log.Infof("Machine '%s' successfully added to the local API", machineID)
+
+	if apiURL == "" {
+		if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil && csConfig.API.Client.Credentials.URL != "" {
+			apiURL = csConfig.API.Client.Credentials.URL
+		} else if csConfig.API.Server != nil && csConfig.API.Server.ListenURI != "" {
+			apiURL = "http://" + csConfig.API.Server.ListenURI
+		} else {
+			log.Fatalf("unable to dump an api URL. Please provide it in your configuration or with the -u parameter")
+		}
+	}
+	apiCfg := csconfig.ApiCredentialsCfg{
+		Login:    machineID,
+		Password: password.String(),
+		URL:      apiURL,
+	}
+	apiConfigDump, err := yaml.Marshal(apiCfg)
+	if err != nil {
+		log.Fatalf("unable to marshal api credentials: %s", err)
+	}
+	if dumpFile != "" && dumpFile != "-" {
+		err = os.WriteFile(dumpFile, apiConfigDump, 0644)
+		if err != nil {
+			log.Fatalf("write api credentials in '%s' failed: %s", dumpFile, err)
+		}
+		log.Printf("API credentials dumped to '%s'", dumpFile)
+	} else {
+		fmt.Printf("%s\n", string(apiConfigDump))
+	}
+
+	return nil
+}
+
+func NewMachinesDeleteCmd() *cobra.Command {
+	cmdMachinesDelete := &cobra.Command{
+		Use:               "delete [machine_name]...",
 		Short:             "delete machines",
 		Short:             "delete machines",
-		Example:           `cscli machines delete "machine_name"`,
+		Example:           `cscli machines delete "machine1" "machine2"`,
 		Args:              cobra.MinimumNArgs(1),
 		Args:              cobra.MinimumNArgs(1),
 		Aliases:           []string{"remove"},
 		Aliases:           []string{"remove"},
 		DisableAutoGenTag: true,
 		DisableAutoGenTag: true,
@@ -330,22 +344,27 @@ cscli machines add MyTestMachine --password MyPassword
 			}
 			}
 			return ret, cobra.ShellCompDirectiveNoFileComp
 			return ret, cobra.ShellCompDirectiveNoFileComp
 		},
 		},
-		Run: func(cmd *cobra.Command, args []string) {
-			machineID = args[0]
-			for _, machineID := range args {
-				err := dbClient.DeleteWatcher(machineID)
-				if err != nil {
-					log.Errorf("unable to delete machine '%s': %s", machineID, err)
-					return
-				}
-				log.Infof("machine '%s' deleted successfully", machineID)
-			}
-		},
+		RunE: runMachinesDelete,
 	}
 	}
-	cmdMachinesDelete.Flags().StringVarP(&machineID, "machine", "m", "", "machine to delete")
-	cmdMachines.AddCommand(cmdMachinesDelete)
 
 
-	var cmdMachinesValidate = &cobra.Command{
+	return cmdMachinesDelete
+}
+
+func runMachinesDelete(cmd *cobra.Command, args []string) error {
+	for _, machineID := range args {
+		err := dbClient.DeleteWatcher(machineID)
+		if err != nil {
+			log.Errorf("unable to delete machine '%s': %s", machineID, err)
+			return nil
+		}
+		log.Infof("machine '%s' deleted successfully", machineID)
+	}
+
+	return nil
+}
+
+func NewMachinesValidateCmd() *cobra.Command {
+	cmdMachinesValidate := &cobra.Command{
 		Use:               "validate",
 		Use:               "validate",
 		Short:             "validate a machine to access the local API",
 		Short:             "validate a machine to access the local API",
 		Long:              `validate a machine to access the local API.`,
 		Long:              `validate a machine to access the local API.`,
@@ -360,14 +379,41 @@ cscli machines add MyTestMachine --password MyPassword
 			}
 			}
 		},
 		},
 		Run: func(cmd *cobra.Command, args []string) {
 		Run: func(cmd *cobra.Command, args []string) {
-			machineID = args[0]
+			machineID := args[0]
 			if err := dbClient.ValidateMachine(machineID); err != nil {
 			if err := dbClient.ValidateMachine(machineID); err != nil {
 				log.Fatalf("unable to validate machine '%s': %s", machineID, err)
 				log.Fatalf("unable to validate machine '%s': %s", machineID, err)
 			}
 			}
 			log.Infof("machine '%s' validated successfully", machineID)
 			log.Infof("machine '%s' validated successfully", machineID)
 		},
 		},
 	}
 	}
-	cmdMachines.AddCommand(cmdMachinesValidate)
+
+	return cmdMachinesValidate
+}
+
+func NewMachinesCmd() *cobra.Command {
+	var cmdMachines = &cobra.Command{
+		Use:   "machines [action]",
+		Short: "Manage local API machines [requires local API]",
+		Long: `To list/add/delete/validate machines.
+Note: This command requires database direct access, so is intended to be run on the local API machine.
+`,
+		Example:           `cscli machines [action]`,
+		DisableAutoGenTag: true,
+		Aliases:           []string{"machine"},
+		PersistentPreRun: func(cmd *cobra.Command, args []string) {
+			if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI {
+				if err != nil {
+					log.Errorf("local api : %s", err)
+				}
+				log.Fatal("Local API is disabled, please run this command on the local API machine")
+			}
+		},
+	}
+
+	cmdMachines.AddCommand(NewMachinesListCmd())
+	cmdMachines.AddCommand(NewMachinesAddCmd())
+	cmdMachines.AddCommand(NewMachinesDeleteCmd())
+	cmdMachines.AddCommand(NewMachinesValidateCmd())
 
 
 	return cmdMachines
 	return cmdMachines
 }
 }

+ 20 - 4
cmd/crowdsec-cli/main.go

@@ -18,6 +18,7 @@ import (
 	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
 	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
 	"github.com/crowdsecurity/crowdsec/pkg/cwversion"
 	"github.com/crowdsecurity/crowdsec/pkg/cwversion"
 	"github.com/crowdsecurity/crowdsec/pkg/database"
 	"github.com/crowdsecurity/crowdsec/pkg/database"
+	"github.com/crowdsecurity/crowdsec/pkg/fflag"
 )
 )
 
 
 var bincoverTesting = ""
 var bincoverTesting = ""
@@ -35,7 +36,6 @@ var downloadOnly bool
 var forceAction bool
 var forceAction bool
 var purge bool
 var purge bool
 var all bool
 var all bool
-var restoreOldBackup bool
 
 
 var prometheusURL string
 var prometheusURL string
 
 
@@ -52,11 +52,9 @@ func initConfig() {
 	} else if err_lvl {
 	} else if err_lvl {
 		log.SetLevel(log.ErrorLevel)
 		log.SetLevel(log.ErrorLevel)
 	}
 	}
-	logFormatter := &log.TextFormatter{TimestampFormat: "02-01-2006 15:04:05", FullTimestamp: true}
-	log.SetFormatter(logFormatter)
 
 
 	if !inSlice(os.Args[1], NoNeedConfig) {
 	if !inSlice(os.Args[1], NoNeedConfig) {
-		csConfig, err = csconfig.NewConfig(ConfigFilePath, false, false)
+		csConfig, err = csconfig.NewConfig(ConfigFilePath, false, false, true)
 		if err != nil {
 		if err != nil {
 			log.Fatal(err)
 			log.Fatal(err)
 		}
 		}
@@ -68,6 +66,11 @@ func initConfig() {
 		csConfig = csconfig.NewDefaultConfig()
 		csConfig = csconfig.NewDefaultConfig()
 	}
 	}
 
 
+	featurePath := filepath.Join(csConfig.ConfigPaths.ConfigDir, "feature.yaml")
+	if err = fflag.Crowdsec.SetFromYamlFile(featurePath, log.StandardLogger()); err != nil {
+		log.Fatalf("File %s: %s", featurePath, err)
+	}
+
 	if csConfig.Cscli == nil {
 	if csConfig.Cscli == nil {
 		log.Fatalf("missing 'cscli' configuration in '%s', exiting", ConfigFilePath)
 		log.Fatalf("missing 'cscli' configuration in '%s', exiting", ConfigFilePath)
 	}
 	}
@@ -130,6 +133,19 @@ var (
 )
 )
 
 
 func main() {
 func main() {
+	// set the formatter asap and worry about level later
+	logFormatter := &log.TextFormatter{TimestampFormat: "02-01-2006 15:04:05", FullTimestamp: true}
+	log.SetFormatter(logFormatter)
+
+	if err := fflag.RegisterAllFeatures(); err != nil {
+		log.Fatalf("failed to register features: %s", err)
+	}
+
+	// some features can require configuration or command-line options,
+	// so we need to parse them asap. we'll load from feature.yaml later.
+	if err := fflag.Crowdsec.SetFromEnv(log.StandardLogger()); err != nil {
+		log.Fatalf("failed to set features from environment: %s", err)
+	}
 
 
 	var rootCmd = &cobra.Command{
 	var rootCmd = &cobra.Command{
 		Use:   "cscli",
 		Use:   "cscli",

+ 14 - 2
cmd/crowdsec-cli/metrics.go

@@ -57,6 +57,10 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error
 	lapi_bouncer_stats := map[string]map[string]map[string]int{}
 	lapi_bouncer_stats := map[string]map[string]map[string]int{}
 	decisions_stats := map[string]map[string]map[string]int{}
 	decisions_stats := map[string]map[string]map[string]int{}
 	alerts_stats := map[string]int{}
 	alerts_stats := map[string]int{}
+	stash_stats := map[string]struct {
+		Type  string
+		Count int
+	}{}
 
 
 	for idx, fam := range result {
 	for idx, fam := range result {
 		if !strings.HasPrefix(fam.Name, "cs_") {
 		if !strings.HasPrefix(fam.Name, "cs_") {
@@ -93,6 +97,8 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error
 			origin := metric.Labels["origin"]
 			origin := metric.Labels["origin"]
 			action := metric.Labels["action"]
 			action := metric.Labels["action"]
 
 
+			mtype := metric.Labels["type"]
+
 			fval, err := strconv.ParseFloat(value, 32)
 			fval, err := strconv.ParseFloat(value, 32)
 			if err != nil {
 			if err != nil {
 				log.Errorf("Unexpected int value %s : %s", value, err)
 				log.Errorf("Unexpected int value %s : %s", value, err)
@@ -208,6 +214,11 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error
 					alerts_stats[scenario] = make(map[string]int)
 					alerts_stats[scenario] = make(map[string]int)
 				}*/
 				}*/
 				alerts_stats[reason] += ival
 				alerts_stats[reason] += ival
+			case "cs_cache_size":
+				stash_stats[name] = struct {
+					Type  string
+					Count int
+				}{Type: mtype, Count: ival}
 			default:
 			default:
 				continue
 				continue
 			}
 			}
@@ -225,8 +236,9 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error
 		lapiDecisionStatsTable(out, lapi_decisions_stats)
 		lapiDecisionStatsTable(out, lapi_decisions_stats)
 		decisionStatsTable(out, decisions_stats)
 		decisionStatsTable(out, decisions_stats)
 		alertStatsTable(out, alerts_stats)
 		alertStatsTable(out, alerts_stats)
+		stashStatsTable(out, stash_stats)
 	} else if formatType == "json" {
 	} else if formatType == "json" {
-		for _, val := range []interface{}{acquis_stats, parsers_stats, buckets_stats, lapi_stats, lapi_bouncer_stats, lapi_machine_stats, lapi_decisions_stats, decisions_stats, alerts_stats} {
+		for _, val := range []interface{}{acquis_stats, parsers_stats, buckets_stats, lapi_stats, lapi_bouncer_stats, lapi_machine_stats, lapi_decisions_stats, decisions_stats, alerts_stats, stash_stats} {
 			x, err := json.MarshalIndent(val, "", " ")
 			x, err := json.MarshalIndent(val, "", " ")
 			if err != nil {
 			if err != nil {
 				return fmt.Errorf("failed to unmarshal metrics : %v", err)
 				return fmt.Errorf("failed to unmarshal metrics : %v", err)
@@ -236,7 +248,7 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error
 		return nil
 		return nil
 
 
 	} else if formatType == "raw" {
 	} else if formatType == "raw" {
-		for _, val := range []interface{}{acquis_stats, parsers_stats, buckets_stats, lapi_stats, lapi_bouncer_stats, lapi_machine_stats, lapi_decisions_stats, decisions_stats, alerts_stats} {
+		for _, val := range []interface{}{acquis_stats, parsers_stats, buckets_stats, lapi_stats, lapi_bouncer_stats, lapi_machine_stats, lapi_decisions_stats, decisions_stats, alerts_stats, stash_stats} {
 			x, err := yaml.Marshal(val)
 			x, err := yaml.Marshal(val)
 			if err != nil {
 			if err != nil {
 				return fmt.Errorf("failed to unmarshal metrics : %v", err)
 				return fmt.Errorf("failed to unmarshal metrics : %v", err)

+ 35 - 0
cmd/crowdsec-cli/metrics_table.go

@@ -129,6 +129,41 @@ func parserStatsTable(out io.Writer, stats map[string]map[string]int) {
 	}
 	}
 }
 }
 
 
+func stashStatsTable(out io.Writer, stats map[string]struct {
+	Type  string
+	Count int
+}) {
+
+	t := newTable(out)
+	t.SetRowLines(false)
+	t.SetHeaders("Name", "Type", "Items")
+	t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft)
+
+	// unfortunately, we can't reuse metricsToTable as the structure is too different :/
+	sortedKeys := []string{}
+	for k := range stats {
+		sortedKeys = append(sortedKeys, k)
+	}
+	sort.Strings(sortedKeys)
+
+	numRows := 0
+	for _, alabel := range sortedKeys {
+		astats := stats[alabel]
+
+		row := []string{
+			alabel,
+			astats.Type,
+			fmt.Sprintf("%d", astats.Count),
+		}
+		t.AddRow(row...)
+		numRows++
+	}
+	if numRows > 0 {
+		renderTableTitle(out, "\nParser Stash Metrics:")
+		t.Render()
+	}
+}
+
 func lapiStatsTable(out io.Writer, stats map[string]map[string]int) {
 func lapiStatsTable(out io.Writer, stats map[string]map[string]int) {
 	t := newTable(out)
 	t := newTable(out)
 	t.SetRowLines(false)
 	t.SetRowLines(false)

+ 76 - 48
cmd/crowdsec-cli/postoverflows.go

@@ -10,46 +10,10 @@ import (
 	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
 	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
 )
 )
 
 
-func NewPostOverflowsCmd() *cobra.Command {
-	var cmdPostOverflows = &cobra.Command{
-		Use:   "postoverflows [action] [config]",
-		Short: "Install/Remove/Upgrade/Inspect postoverflow(s) from hub",
-		Example: `cscli postoverflows install crowdsecurity/cdn-whitelist
-		cscli postoverflows inspect crowdsecurity/cdn-whitelist
-		cscli postoverflows upgrade crowdsecurity/cdn-whitelist
-		cscli postoverflows list
-		cscli postoverflows remove crowdsecurity/cdn-whitelist`,
-		Args:              cobra.MinimumNArgs(1),
-		Aliases:           []string{"postoverflow"},
-		DisableAutoGenTag: true,
-		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
-			if err := csConfig.LoadHub(); err != nil {
-				log.Fatal(err)
-			}
-			if csConfig.Hub == nil {
-				return fmt.Errorf("you must configure cli before interacting with hub")
-			}
-
-			if err := cwhub.SetHubBranch(); err != nil {
-				return fmt.Errorf("error while setting hub branch: %s", err)
-			}
-
-			if err := cwhub.GetHubIdx(csConfig.Hub); err != nil {
-				log.Info("Run 'sudo cscli hub update' to get the hub index")
-				log.Fatalf("Failed to get Hub index : %v", err)
-			}
-			return nil
-		},
-		PersistentPostRun: func(cmd *cobra.Command, args []string) {
-			if cmd.Name() == "inspect" || cmd.Name() == "list" {
-				return
-			}
-			log.Infof(ReloadMessage())
-		},
-	}
-
+func NewPostOverflowsInstallCmd() *cobra.Command {
 	var ignoreError bool
 	var ignoreError bool
-	var cmdPostOverflowsInstall = &cobra.Command{
+
+	cmdPostOverflowsInstall := &cobra.Command{
 		Use:               "install [config]",
 		Use:               "install [config]",
 		Short:             "Install given postoverflow(s)",
 		Short:             "Install given postoverflow(s)",
 		Long:              `Fetch and install given postoverflow(s) from hub`,
 		Long:              `Fetch and install given postoverflow(s) from hub`,
@@ -77,12 +41,16 @@ func NewPostOverflowsCmd() *cobra.Command {
 			}
 			}
 		},
 		},
 	}
 	}
+
 	cmdPostOverflowsInstall.PersistentFlags().BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable")
 	cmdPostOverflowsInstall.PersistentFlags().BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable")
 	cmdPostOverflowsInstall.PersistentFlags().BoolVar(&forceAction, "force", false, "Force install : Overwrite tainted and outdated files")
 	cmdPostOverflowsInstall.PersistentFlags().BoolVar(&forceAction, "force", false, "Force install : Overwrite tainted and outdated files")
 	cmdPostOverflowsInstall.PersistentFlags().BoolVar(&ignoreError, "ignore", false, "Ignore errors when installing multiple postoverflows")
 	cmdPostOverflowsInstall.PersistentFlags().BoolVar(&ignoreError, "ignore", false, "Ignore errors when installing multiple postoverflows")
-	cmdPostOverflows.AddCommand(cmdPostOverflowsInstall)
 
 
-	var cmdPostOverflowsRemove = &cobra.Command{
+	return cmdPostOverflowsInstall
+}
+
+func NewPostOverflowsRemoveCmd() *cobra.Command {
+	cmdPostOverflowsRemove := &cobra.Command{
 		Use:               "remove [config]",
 		Use:               "remove [config]",
 		Short:             "Remove given postoverflow(s)",
 		Short:             "Remove given postoverflow(s)",
 		Long:              `remove given postoverflow(s)`,
 		Long:              `remove given postoverflow(s)`,
@@ -107,12 +75,16 @@ func NewPostOverflowsCmd() *cobra.Command {
 			}
 			}
 		},
 		},
 	}
 	}
+
 	cmdPostOverflowsRemove.PersistentFlags().BoolVar(&purge, "purge", false, "Delete source file too")
 	cmdPostOverflowsRemove.PersistentFlags().BoolVar(&purge, "purge", false, "Delete source file too")
 	cmdPostOverflowsRemove.PersistentFlags().BoolVar(&forceAction, "force", false, "Force remove : Remove tainted and outdated files")
 	cmdPostOverflowsRemove.PersistentFlags().BoolVar(&forceAction, "force", false, "Force remove : Remove tainted and outdated files")
 	cmdPostOverflowsRemove.PersistentFlags().BoolVar(&all, "all", false, "Delete all the postoverflows")
 	cmdPostOverflowsRemove.PersistentFlags().BoolVar(&all, "all", false, "Delete all the postoverflows")
-	cmdPostOverflows.AddCommand(cmdPostOverflowsRemove)
 
 
-	var cmdPostOverflowsUpgrade = &cobra.Command{
+	return cmdPostOverflowsRemove
+}
+
+func NewPostOverflowsUpgradeCmd() *cobra.Command {
+	cmdPostOverflowsUpgrade := &cobra.Command{
 		Use:               "upgrade [config]",
 		Use:               "upgrade [config]",
 		Short:             "Upgrade given postoverflow(s)",
 		Short:             "Upgrade given postoverflow(s)",
 		Long:              `Fetch and Upgrade given postoverflow(s) from hub`,
 		Long:              `Fetch and Upgrade given postoverflow(s) from hub`,
@@ -134,11 +106,15 @@ func NewPostOverflowsCmd() *cobra.Command {
 			}
 			}
 		},
 		},
 	}
 	}
+
 	cmdPostOverflowsUpgrade.PersistentFlags().BoolVarP(&all, "all", "a", false, "Upgrade all the postoverflows")
 	cmdPostOverflowsUpgrade.PersistentFlags().BoolVarP(&all, "all", "a", false, "Upgrade all the postoverflows")
 	cmdPostOverflowsUpgrade.PersistentFlags().BoolVar(&forceAction, "force", false, "Force upgrade : Overwrite tainted and outdated files")
 	cmdPostOverflowsUpgrade.PersistentFlags().BoolVar(&forceAction, "force", false, "Force upgrade : Overwrite tainted and outdated files")
-	cmdPostOverflows.AddCommand(cmdPostOverflowsUpgrade)
 
 
-	var cmdPostOverflowsInspect = &cobra.Command{
+	return cmdPostOverflowsUpgrade
+}
+
+func NewPostOverflowsInspectCmd() *cobra.Command {
+	cmdPostOverflowsInspect := &cobra.Command{
 		Use:               "inspect [config]",
 		Use:               "inspect [config]",
 		Short:             "Inspect given postoverflow",
 		Short:             "Inspect given postoverflow",
 		Long:              `Inspect given postoverflow`,
 		Long:              `Inspect given postoverflow`,
@@ -152,9 +128,12 @@ func NewPostOverflowsCmd() *cobra.Command {
 			InspectItem(args[0], cwhub.PARSERS_OVFLW)
 			InspectItem(args[0], cwhub.PARSERS_OVFLW)
 		},
 		},
 	}
 	}
-	cmdPostOverflows.AddCommand(cmdPostOverflowsInspect)
 
 
-	var cmdPostOverflowsList = &cobra.Command{
+	return cmdPostOverflowsInspect
+}
+
+func NewPostOverflowsListCmd() *cobra.Command {
+	cmdPostOverflowsList := &cobra.Command{
 		Use:   "list [config]",
 		Use:   "list [config]",
 		Short: "List all postoverflows or given one",
 		Short: "List all postoverflows or given one",
 		Long:  `List all postoverflows or given one`,
 		Long:  `List all postoverflows or given one`,
@@ -165,8 +144,57 @@ cscli postoverflows list crowdsecurity/xxx`,
 			ListItems(color.Output, []string{cwhub.PARSERS_OVFLW}, args, false, true, all)
 			ListItems(color.Output, []string{cwhub.PARSERS_OVFLW}, args, false, true, all)
 		},
 		},
 	}
 	}
+
 	cmdPostOverflowsList.PersistentFlags().BoolVarP(&all, "all", "a", false, "List disabled items as well")
 	cmdPostOverflowsList.PersistentFlags().BoolVarP(&all, "all", "a", false, "List disabled items as well")
-	cmdPostOverflows.AddCommand(cmdPostOverflowsList)
+
+	return cmdPostOverflowsList
+}
+
+
+
+func NewPostOverflowsCmd() *cobra.Command {
+	cmdPostOverflows := &cobra.Command{
+		Use:   "postoverflows [action] [config]",
+		Short: "Install/Remove/Upgrade/Inspect postoverflow(s) from hub",
+		Example: `cscli postoverflows install crowdsecurity/cdn-whitelist
+		cscli postoverflows inspect crowdsecurity/cdn-whitelist
+		cscli postoverflows upgrade crowdsecurity/cdn-whitelist
+		cscli postoverflows list
+		cscli postoverflows remove crowdsecurity/cdn-whitelist`,
+		Args:              cobra.MinimumNArgs(1),
+		Aliases:           []string{"postoverflow"},
+		DisableAutoGenTag: true,
+		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
+			if err := csConfig.LoadHub(); err != nil {
+				log.Fatal(err)
+			}
+			if csConfig.Hub == nil {
+				return fmt.Errorf("you must configure cli before interacting with hub")
+			}
+
+			if err := cwhub.SetHubBranch(); err != nil {
+				return fmt.Errorf("error while setting hub branch: %s", err)
+			}
+
+			if err := cwhub.GetHubIdx(csConfig.Hub); err != nil {
+				log.Info("Run 'sudo cscli hub update' to get the hub index")
+				log.Fatalf("Failed to get Hub index : %v", err)
+			}
+			return nil
+		},
+		PersistentPostRun: func(cmd *cobra.Command, args []string) {
+			if cmd.Name() == "inspect" || cmd.Name() == "list" {
+				return
+			}
+			log.Infof(ReloadMessage())
+		},
+	}
+
+	cmdPostOverflows.AddCommand(NewPostOverflowsInstallCmd())
+	cmdPostOverflows.AddCommand(NewPostOverflowsRemoveCmd())
+	cmdPostOverflows.AddCommand(NewPostOverflowsUpgradeCmd())
+	cmdPostOverflows.AddCommand(NewPostOverflowsInspectCmd())
+	cmdPostOverflows.AddCommand(NewPostOverflowsListCmd())
 
 
 	return cmdPostOverflows
 	return cmdPostOverflows
 }
 }

+ 19 - 4
cmd/crowdsec-cli/support.go

@@ -22,6 +22,7 @@ import (
 	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
 	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
 	"github.com/crowdsecurity/crowdsec/pkg/cwversion"
 	"github.com/crowdsecurity/crowdsec/pkg/cwversion"
 	"github.com/crowdsecurity/crowdsec/pkg/database"
 	"github.com/crowdsecurity/crowdsec/pkg/database"
+	"github.com/crowdsecurity/crowdsec/pkg/fflag"
 	"github.com/crowdsecurity/crowdsec/pkg/models"
 	"github.com/crowdsecurity/crowdsec/pkg/models"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 )
 )
@@ -30,6 +31,7 @@ const (
 	SUPPORT_METRICS_HUMAN_PATH           = "metrics/metrics.human"
 	SUPPORT_METRICS_HUMAN_PATH           = "metrics/metrics.human"
 	SUPPORT_METRICS_PROMETHEUS_PATH      = "metrics/metrics.prometheus"
 	SUPPORT_METRICS_PROMETHEUS_PATH      = "metrics/metrics.prometheus"
 	SUPPORT_VERSION_PATH                 = "version.txt"
 	SUPPORT_VERSION_PATH                 = "version.txt"
+	SUPPORT_FEATURES_PATH                = "features.txt"
 	SUPPORT_OS_INFO_PATH                 = "osinfo.txt"
 	SUPPORT_OS_INFO_PATH                 = "osinfo.txt"
 	SUPPORT_PARSERS_PATH                 = "hub/parsers.txt"
 	SUPPORT_PARSERS_PATH                 = "hub/parsers.txt"
 	SUPPORT_SCENARIOS_PATH               = "hub/scenarios.txt"
 	SUPPORT_SCENARIOS_PATH               = "hub/scenarios.txt"
@@ -89,6 +91,18 @@ func collectVersion() []byte {
 	return []byte(cwversion.ShowStr())
 	return []byte(cwversion.ShowStr())
 }
 }
 
 
+func collectFeatures() []byte {
+	log.Info("Collecting feature flags")
+	enabledFeatures := fflag.Crowdsec.GetEnabledFeatures()
+
+	w := bytes.NewBuffer(nil)
+	for _, k := range enabledFeatures {
+		fmt.Fprintf(w, "%s\n", k)
+	}
+	return w.Bytes()
+}
+
+
 func collectOSInfo() ([]byte, error) {
 func collectOSInfo() ([]byte, error) {
 	log.Info("Collecting OS info")
 	log.Info("Collecting OS info")
 	info, err := osinfo.GetOSInfo()
 	info, err := osinfo.GetOSInfo()
@@ -264,6 +278,7 @@ cscli support dump -f /tmp/crowdsec-support.zip
 			var skipHub, skipDB, skipCAPI, skipLAPI, skipAgent bool
 			var skipHub, skipDB, skipCAPI, skipLAPI, skipAgent bool
 			infos := map[string][]byte{
 			infos := map[string][]byte{
 				SUPPORT_VERSION_PATH: collectVersion(),
 				SUPPORT_VERSION_PATH: collectVersion(),
+				SUPPORT_FEATURES_PATH: collectFeatures(),
 			}
 			}
 
 
 			if outFile == "" {
 			if outFile == "" {
@@ -271,7 +286,6 @@ cscli support dump -f /tmp/crowdsec-support.zip
 			}
 			}
 
 
 			dbClient, err = database.NewClient(csConfig.DbConfig)
 			dbClient, err = database.NewClient(csConfig.DbConfig)
-
 			if err != nil {
 			if err != nil {
 				log.Warnf("Could not connect to database: %s", err)
 				log.Warnf("Could not connect to database: %s", err)
 				skipDB = true
 				skipDB = true
@@ -291,7 +305,6 @@ cscli support dump -f /tmp/crowdsec-support.zip
 			}
 			}
 
 
 			err = initHub()
 			err = initHub()
-
 			if err != nil {
 			if err != nil {
 				log.Warn("Could not init hub, running on LAPI ? Hub related information will not be collected")
 				log.Warn("Could not init hub, running on LAPI ? Hub related information will not be collected")
 				skipHub = true
 				skipHub = true
@@ -309,7 +322,7 @@ cscli support dump -f /tmp/crowdsec-support.zip
 				skipLAPI = true
 				skipLAPI = true
 			}
 			}
 
 
-			if csConfig.API.Server == nil || csConfig.API.Server.OnlineClient.Credentials == nil {
+			if csConfig.API.Server == nil || csConfig.API.Server.OnlineClient == nil || csConfig.API.Server.OnlineClient.Credentials == nil {
 				log.Warn("no CAPI credentials found, skipping CAPI connectivity check")
 				log.Warn("no CAPI credentials found, skipping CAPI connectivity check")
 				skipCAPI = true
 				skipCAPI = true
 			}
 			}
@@ -322,7 +335,6 @@ cscli support dump -f /tmp/crowdsec-support.zip
 			}
 			}
 
 
 			infos[SUPPORT_OS_INFO_PATH], err = collectOSInfo()
 			infos[SUPPORT_OS_INFO_PATH], err = collectOSInfo()
-
 			if err != nil {
 			if err != nil {
 				log.Warnf("could not collect OS information: %s", err)
 				log.Warnf("could not collect OS information: %s", err)
 				infos[SUPPORT_OS_INFO_PATH] = []byte(err.Error())
 				infos[SUPPORT_OS_INFO_PATH] = []byte(err.Error())
@@ -389,14 +401,17 @@ cscli support dump -f /tmp/crowdsec-support.zip
 				}
 				}
 				fw.Write([]byte(types.StripAnsiString(string(data))))
 				fw.Write([]byte(types.StripAnsiString(string(data))))
 			}
 			}
+
 			err = zipWriter.Close()
 			err = zipWriter.Close()
 			if err != nil {
 			if err != nil {
 				log.Fatalf("could not finalize zip file: %s", err)
 				log.Fatalf("could not finalize zip file: %s", err)
 			}
 			}
+
 			err = os.WriteFile(outFile, w.Bytes(), 0600)
 			err = os.WriteFile(outFile, w.Bytes(), 0600)
 			if err != nil {
 			if err != nil {
 				log.Fatalf("could not write zip file to %s: %s", outFile, err)
 				log.Fatalf("could not write zip file to %s: %s", outFile, err)
 			}
 			}
+
 			log.Infof("Written zip file to %s", outFile)
 			log.Infof("Written zip file to %s", outFile)
 		},
 		},
 	}
 	}

+ 25 - 3
cmd/crowdsec-cli/utils.go

@@ -739,12 +739,34 @@ func getDBClient() (*database.Client, error) {
 	if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI {
 	if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI {
 		return nil, err
 		return nil, err
 	}
 	}
-	if err := csConfig.LoadDBConfig(); err != nil {
-		return nil, err
-	}
 	ret, err := database.NewClient(csConfig.DbConfig)
 	ret, err := database.NewClient(csConfig.DbConfig)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
 	return ret, nil
 	return ret, nil
 }
 }
+
+
+func removeFromSlice(val string, slice []string) []string {
+	var i int
+	var value string
+
+	valueFound := false
+
+	// get the index
+	for i, value = range slice {
+		if value == val {
+			valueFound = true
+			break
+		}
+	}
+
+	if valueFound {
+		slice[i] = slice[len(slice)-1]
+		slice[len(slice)-1] = ""
+		slice = slice[:len(slice)-1]
+	}
+
+	return slice
+
+}

+ 1 - 1
cmd/crowdsec/crowdsec.go

@@ -28,7 +28,7 @@ func initCrowdsec(cConfig *csconfig.Config) (*parser.Parsers, error) {
 	}
 	}
 
 
 	// Start loading configs
 	// Start loading configs
-	csParsers := newParsers()
+	csParsers := parser.NewParsers()
 	if csParsers, err = parser.LoadParsers(cConfig, csParsers); err != nil {
 	if csParsers, err = parser.LoadParsers(cConfig, csParsers); err != nil {
 		return &parser.Parsers{}, fmt.Errorf("Failed to load parsers: %s", err)
 		return &parser.Parsers{}, fmt.Errorf("Failed to load parsers: %s", err)
 	}
 	}

+ 47 - 41
cmd/crowdsec/main.go

@@ -5,8 +5,8 @@ import (
 	"fmt"
 	"fmt"
 	_ "net/http/pprof"
 	_ "net/http/pprof"
 	"os"
 	"os"
+	"path/filepath"
 	"runtime"
 	"runtime"
-	"sort"
 	"strings"
 	"strings"
 	"time"
 	"time"
 
 
@@ -20,6 +20,7 @@ import (
 	"github.com/crowdsecurity/crowdsec/pkg/csplugin"
 	"github.com/crowdsecurity/crowdsec/pkg/csplugin"
 	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
 	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
 	"github.com/crowdsecurity/crowdsec/pkg/cwversion"
 	"github.com/crowdsecurity/crowdsec/pkg/cwversion"
+	"github.com/crowdsecurity/crowdsec/pkg/fflag"
 	"github.com/crowdsecurity/crowdsec/pkg/leakybucket"
 	"github.com/crowdsecurity/crowdsec/pkg/leakybucket"
 	"github.com/crowdsecurity/crowdsec/pkg/parser"
 	"github.com/crowdsecurity/crowdsec/pkg/parser"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
@@ -70,45 +71,6 @@ type Flags struct {
 
 
 type labelsMap map[string]string
 type labelsMap map[string]string
 
 
-// Return new parsers
-// nodes and povfwnodes are already initialized in parser.LoadStages
-func newParsers() *parser.Parsers {
-	parsers := &parser.Parsers{
-		Ctx:             &parser.UnixParserCtx{},
-		Povfwctx:        &parser.UnixParserCtx{},
-		StageFiles:      make([]parser.Stagefile, 0),
-		PovfwStageFiles: make([]parser.Stagefile, 0),
-	}
-	for _, itemType := range []string{cwhub.PARSERS, cwhub.PARSERS_OVFLW} {
-		for _, hubParserItem := range cwhub.GetItemMap(itemType) {
-			if hubParserItem.Installed {
-				stagefile := parser.Stagefile{
-					Filename: hubParserItem.LocalPath,
-					Stage:    hubParserItem.Stage,
-				}
-				if itemType == cwhub.PARSERS {
-					parsers.StageFiles = append(parsers.StageFiles, stagefile)
-				}
-				if itemType == cwhub.PARSERS_OVFLW {
-					parsers.PovfwStageFiles = append(parsers.PovfwStageFiles, stagefile)
-				}
-			}
-		}
-	}
-	if parsers.StageFiles != nil {
-		sort.Slice(parsers.StageFiles, func(i, j int) bool {
-			return parsers.StageFiles[i].Filename < parsers.StageFiles[j].Filename
-		})
-	}
-	if parsers.PovfwStageFiles != nil {
-		sort.Slice(parsers.PovfwStageFiles, func(i, j int) bool {
-			return parsers.PovfwStageFiles[i].Filename < parsers.PovfwStageFiles[j].Filename
-		})
-	}
-
-	return parsers
-}
-
 func LoadBuckets(cConfig *csconfig.Config) error {
 func LoadBuckets(cConfig *csconfig.Config) error {
 	var (
 	var (
 		err   error
 		err   error
@@ -223,7 +185,7 @@ func newLogLevel(curLevelPtr *log.Level, f *Flags) *log.Level {
 	default:
 	default:
 	}
 	}
 
 
-	if ret == *curLevelPtr {
+	if curLevelPtr != nil && ret == *curLevelPtr {
 		// avoid returning a new ptr to the same value
 		// avoid returning a new ptr to the same value
 		return curLevelPtr
 		return curLevelPtr
 	}
 	}
@@ -232,6 +194,10 @@ func newLogLevel(curLevelPtr *log.Level, f *Flags) *log.Level {
 
 
 // LoadConfig returns a configuration parsed from configuration file
 // LoadConfig returns a configuration parsed from configuration file
 func LoadConfig(cConfig *csconfig.Config) error {
 func LoadConfig(cConfig *csconfig.Config) error {
+	if (cConfig.Common == nil || *cConfig.Common == csconfig.CommonCfg{}) {
+		return fmt.Errorf("unable to load configuration: common section is empty")
+	}
+
 	cConfig.Common.LogLevel = newLogLevel(cConfig.Common.LogLevel, flags)
 	cConfig.Common.LogLevel = newLogLevel(cConfig.Common.LogLevel, flags)
 
 
 	if dumpFolder != "" {
 	if dumpFolder != "" {
@@ -295,9 +261,39 @@ func LoadConfig(cConfig *csconfig.Config) error {
 		return err
 		return err
 	}
 	}
 
 
+	err := LoadFeatureFlags(cConfig, log.StandardLogger())
+	if err != nil {
+		return err
+	}
+
 	return nil
 	return nil
 }
 }
 
 
+
+// LoadFeatureFlags parses {ConfigDir}/feature.yaml to enable/disable features.
+//
+// Since CROWDSEC_FEATURE_ envvars are parsed before config.yaml,
+// when the logger is not yet initialized, we also log here a recap
+// of what has been enabled.
+func LoadFeatureFlags(cConfig *csconfig.Config, logger *log.Logger) error {
+	featurePath := filepath.Join(cConfig.ConfigPaths.ConfigDir, "feature.yaml")
+
+	if err := fflag.Crowdsec.SetFromYamlFile(featurePath, logger); err != nil {
+		return fmt.Errorf("file %s: %s", featurePath, err)
+	}
+
+	enabledFeatures := fflag.Crowdsec.GetEnabledFeatures()
+
+	msg := "<none>"
+	if len(enabledFeatures) > 0 {
+		msg = strings.Join(enabledFeatures, ", ")
+	}
+	logger.Infof("Enabled features: %s", msg)
+
+	return nil
+}
+
+
 // exitWithCode must be called right before the program termination,
 // exitWithCode must be called right before the program termination,
 // to allow measuring functional test coverage in case of abnormal exit.
 // to allow measuring functional test coverage in case of abnormal exit.
 //
 //
@@ -322,6 +318,16 @@ func exitWithCode(exitCode int, err error) {
 var crowdsecT0 time.Time
 var crowdsecT0 time.Time
 
 
 func main() {
 func main() {
+	if err := fflag.RegisterAllFeatures(); err != nil {
+		log.Fatalf("failed to register features: %s", err)
+	}
+
+	// some features can require configuration or command-line options,
+	// so wwe need to parse them asap. we'll load from feature.yaml later.
+	if err := fflag.Crowdsec.SetFromEnv(log.StandardLogger()); err != nil {
+		log.Fatalf("failed set features from environment: %s", err)
+	}
+
 	crowdsecT0 = time.Now()
 	crowdsecT0 = time.Now()
 
 
 	defer types.CatchPanic("crowdsec/main")
 	defer types.CatchPanic("crowdsec/main")

+ 9 - 2
cmd/crowdsec/metrics.go

@@ -5,6 +5,7 @@ import (
 	"time"
 	"time"
 
 
 	v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1"
 	v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1"
+	"github.com/crowdsecurity/crowdsec/pkg/cache"
 	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
 	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
 	"github.com/crowdsecurity/crowdsec/pkg/cwversion"
 	"github.com/crowdsecurity/crowdsec/pkg/cwversion"
 	"github.com/crowdsecurity/crowdsec/pkg/database"
 	"github.com/crowdsecurity/crowdsec/pkg/database"
@@ -100,6 +101,10 @@ var globalPourHistogram = prometheus.NewHistogramVec(
 
 
 func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.HandlerFunc {
 func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.HandlerFunc {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		//update cache metrics (stash)
+		cache.UpdateCacheMetrics()
+
+		//decision metrics are only relevant for LAPI
 		if dbClient == nil {
 		if dbClient == nil {
 			next.ServeHTTP(w, r)
 			next.ServeHTTP(w, r)
 			return
 			return
@@ -160,7 +165,8 @@ func registerPrometheus(config *csconfig.PrometheusCfg) {
 			globalCsInfo, globalParsingHistogram, globalPourHistogram,
 			globalCsInfo, globalParsingHistogram, globalPourHistogram,
 			leaky.BucketsUnderflow, leaky.BucketsCanceled, leaky.BucketsInstantiation, leaky.BucketsOverflow,
 			leaky.BucketsUnderflow, leaky.BucketsCanceled, leaky.BucketsInstantiation, leaky.BucketsOverflow,
 			v1.LapiRouteHits,
 			v1.LapiRouteHits,
-			leaky.BucketsCurrentCount)
+			leaky.BucketsCurrentCount,
+			cache.CacheMetrics)
 	} else {
 	} else {
 		log.Infof("Loading prometheus collectors")
 		log.Infof("Loading prometheus collectors")
 		prometheus.MustRegister(globalParserHits, globalParserHitsOk, globalParserHitsKo,
 		prometheus.MustRegister(globalParserHits, globalParserHitsOk, globalParserHitsKo,
@@ -168,7 +174,8 @@ func registerPrometheus(config *csconfig.PrometheusCfg) {
 			globalCsInfo, globalParsingHistogram, globalPourHistogram,
 			globalCsInfo, globalParsingHistogram, globalPourHistogram,
 			v1.LapiRouteHits, v1.LapiMachineHits, v1.LapiBouncerHits, v1.LapiNilDecisions, v1.LapiNonNilDecisions, v1.LapiResponseTime,
 			v1.LapiRouteHits, v1.LapiMachineHits, v1.LapiBouncerHits, v1.LapiNilDecisions, v1.LapiNonNilDecisions, v1.LapiResponseTime,
 			leaky.BucketsPour, leaky.BucketsUnderflow, leaky.BucketsCanceled, leaky.BucketsInstantiation, leaky.BucketsOverflow, leaky.BucketsCurrentCount,
 			leaky.BucketsPour, leaky.BucketsUnderflow, leaky.BucketsCanceled, leaky.BucketsInstantiation, leaky.BucketsOverflow, leaky.BucketsCurrentCount,
-			globalActiveDecisions, globalAlerts)
+			globalActiveDecisions, globalAlerts,
+			cache.CacheMetrics)
 
 
 	}
 	}
 }
 }

+ 1 - 1
cmd/crowdsec/run_in_svc.go

@@ -30,7 +30,7 @@ func StartRunSvc() error {
 		},
 		},
 	})
 	})
 
 
-	cConfig, err = csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI)
+	cConfig, err = csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI, false)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}

+ 1 - 1
cmd/crowdsec/run_in_svc_windows.go

@@ -58,7 +58,7 @@ func WindowsRun() error {
 		err     error
 		err     error
 	)
 	)
 
 
-	cConfig, err = csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI)
+	cConfig, err = csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI, false)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}

+ 1 - 1
cmd/crowdsec/serve.go

@@ -54,7 +54,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) {
 	crowdsecTomb = tomb.Tomb{}
 	crowdsecTomb = tomb.Tomb{}
 	pluginTomb = tomb.Tomb{}
 	pluginTomb = tomb.Tomb{}
 
 
-	cConfig, err := csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI)
+	cConfig, err := csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI, false)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}

+ 1 - 1
cmd/crowdsec/win_service.go

@@ -97,7 +97,7 @@ func runService(name string) error {
 		log.Warnf("Failed to open event log: %s", err)
 		log.Warnf("Failed to open event log: %s", err)
 	}
 	}
 
 
-	cConfig, err := csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI)
+	cConfig, err := csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI, false)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}

+ 1 - 0
config/config.yaml

@@ -16,6 +16,7 @@ config_paths:
   notification_dir: /etc/crowdsec/notifications/
   notification_dir: /etc/crowdsec/notifications/
   plugin_dir: /usr/local/lib/crowdsec/plugins/
   plugin_dir: /usr/local/lib/crowdsec/plugins/
 crowdsec_service:
 crowdsec_service:
+  #console_context_path: /etc/crowdsec/console/context.yaml
   acquisition_path: /etc/crowdsec/acquis.yaml
   acquisition_path: /etc/crowdsec/acquis.yaml
   acquisition_dir: /etc/crowdsec/acquis.d
   acquisition_dir: /etc/crowdsec/acquis.d
   parser_routines: 1
   parser_routines: 1

+ 1 - 0
config/config_win.yaml

@@ -13,6 +13,7 @@ config_paths:
   plugin_dir: C:\ProgramData\CrowdSec\plugins\
   plugin_dir: C:\ProgramData\CrowdSec\plugins\
   notification_dir:  C:\ProgramData\CrowdSec\config\notifications\
   notification_dir:  C:\ProgramData\CrowdSec\config\notifications\
 crowdsec_service:
 crowdsec_service:
+  #console_context_path: C:\ProgramData\CrowdSec\console\context.yaml
   acquisition_path:  C:\ProgramData\CrowdSec\config\acquis.yaml
   acquisition_path:  C:\ProgramData\CrowdSec\config\acquis.yaml
   parser_routines: 1
   parser_routines: 1
 cscli:
 cscli:

+ 1 - 0
config/console.yaml

@@ -1,3 +1,4 @@
 share_manual_decisions: false
 share_manual_decisions: false
 share_custom: true
 share_custom: true
 share_tainted: true
 share_tainted: true
+share_context: false

+ 0 - 0
config/context.yaml


+ 2 - 0
config/crowdsec.service

@@ -9,6 +9,8 @@ ExecStartPre=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml -t
 ExecStart=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml
 ExecStart=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml
 #ExecStartPost=/bin/sleep 0.1
 #ExecStartPost=/bin/sleep 0.1
 ExecReload=/bin/kill -HUP $MAINPID
 ExecReload=/bin/kill -HUP $MAINPID
+Restart=always
+RestartSec=60
 
 
 [Install]
 [Install]
 WantedBy=multi-user.target
 WantedBy=multi-user.target

+ 2 - 1
debian/rules

@@ -28,7 +28,7 @@ override_dh_auto_install:
 	mkdir -p debian/crowdsec/usr/share/crowdsec
 	mkdir -p debian/crowdsec/usr/share/crowdsec
 	mkdir -p debian/crowdsec/etc/crowdsec/hub/
 	mkdir -p debian/crowdsec/etc/crowdsec/hub/
 	mkdir -p debian/crowdsec/usr/share/crowdsec/config
 	mkdir -p debian/crowdsec/usr/share/crowdsec/config
-
+	mkdir -p debian/crowdsec/etc/crowdsec/console/
 
 
 	mkdir -p debian/crowdsec/usr/lib/crowdsec/plugins/
 	mkdir -p debian/crowdsec/usr/lib/crowdsec/plugins/
 	mkdir -p debian/crowdsec/etc/crowdsec/notifications/
 	mkdir -p debian/crowdsec/etc/crowdsec/notifications/
@@ -44,6 +44,7 @@ override_dh_auto_install:
 	install -m 600 config/config.yaml debian/crowdsec/etc/crowdsec/config.yaml
 	install -m 600 config/config.yaml debian/crowdsec/etc/crowdsec/config.yaml
 	cp config/simulation.yaml debian/crowdsec/etc/crowdsec/simulation.yaml
 	cp config/simulation.yaml debian/crowdsec/etc/crowdsec/simulation.yaml
 	cp config/profiles.yaml debian/crowdsec/etc/crowdsec/profiles.yaml
 	cp config/profiles.yaml debian/crowdsec/etc/crowdsec/profiles.yaml
+	cp config/context.yaml debian/crowdsec/etc/crowdsec/console/context.yaml
 	cp config/console.yaml debian/crowdsec/etc/crowdsec/console.yaml
 	cp config/console.yaml debian/crowdsec/etc/crowdsec/console.yaml
 	cp -a config/patterns debian/crowdsec/etc/crowdsec
 	cp -a config/patterns debian/crowdsec/etc/crowdsec
 
 

+ 34 - 14
docker/README.md

@@ -179,7 +179,7 @@ To use environment variables, they should be in the format `BOUNCER_KEY_<name>=<
 
 
 To use Docker secrets, the secret should be named `bouncer_key_<name>` with a content of `<key>`. e.g. `bouncer_key_nginx` with content `mysecretkey12345`.
 To use Docker secrets, the secret should be named `bouncer_key_<name>` with a content of `<key>`. e.g. `bouncer_key_nginx` with content `mysecretkey12345`.
 
 
-A bouncer key can be any string but we recommend an alphanumeric value for consistency with the crowdsec-generated keys and to avoid problems with escaping special characters.
+A bouncer key can be any string but we recommend an alphanumeric value for consistency with the keys generated by crowdsec and to avoid problems with escaping special characters.
 
 
 With TLS authentication:
 With TLS authentication:
 
 
@@ -198,22 +198,33 @@ Using binds rather than named volumes ([complete explanation here](https://docs.
 # Reference
 # Reference
 ## Environment Variables
 ## Environment Variables
 
 
+Note for persistent configurations (i.e. bind mount or volumes): when a
+variable is set, its value may be written to the appropriate file (usually
+config.yaml) each time the container is run.
+
+
 | Variable                | Default                   | Description |
 | Variable                | Default                   | Description |
 | ----------------------- | ------------------------- | ----------- |
 | ----------------------- | ------------------------- | ----------- |
 | `CONFIG_FILE`           | `/etc/crowdsec/config.yaml` | Configuration file location |
 | `CONFIG_FILE`           | `/etc/crowdsec/config.yaml` | Configuration file location |
-| `DSN`                   | | Process a single source in time-machine: `-e DSN="file:///var/log/toto.log"` or `-e DSN="cloudwatch:///your/group/path:stream_name?profile=dev&backlog=16h"` or `-e DSN="journalctl://filters=_SYSTEMD_UNIT=ssh.service"` |
-| `TYPE`                  | | [`Labels.type`](https://docs.crowdsec.net/Crowdsec/v1/references/acquisition/) for file in time-machine: `-e TYPE="<type>"` |
-| `TEST_MODE`             | false | Don't run the service, only test the configuration: `-e TEST_MODE=true` |
-| `TZ`                    | | Set the [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) to ensure the logs have a local timestamp. |
-| `LOCAL_API_URL`         | `http://0.0.0.0:8080` | The LAPI URL, you need to change this when `DISABLE_LOCAL_API` is true: `-e LOCAL_API_URL="http://lapi-address:8080"` |
 | `DISABLE_AGENT`         | false | Disable the agent, run a LAPI-only container |
 | `DISABLE_AGENT`         | false | Disable the agent, run a LAPI-only container |
 | `DISABLE_LOCAL_API`     | false | Disable LAPI, run an agent-only container |
 | `DISABLE_LOCAL_API`     | false | Disable LAPI, run an agent-only container |
 | `DISABLE_ONLINE_API`    | false | Disable online API registration for signal sharing |
 | `DISABLE_ONLINE_API`    | false | Disable online API registration for signal sharing |
-| `CUSTOM_HOSTNAME`       | localhost | Custom hostname for LAPI registration (with agent and LAPI on the same container) |
+| `TEST_MODE`             | false | Don't run the service, only test the configuration: `-e TEST_MODE=true` |
+| `TZ`                    | | Set the [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) to ensure the logs have a local timestamp. |
+| `LOCAL_API_URL`         | `http://0.0.0.0:8080` | The LAPI URL, you need to change this when `DISABLE_LOCAL_API` is true: `-e LOCAL_API_URL="http://lapi-address:8080"` |
 | `PLUGIN_DIR`            | `/usr/local/lib/crowdsec/plugins/` | Directory for plugins: `-e PLUGIN_DIR="<path>"` |
 | `PLUGIN_DIR`            | `/usr/local/lib/crowdsec/plugins/` | Directory for plugins: `-e PLUGIN_DIR="<path>"` |
-| `BOUNCER_KEY_<name>`    | | Register a bouncer with the name `<name>` and a key equal to the value of the environment variable. |
 | `METRICS_PORT`          | 6060 | Port to expose Prometheus metrics |
 | `METRICS_PORT`          | 6060 | Port to expose Prometheus metrics |
+|                         | | |
+| __LAPI__                | | (useless with DISABLE_LOCAL_API) |
 | `USE_WAL`               | false | Enable Write-Ahead Logging with SQLite |
 | `USE_WAL`               | false | Enable Write-Ahead Logging with SQLite |
+| `CUSTOM_HOSTNAME`       | localhost | Name for the local agent (running in the container with LAPI) |
+|                         | | |
+| __Agent__               | | (these don't work with DISABLE_AGENT) |
+| `TYPE`                  | | [`Labels.type`](https://docs.crowdsec.net/Crowdsec/v1/references/acquisition/) for file in time-machine: `-e TYPE="<type>"` |
+| `DSN`                   | | Process a single source in time-machine: `-e DSN="file:///var/log/toto.log"` or `-e DSN="cloudwatch:///your/group/path:stream_name?profile=dev&backlog=16h"` or `-e DSN="journalctl://filters=_SYSTEMD_UNIT=ssh.service"` |
+|                         | | |
+| __Bouncers__            | | |
+| `BOUNCER_KEY_<name>`    | | Register a bouncer with the name `<name>` and a key equal to the value of the environment variable. |
 |                         | | |
 |                         | | |
 | __Console__             | | |
 | __Console__             | | |
 | `ENROLL_KEY`            | | Enroll key retrieved from [the console](https://app.crowdsec.net/) to enroll the instance. |
 | `ENROLL_KEY`            | | Enroll key retrieved from [the console](https://app.crowdsec.net/) to enroll the instance. |
@@ -224,18 +235,23 @@ Using binds rather than named volumes ([complete explanation here](https://docs.
 | `AGENT_USERNAME`        | | Agent username (to register if is LAPI or to use if it's an agent): `-e AGENT_USERNAME="machine_id"` |
 | `AGENT_USERNAME`        | | Agent username (to register if is LAPI or to use if it's an agent): `-e AGENT_USERNAME="machine_id"` |
 | `AGENT_PASSWORD`        | | Agent password (to register if is LAPI or to use if it's an agent): `-e AGENT_PASSWORD="machine_password"` |
 | `AGENT_PASSWORD`        | | Agent password (to register if is LAPI or to use if it's an agent): `-e AGENT_PASSWORD="machine_password"` |
 |                         | | |
 |                         | | |
-| __TLS Auth/encryption   | | |
-| `USE_TLS`               | false | Enable TLS on the LAPI |
-| `CERT_FILE`             | /etc/ssl/cert.pem | TLS Certificate path |
-| `KEY_FILE`              | /etc/ssl/key.pem | TLS Key path |
-| `CACERT_FILE`           | | CA certificate bundle |
+| __TLS Encryption__      | | |
+| `USE_TLS`               | false | Enable TLS encryption (either as a LAPI or agent) |
+| `CACERT_FILE`           | | CA certificate bundle (for self-signed certificates) |
+| `INSECURE_SKIP_VERIFY`  | | Skip LAPI certificate validation |
+| `LAPI_CERT_FILE`        | | LAPI TLS Certificate path |
+| `LAPI_KEY_FILE`         | | LAPI TLS Key path |
+|                         | | |
+| __TLS Authentication__  | | (these require USE_TLS=true) |
+| `CLIENT_CERT_FILE`      | | Client TLS Certificate path (enable TLS authentication) |
+| `CLIENT_KEY_FILE`       | | Client TLS Key path |
 | `AGENTS_ALLOWED_OU`     | agent-ou | OU values allowed for agents, separated by comma |
 | `AGENTS_ALLOWED_OU`     | agent-ou | OU values allowed for agents, separated by comma |
 | `BOUNCERS_ALLOWED_OU`   | bouncer-ou | OU values allowed for bouncers, separated by comma |
 | `BOUNCERS_ALLOWED_OU`   | bouncer-ou | OU values allowed for bouncers, separated by comma |
 |                         | | |
 |                         | | |
 | __Hub management__      | | |
 | __Hub management__      | | |
 | `COLLECTIONS`           | | Collections to install, separated by space: `-e COLLECTIONS="crowdsecurity/linux crowdsecurity/apache2"` |
 | `COLLECTIONS`           | | Collections to install, separated by space: `-e COLLECTIONS="crowdsecurity/linux crowdsecurity/apache2"` |
-| `SCENARIOS`             | | Scenarios to install, separated by space |
 | `PARSERS`               | | Parsers to install, separated by space |
 | `PARSERS`               | | Parsers to install, separated by space |
+| `SCENARIOS`             | | Scenarios to install, separated by space |
 | `POSTOVERFLOWS`         | | Postoverflows to install, separated by space |
 | `POSTOVERFLOWS`         | | Postoverflows to install, separated by space |
 | `DISABLE_COLLECTIONS`   | | Collections to remove, separated by space: `-e DISABLE_COLLECTIONS="crowdsecurity/linux crowdsecurity/nginx"` |
 | `DISABLE_COLLECTIONS`   | | Collections to remove, separated by space: `-e DISABLE_COLLECTIONS="crowdsecurity/linux crowdsecurity/nginx"` |
 | `DISABLE_PARSERS`       | | Parsers to remove, separated by space |
 | `DISABLE_PARSERS`       | | Parsers to remove, separated by space |
@@ -246,6 +262,10 @@ Using binds rather than named volumes ([complete explanation here](https://docs.
 | `LEVEL_INFO`            | false | Force INFO level for the container log |
 | `LEVEL_INFO`            | false | Force INFO level for the container log |
 | `LEVEL_DEBUG`           | false | Force DEBUG level for the container log |
 | `LEVEL_DEBUG`           | false | Force DEBUG level for the container log |
 | `LEVEL_TRACE`           | false | Force TRACE level (VERY verbose) for the container log |
 | `LEVEL_TRACE`           | false | Force TRACE level (VERY verbose) for the container log |
+|                         | | |
+| __Developer options__   | | |
+| `CI_TESTING`            | false | Used during functional tests |
+| `DEBUG`                 | false | Trace the entrypoint |
 
 
 ## Volumes
 ## Volumes
 
 

+ 3 - 5
docker/config.yaml

@@ -16,8 +16,8 @@ crowdsec_service:
   acquisition_path: /etc/crowdsec/acquis.yaml
   acquisition_path: /etc/crowdsec/acquis.yaml
   parser_routines: 1
   parser_routines: 1
 plugin_config:
 plugin_config:
- user: nobody
- group: nobody
+  user: nobody
+  group: nobody
 cscli:
 cscli:
   output: human
   output: human
 db_config:
 db_config:
@@ -40,10 +40,8 @@ api:
       - 127.0.0.1
       - 127.0.0.1
       - ::1
       - ::1
     online_client: # Central API credentials (to push signals and receive bad IPs)
     online_client: # Central API credentials (to push signals and receive bad IPs)
-      #credentials_path: /etc/crowdsec/online_api_credentials.yaml
+    #credentials_path: /etc/crowdsec/online_api_credentials.yaml
     tls:
     tls:
-      cert_file: /etc/ssl/cert.pem
-      key_file: /etc/ssl/key.pem
       agents_allowed_ou:
       agents_allowed_ou:
         - agent-ou
         - agent-ou
       bouncers_allowed_ou:
       bouncers_allowed_ou:

+ 93 - 53
docker/docker_start.sh

@@ -3,14 +3,9 @@
 # shellcheck disable=SC2292      # allow [ test ] syntax
 # shellcheck disable=SC2292      # allow [ test ] syntax
 # shellcheck disable=SC2310      # allow "if function..." syntax with -e
 # shellcheck disable=SC2310      # allow "if function..." syntax with -e
 
 
-#set -x
-#export PS4='+(${BASH_SOURCE}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
-
 set -e
 set -e
 shopt -s inherit_errexit
 shopt -s inherit_errexit
 
 
-#- HELPER FUNCTIONS ----------------#
-
 # match true, TRUE, True, tRuE, etc.
 # match true, TRUE, True, tRuE, etc.
 istrue() {
 istrue() {
   case "$(echo "$1" | tr '[:upper:]' '[:lower:]')" in
   case "$(echo "$1" | tr '[:upper:]' '[:lower:]')" in
@@ -27,6 +22,22 @@ isfalse() {
     fi
     fi
 }
 }
 
 
+if istrue "$DEBUG"; then
+    set -x
+    export PS4='+(${BASH_SOURCE}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
+fi
+
+if istrue "$CI_TESTING"; then
+    echo "githubciXXXXXXXXXXXXXXXXXXXXXXXX" >/etc/machine-id
+fi
+
+#- DEFAULTS -----------------------#
+
+export CONFIG_FILE="${CONFIG_FILE:=/etc/crowdsec/config.yaml}"
+export CUSTOM_HOSTNAME="${CUSTOM_HOSTNAME:=localhost}"
+
+#- HELPER FUNCTIONS ----------------#
+
 # csv2yaml <string>
 # csv2yaml <string>
 # generate a yaml list from a comma-separated string of values
 # generate a yaml list from a comma-separated string of values
 csv2yaml() {
 csv2yaml() {
@@ -58,8 +69,20 @@ conf_set() {
     else
     else
         YAML_FILE="$CONFIG_FILE"
         YAML_FILE="$CONFIG_FILE"
     fi
     fi
-    YAML_CONTENT=$(cat "$YAML_FILE" 2>/dev/null || true)
-    echo "$YAML_CONTENT" | yq e "$1" | install -m 0600 /dev/stdin "$YAML_FILE"
+    if [ ! -f "$YAML_FILE" ]; then
+        install -m 0600 /dev/null "$YAML_FILE"
+    fi
+    yq e "$1" -i "$YAML_FILE"
+}
+
+# conf_set_if(): used to update the configuration
+# only if a given variable is provided
+# conf_set_if "$VAR" <yq_expression> [file_path]
+conf_set_if() {
+    if [ "$1" != "" ]; then
+        shift
+        conf_set "$@"
+    fi
 }
 }
 
 
 # register_bouncer <bouncer_name> <bouncer_key>
 # register_bouncer <bouncer_name> <bouncer_key>
@@ -90,6 +113,16 @@ cscli_if_clean() {
 
 
 #-----------------------------------#
 #-----------------------------------#
 
 
+if [ -n "$CERT_FILE" ] || [ -n "$KEY_FILE" ] ; then
+    printf '%b' '\033[0;33m'
+    echo "Warning: the variables CERT_FILE and KEY_FILE have been deprecated." >&2
+    echo "Please use LAPI_CERT_FILE and LAPI_KEY_FILE instead." >&2
+    echo "The old variables will be removed in a future release." >&2
+    printf '%b' '\033[0m'
+    LAPI_CERT_FILE=${LAPI_CERT_FILE:-$CERT_FILE}
+    LAPI_KEY_FILE=${LAPI_KEY_FILE:-$KEY_FILE}
+fi
+
 # Check and prestage databases
 # Check and prestage databases
 for geodb in GeoLite2-ASN.mmdb GeoLite2-City.mmdb; do
 for geodb in GeoLite2-ASN.mmdb GeoLite2-City.mmdb; do
     # We keep the pre-populated geoip databases in /staging instead of /var,
     # We keep the pre-populated geoip databases in /staging instead of /var,
@@ -122,56 +155,65 @@ elif [ -n "$USE_WAL" ] && isfalse "$USE_WAL"; then
     conf_set '.db_config.use_wal = false'
     conf_set '.db_config.use_wal = false'
 fi
 fi
 
 
-# regenerate local agent credentials (ignore if agent is disabled)
-if isfalse "$DISABLE_AGENT"; then
-    if isfalse "$DISABLE_LOCAL_API"; then
+# regenerate local agent credentials (even if agent is disabled, cscli needs a
+# connection to the API)
+cscli machines delete "$CUSTOM_HOSTNAME" 2>/dev/null || true
+if isfalse "$DISABLE_LOCAL_API"; then
+    if isfalse "$USE_TLS" || [ "$CLIENT_CERT_FILE" = "" ]; then
         echo "Regenerate local agent credentials"
         echo "Regenerate local agent credentials"
-        cscli machines delete "$CUSTOM_HOSTNAME" 2>/dev/null || true
-        cscli machines add "$CUSTOM_HOSTNAME" --auto --url "$LOCAL_API_URL"
+        cscli machines add "$CUSTOM_HOSTNAME" --auto
     fi
     fi
 
 
-    lapi_credentials_path=$(conf_get '.api.client.credentials_path')
+    echo "Check if lapi needs to register an additional agent"
+    # pre-registration is not needed with TLS authentication, but we can have TLS transport with user/pw
+    if [ "$AGENT_USERNAME" != "" ] && [ "$AGENT_PASSWORD" != "" ] ; then
+        # re-register because pw may have been changed
+        cscli machines add "$AGENT_USERNAME" --password "$AGENT_PASSWORD" -f /dev/null --force
+        echo "Agent registered to lapi"
+    fi
+fi
+
+# ----------------
+
+lapi_credentials_path=$(conf_get '.api.client.credentials_path')
+
+conf_set_if "$LOCAL_API_URL" '.url = strenv(LOCAL_API_URL)' "$lapi_credentials_path"
 
 
+if istrue "$DISABLE_LOCAL_API"; then
     # we only use the envvars that are actually defined
     # we only use the envvars that are actually defined
     # in case of persistent configuration
     # in case of persistent configuration
-    conf_set '
-        with(select(strenv(LOCAL_API_URL)!=""); .url = strenv(LOCAL_API_URL)) |
-        with(select(strenv(AGENT_USERNAME)!=""); .login = strenv(AGENT_USERNAME)) |
-        with(select(strenv(AGENT_PASSWORD)!=""); .password = strenv(AGENT_PASSWORD))
-        ' "$lapi_credentials_path"
-
-    if istrue "$USE_TLS"; then
-        conf_set '
-            with(select(strenv(CACERT_FILE)!=""); .ca_cert_path = strenv(CACERT_FILE)) |
-            with(select(strenv(KEY_FILE)!=""); .key_path = strenv(KEY_FILE)) |
-            with(select(strenv(CERT_FILE)!=""); .cert_path = strenv(CERT_FILE))
-        ' "$lapi_credentials_path"
-    else
-        conf_set '
-            del(.ca_cert_path) |
-            del(.key_path) |
-            del(.cert_path)
-        ' "$lapi_credentials_path"
-    fi
+    conf_set_if "$AGENT_USERNAME" '.login = strenv(AGENT_USERNAME)' "$lapi_credentials_path"
+    conf_set_if "$AGENT_PASSWORD" '.password = strenv(AGENT_PASSWORD)' "$lapi_credentials_path"
 fi
 fi
 
 
-if isfalse "$DISABLE_LOCAL_API"; then
-    echo "Check if lapi needs to automatically register an agent"
+conf_set_if "$INSECURE_SKIP_VERIFY" '.api.client.insecure_skip_verify = env(INSECURE_SKIP_VERIFY)'
 
 
-    # pre-registration is not needed with TLS
-    if isfalse "$USE_TLS" && [ "$AGENT_USERNAME" != "" ] && [ "$AGENT_PASSWORD" != "" ] ; then
-        # re-register because pw may have been changed
-        cscli machines add "$AGENT_USERNAME" --password "$AGENT_PASSWORD" --url "$LOCAL_API_URL" --force
-        echo "Agent registered to lapi"
-    fi
+# agent-only containers still require USE_TLS
+if istrue "$USE_TLS"; then
+    # shellcheck disable=SC2153
+    conf_set_if "$CACERT_FILE" '.ca_cert_path = strenv(CACERT_FILE)' "$lapi_credentials_path"
+    conf_set_if "$CLIENT_KEY_FILE" '.key_path = strenv(CLIENT_KEY_FILE)' "$lapi_credentials_path"
+    conf_set_if "$CLIENT_CERT_FILE" '.cert_path = strenv(CLIENT_CERT_FILE)' "$lapi_credentials_path"
+else
+    conf_set '
+        del(.ca_cert_path) |
+        del(.key_path) |
+        del(.cert_path)
+    ' "$lapi_credentials_path"
+fi
+
+if istrue "$DISABLE_ONLINE_API"; then
+    conf_set 'del(.api.server.online_client)'
 fi
 fi
 
 
 # registration to online API for signal push
 # registration to online API for signal push
-if isfalse "$DISABLE_ONLINE_API" && [ "$CONFIG_FILE" == "/etc/crowdsec/config.yaml" ] ; then
+if isfalse "$DISABLE_ONLINE_API" ; then
+    CONFIG_DIR=$(conf_get '.config_paths.config_dir')
     config_exists=$(conf_get '.api.server.online_client | has("credentials_path")')
     config_exists=$(conf_get '.api.server.online_client | has("credentials_path")')
     if isfalse "$config_exists"; then
     if isfalse "$config_exists"; then
-        conf_set '.api.server.online_client = {"credentials_path": "/etc/crowdsec/online_api_credentials.yaml"}'
-        cscli capi register > /etc/crowdsec/online_api_credentials.yaml
+        export CONFIG_DIR
+        conf_set '.api.server.online_client = {"credentials_path": strenv(CONFIG_DIR) + "/online_api_credentials.yaml"}'
+        cscli capi register > "$CONFIG_DIR/online_api_credentials.yaml"
         echo "Registration to online API done"
         echo "Registration to online API done"
     fi
     fi
 fi
 fi
@@ -200,22 +242,20 @@ if [ "$GID" != "" ]; then
     fi
     fi
 fi
 fi
 
 
+# XXX only with LAPI
 if istrue "$USE_TLS"; then
 if istrue "$USE_TLS"; then
     agents_allowed_yaml=$(csv2yaml "$AGENTS_ALLOWED_OU") \
     agents_allowed_yaml=$(csv2yaml "$AGENTS_ALLOWED_OU") \
     bouncers_allowed_yaml=$(csv2yaml "$BOUNCERS_ALLOWED_OU") \
     bouncers_allowed_yaml=$(csv2yaml "$BOUNCERS_ALLOWED_OU") \
-    conf_set '
-        with(select(strenv(CACERT_FILE)!=""); .api.server.tls.ca_cert_path = strenv(CACERT_FILE)) |
-        with(select(strenv(CERT_FILE)!=""); .api.server.tls.cert_file = strenv(CERT_FILE)) |
-        with(select(strenv(KEY_FILE)!=""); .api.server.tls.key_file = strenv(KEY_FILE)) |
-        with(select(strenv(BOUNCERS_ALLOWED_OU)!=""); .api.server.tls.bouncers_allowed_ou = env(bouncers_allowed_yaml)) |
-        with(select(strenv(AGENTS_ALLOWED_OU)!=""); .api.server.tls.agents_allowed_ou = env(agents_allowed_yaml)) |
-        ... comments=""
-        '
+    conf_set_if "$CACERT_FILE" '.api.server.tls.ca_cert_path = strenv(CACERT_FILE)'
+    conf_set_if "$LAPI_CERT_FILE" '.api.server.tls.cert_file = strenv(LAPI_CERT_FILE)'
+    conf_set_if "$LAPI_KEY_FILE" '.api.server.tls.key_file = strenv(LAPI_KEY_FILE)'
+    conf_set_if "$BOUNCERS_ALLOWED_OU" '.api.server.tls.bouncers_allowed_ou = env(bouncers_allowed_yaml)'
+    conf_set_if "$AGENTS_ALLOWED_OU" '.api.server.tls.agents_allowed_ou = env(agents_allowed_yaml)'
 else
 else
     conf_set 'del(.api.server.tls)'
     conf_set 'del(.api.server.tls)'
 fi
 fi
 
 
-conf_set 'with(select(strenv(PLUGIN_DIR)!=""); .config_paths.plugin_dir = strenv(PLUGIN_DIR))'
+conf_set_if "$PLUGIN_DIR" '.config_paths.plugin_dir = strenv(PLUGIN_DIR)'
 
 
 ## Install collections, parsers, scenarios & postoverflows
 ## Install collections, parsers, scenarios & postoverflows
 cscli hub update
 cscli hub update
@@ -322,7 +362,7 @@ if istrue "$LEVEL_INFO"; then
     ARGS="$ARGS -info"
     ARGS="$ARGS -info"
 fi
 fi
 
 
-conf_set 'with(select(strenv(METRICS_PORT)!=""); .prometheus.listen_port=env(METRICS_PORT))'
+conf_set_if "$METRICS_PORT" '.prometheus.listen_port=env(METRICS_PORT)'
 
 
 # shellcheck disable=SC2086
 # shellcheck disable=SC2086
 exec crowdsec $ARGS
 exec crowdsec $ARGS

+ 3 - 0
go.mod

@@ -69,6 +69,7 @@ require (
 	github.com/aquasecurity/table v1.8.0
 	github.com/aquasecurity/table v1.8.0
 	github.com/beevik/etree v1.1.0
 	github.com/beevik/etree v1.1.0
 	github.com/blackfireio/osinfo v1.0.3
 	github.com/blackfireio/osinfo v1.0.3
+	github.com/goccy/go-yaml v1.9.7
 	github.com/google/winops v0.0.0-20211216095627-f0e86eb1453b
 	github.com/google/winops v0.0.0-20211216095627-f0e86eb1453b
 	github.com/ivanpirog/coloredcobra v1.0.1
 	github.com/ivanpirog/coloredcobra v1.0.1
 	github.com/mattn/go-isatty v0.0.14
 	github.com/mattn/go-isatty v0.0.14
@@ -90,6 +91,7 @@ require (
 	github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
 	github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect
 	github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/bluele/gcache v0.0.2 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/containerd/containerd v1.6.2 // indirect
 	github.com/containerd/containerd v1.6.2 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
@@ -169,6 +171,7 @@ require (
 	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
 	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
 	golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
 	golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
 	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/text v0.3.7 // indirect
+	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
 	google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect

+ 8 - 1
go.sum

@@ -121,6 +121,8 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm
 github.com/blackfireio/osinfo v1.0.3 h1:Yk2t2GTPjBcESv6nDSWZKO87bGMQgO+Hi9OoXPpxX8c=
 github.com/blackfireio/osinfo v1.0.3 h1:Yk2t2GTPjBcESv6nDSWZKO87bGMQgO+Hi9OoXPpxX8c=
 github.com/blackfireio/osinfo v1.0.3/go.mod h1:Pd987poVNmd5Wsx6PRPw4+w7kLlf9iJxoRKPtPAjOrA=
 github.com/blackfireio/osinfo v1.0.3/go.mod h1:Pd987poVNmd5Wsx6PRPw4+w7kLlf9iJxoRKPtPAjOrA=
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
+github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
 github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
 github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
 github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
 github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
 github.com/c-robinson/iplib v1.0.3 h1:NG0UF0GoEsrC1/vyfX1Lx2Ss7CySWl3KqqXh3q4DdPU=
 github.com/c-robinson/iplib v1.0.3 h1:NG0UF0GoEsrC1/vyfX1Lx2Ss7CySWl3KqqXh3q4DdPU=
@@ -209,6 +211,7 @@ github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
 github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
 github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
 github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
@@ -372,6 +375,8 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe
 github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
 github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
 github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
 github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
 github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
 github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/goccy/go-yaml v1.9.7 h1:D/Vx+JITklB1ugSkncB4BNR67M3X6AKs9+rqVeo3ddw=
+github.com/goccy/go-yaml v1.9.7/go.mod h1:JubOolP3gh0HpiBc4BLRD4YmjEjHAmIIB2aaXKkTfoE=
 github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
 github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
 github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
@@ -651,6 +656,7 @@ github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcncea
 github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
 github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
 github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
 github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
@@ -664,7 +670,6 @@ github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9
 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
 github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
 github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
 github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI=
 github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI=
@@ -1173,6 +1178,7 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1264,6 +1270,7 @@ golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
 google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
 google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
 google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=

+ 1 - 1
pkg/acquisition/acquisition_test.go

@@ -176,7 +176,7 @@ wowo: ajsajasjas
 			yaml.Unmarshal([]byte(tc.String), &common)
 			yaml.Unmarshal([]byte(tc.String), &common)
 			ds, err := DataSourceConfigure(common)
 			ds, err := DataSourceConfigure(common)
 			cstest.RequireErrorContains(t, err, tc.ExpectedError)
 			cstest.RequireErrorContains(t, err, tc.ExpectedError)
-			if tc.ExpectedError == "" {
+			if tc.ExpectedError != "" {
 				return
 				return
 			}
 			}
 
 

+ 2 - 2
pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go

@@ -54,7 +54,7 @@ func TestTimestamp(t *testing.T) {
 		currentYear bool
 		currentYear bool
 	}{
 	}{
 		{"May 20 09:33:54", "0000-05-20T09:33:54Z", "", false},
 		{"May 20 09:33:54", "0000-05-20T09:33:54Z", "", false},
-		{"May 20 09:33:54", "2022-05-20T09:33:54Z", "", true},
+		{"May 20 09:33:54", "2023-05-20T09:33:54Z", "", true},
 		{"May 20 09:33:54 2022", "2022-05-20T09:33:54Z", "", false},
 		{"May 20 09:33:54 2022", "2022-05-20T09:33:54Z", "", false},
 		{"May  1 09:33:54 2022", "2022-05-01T09:33:54Z", "", false},
 		{"May  1 09:33:54 2022", "2022-05-01T09:33:54Z", "", false},
 		{"May 01 09:33:54 2021", "2021-05-01T09:33:54Z", "", true},
 		{"May 01 09:33:54 2021", "2021-05-01T09:33:54Z", "", true},
@@ -257,7 +257,7 @@ func TestParse(t *testing.T) {
 		},
 		},
 		{
 		{
 			"<12>May 20 09:33:54 UDMPRO,a2edd0c6ae48,udm-1.10.0.3686 kernel: foo", expected{
 			"<12>May 20 09:33:54 UDMPRO,a2edd0c6ae48,udm-1.10.0.3686 kernel: foo", expected{
-				Timestamp: time.Date(2022, time.May, 20, 9, 33, 54, 0, time.UTC),
+				Timestamp: time.Date(2023, time.May, 20, 9, 33, 54, 0, time.UTC),
 				Hostname:  "UDMPRO,a2edd0c6ae48,udm-1.10.0.3686",
 				Hostname:  "UDMPRO,a2edd0c6ae48,udm-1.10.0.3686",
 				Tag:       "kernel",
 				Tag:       "kernel",
 				PID:       "",
 				PID:       "",

+ 157 - 0
pkg/alertcontext/alertcontext.go

@@ -0,0 +1,157 @@
+package alertcontext
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/antonmedv/expr"
+	"github.com/antonmedv/expr/vm"
+	"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
+	"github.com/crowdsecurity/crowdsec/pkg/models"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+	log "github.com/sirupsen/logrus"
+)
+
+const (
+	maxContextValueLen = 4000
+)
+
+var (
+	alertContext = Context{}
+)
+
+type Context struct {
+	ContextToSend         map[string][]string
+	ContextValueLen       int
+	ContextToSendCompiled map[string][]*vm.Program
+	Log                   *log.Logger
+}
+
+func ValidateContextExpr(key string, expressions []string) error {
+	for _, expression := range expressions {
+		_, err := expr.Compile(expression, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
+		if err != nil {
+			return fmt.Errorf("compilation of '%s' failed: %v", expression, err)
+		}
+	}
+	return nil
+}
+
+func NewAlertContext(contextToSend map[string][]string, valueLength int) error {
+	var clog = log.New()
+	if err := types.ConfigureLogger(clog); err != nil {
+		return fmt.Errorf("couldn't create logger for alert context: %s", err)
+	}
+
+	if valueLength == 0 {
+		clog.Debugf("No console context value length provided, using default: %d", maxContextValueLen)
+		valueLength = maxContextValueLen
+	}
+	if valueLength > maxContextValueLen {
+		clog.Debugf("Provided console context value length (%d) is higher than the maximum, using default: %d", valueLength, maxContextValueLen)
+		valueLength = maxContextValueLen
+	}
+
+	alertContext = Context{
+		ContextToSend:         contextToSend,
+		ContextValueLen:       valueLength,
+		Log:                   clog,
+		ContextToSendCompiled: make(map[string][]*vm.Program),
+	}
+
+	for key, values := range contextToSend {
+		alertContext.ContextToSendCompiled[key] = make([]*vm.Program, 0)
+		for _, value := range values {
+			valueCompiled, err := expr.Compile(value, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
+			if err != nil {
+				return fmt.Errorf("compilation of '%s' context value failed: %v", value, err)
+			}
+			alertContext.ContextToSendCompiled[key] = append(alertContext.ContextToSendCompiled[key], valueCompiled)
+		}
+	}
+
+	return nil
+}
+
+func truncate(values []string, contextValueLen int) (string, error) {
+	var ret string
+	valueByte, err := json.Marshal(values)
+	if err != nil {
+		return "", fmt.Errorf("unable to dump metas: %s", err)
+	}
+	ret = string(valueByte)
+	for {
+		if len(ret) <= contextValueLen {
+			break
+		}
+		// if there is only 1 value left and that the size is too big, truncate it
+		if len(values) == 1 {
+			valueToTruncate := values[0]
+			half := len(valueToTruncate) / 2
+			lastValueTruncated := valueToTruncate[:half] + "..."
+			values = values[:len(values)-1]
+			values = append(values, lastValueTruncated)
+		} else {
+			// if there is multiple value inside, just remove the last one
+			values = values[:len(values)-1]
+		}
+		valueByte, err = json.Marshal(values)
+		if err != nil {
+			return "", fmt.Errorf("unable to dump metas: %s", err)
+		}
+		ret = string(valueByte)
+	}
+	return ret, nil
+}
+
+func EventToContext(events []types.Event) (models.Meta, []error) {
+	var errors []error
+
+	metas := make([]*models.MetaItems0, 0)
+	tmpContext := make(map[string][]string)
+	for _, evt := range events {
+		for key, values := range alertContext.ContextToSendCompiled {
+			if _, ok := tmpContext[key]; !ok {
+				tmpContext[key] = make([]string, 0)
+			}
+			for _, value := range values {
+				var val string
+				output, err := expr.Run(value, exprhelpers.GetExprEnv(map[string]interface{}{"evt": evt}))
+				if err != nil {
+					errors = append(errors, fmt.Errorf("failed to get value for %s : %v", key, err))
+					continue
+				}
+				switch out := output.(type) {
+				case string:
+					val = out
+				case int:
+					val = strconv.Itoa(out)
+				default:
+					errors = append(errors, fmt.Errorf("unexpected return type for %s : %T", key, output))
+					continue
+				}
+				if val != "" && !types.InSlice(val, tmpContext[key]) {
+					tmpContext[key] = append(tmpContext[key], val)
+				}
+			}
+		}
+	}
+	for key, values := range tmpContext {
+		if len(values) == 0 {
+			continue
+		}
+		valueStr, err := truncate(values, alertContext.ContextValueLen)
+		if err != nil {
+			log.Warningf(err.Error())
+		}
+		meta := models.MetaItems0{
+			Key:   key,
+			Value: valueStr,
+		}
+		metas = append(metas, &meta)
+	}
+
+	ret := models.Meta(metas)
+	return ret, errors
+}

+ 201 - 0
pkg/alertcontext/alertcontext_test.go

@@ -0,0 +1,201 @@
+package alertcontext
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/crowdsecurity/crowdsec/pkg/models"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestNewAlertContext(t *testing.T) {
+	tests := []struct {
+		name          string
+		contextToSend map[string][]string
+		valueLength   int
+		expectedErr   error
+	}{
+		{
+			name: "basic config test",
+			contextToSend: map[string][]string{
+				"test": []string{"evt.Parsed.source_ip"},
+			},
+			valueLength: 100,
+			expectedErr: nil,
+		},
+	}
+
+	for _, test := range tests {
+		fmt.Printf("Running test '%s'\n", test.name)
+		err := NewAlertContext(test.contextToSend, test.valueLength)
+		assert.ErrorIs(t, err, test.expectedErr)
+
+	}
+}
+
+func TestEventToContext(t *testing.T) {
+	tests := []struct {
+		name           string
+		contextToSend  map[string][]string
+		valueLength    int
+		events         []types.Event
+		expectedResult models.Meta
+	}{
+		{
+			name: "basic test",
+			contextToSend: map[string][]string{
+				"source_ip":         []string{"evt.Parsed.source_ip"},
+				"nonexistent_field": []string{"evt.Parsed.nonexist"},
+			},
+			valueLength: 100,
+			events: []types.Event{
+				{
+					Parsed: map[string]string{
+						"source_ip":      "1.2.3.4",
+						"source_machine": "mymachine",
+					},
+				},
+			},
+			expectedResult: []*models.MetaItems0{
+				{
+					Key:   "source_ip",
+					Value: "[\"1.2.3.4\"]",
+				},
+			},
+		},
+		{
+			name: "test many events",
+			contextToSend: map[string][]string{
+				"source_ip":      []string{"evt.Parsed.source_ip"},
+				"source_machine": []string{"evt.Parsed.source_machine"},
+				"cve":            []string{"evt.Parsed.cve"},
+			},
+			valueLength: 100,
+			events: []types.Event{
+				{
+					Parsed: map[string]string{
+						"source_ip":      "1.2.3.4",
+						"source_machine": "mymachine",
+						"cve":            "CVE-2022-1234",
+					},
+				},
+				{
+					Parsed: map[string]string{
+						"source_ip":      "1.2.3.4",
+						"source_machine": "mymachine",
+						"cve":            "CVE-2022-1235",
+					},
+				},
+				{
+					Parsed: map[string]string{
+						"source_ip":      "1.2.3.4",
+						"source_machine": "mymachine",
+						"cve":            "CVE-2022-125",
+					},
+				},
+			},
+			expectedResult: []*models.MetaItems0{
+				{
+					Key:   "source_ip",
+					Value: "[\"1.2.3.4\"]",
+				},
+				{
+					Key:   "source_machine",
+					Value: "[\"mymachine\"]",
+				},
+				{
+					Key:   "cve",
+					Value: "[\"CVE-2022-1234\",\"CVE-2022-1235\",\"CVE-2022-125\"]",
+				},
+			},
+		},
+		{
+			name: "test many events with result above max length (need truncate, keep only 2 on 3 elements)",
+			contextToSend: map[string][]string{
+				"source_ip":      []string{"evt.Parsed.source_ip"},
+				"source_machine": []string{"evt.Parsed.source_machine"},
+				"uri":            []string{"evt.Parsed.uri"},
+			},
+			valueLength: 100,
+			events: []types.Event{
+				{
+					Parsed: map[string]string{
+						"source_ip":      "1.2.3.4",
+						"source_machine": "mymachine",
+						"uri":            "/test/test/test/../../../../../../../../",
+					},
+				},
+				{
+					Parsed: map[string]string{
+						"source_ip":      "1.2.3.4",
+						"source_machine": "mymachine",
+						"uri":            "/admin/admin/admin/../../../../../../../../",
+					},
+				},
+				{
+					Parsed: map[string]string{
+						"source_ip":      "1.2.3.4",
+						"source_machine": "mymachine",
+						"uri":            "/login/login/login/../../../../../../../../../../../",
+					},
+				},
+			},
+			expectedResult: []*models.MetaItems0{
+				{
+					Key:   "source_ip",
+					Value: "[\"1.2.3.4\"]",
+				},
+				{
+					Key:   "source_machine",
+					Value: "[\"mymachine\"]",
+				},
+				{
+					Key:   "uri",
+					Value: "[\"/test/test/test/../../../../../../../../\",\"/admin/admin/admin/../../../../../../../../\"]",
+				},
+			},
+		},
+		{
+			name: "test one events with result above max length (need truncate on one element)",
+			contextToSend: map[string][]string{
+				"source_ip":      []string{"evt.Parsed.source_ip"},
+				"source_machine": []string{"evt.Parsed.source_machine"},
+				"uri":            []string{"evt.Parsed.uri"},
+			},
+			valueLength: 100,
+			events: []types.Event{
+				{
+					Parsed: map[string]string{
+						"source_ip":      "1.2.3.4",
+						"source_machine": "mymachine",
+						"uri":            "/test/test/test/../../../../.should_truncate_just_after_this/../../../..../../../../../../../../../../../../../../../end",
+					},
+				},
+			},
+			expectedResult: []*models.MetaItems0{
+				{
+					Key:   "source_machine",
+					Value: "[\"mymachine\"]",
+				},
+				{
+					Key:   "uri",
+					Value: "[\"/test/test/test/../../../../.should_truncate_just_after_this...\"]",
+				},
+				{
+					Key:   "source_ip",
+					Value: "[\"1.2.3.4\"]",
+				},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		fmt.Printf("Running test '%s'\n", test.name)
+		err := NewAlertContext(test.contextToSend, test.valueLength)
+		assert.ErrorIs(t, err, nil)
+
+		metas, _ := EventToContext(test.events)
+		assert.ElementsMatch(t, test.expectedResult, metas)
+	}
+}

+ 120 - 20
pkg/apiclient/auth.go

@@ -3,6 +3,8 @@ package apiclient
 import (
 import (
 	"bytes"
 	"bytes"
 	"encoding/json"
 	"encoding/json"
+	"math/rand"
+	"sync"
 	"time"
 	"time"
 
 
 	//"errors"
 	//"errors"
@@ -12,6 +14,7 @@ import (
 	"net/http/httputil"
 	"net/http/httputil"
 	"net/url"
 	"net/url"
 
 
+	"github.com/crowdsecurity/crowdsec/pkg/fflag"
 	"github.com/crowdsecurity/crowdsec/pkg/models"
 	"github.com/crowdsecurity/crowdsec/pkg/models"
 	"github.com/go-openapi/strfmt"
 	"github.com/go-openapi/strfmt"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
@@ -75,10 +78,57 @@ func (t *APIKeyTransport) transport() http.RoundTripper {
 	return http.DefaultTransport
 	return http.DefaultTransport
 }
 }
 
 
+type retryRoundTripper struct {
+	next             http.RoundTripper
+	maxAttempts      int
+	retryStatusCodes []int
+	withBackOff      bool
+	onBeforeRequest  func(attempt int)
+}
+
+func (r retryRoundTripper) ShouldRetry(statusCode int) bool {
+	for _, code := range r.retryStatusCodes {
+		if code == statusCode {
+			return true
+		}
+	}
+	return false
+}
+
+func (r retryRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	var resp *http.Response
+	var err error
+	backoff := 0
+	for i := 0; i < r.maxAttempts; i++ {
+		if i > 0 {
+			if r.withBackOff && !fflag.DisableHttpRetryBackoff.IsEnabled() {
+				backoff += 10 + rand.Intn(20)
+			}
+			log.Infof("retrying in %d seconds (attempt %d of %d)", backoff, i+1, r.maxAttempts)
+			select {
+			case <-req.Context().Done():
+				return resp, req.Context().Err()
+			case <-time.After(time.Duration(backoff) * time.Second):
+			}
+		}
+		if r.onBeforeRequest != nil {
+			r.onBeforeRequest(i)
+		}
+		clonedReq := cloneRequest(req)
+		resp, err = r.next.RoundTrip(clonedReq)
+		if err == nil {
+			if !r.ShouldRetry(resp.StatusCode) {
+				return resp, nil
+			}
+		}
+	}
+	return resp, err
+}
+
 type JWTTransport struct {
 type JWTTransport struct {
 	MachineID     *string
 	MachineID     *string
 	Password      *strfmt.Password
 	Password      *strfmt.Password
-	token         string
+	Token         string
 	Expiration    time.Time
 	Expiration    time.Time
 	Scenarios     []string
 	Scenarios     []string
 	URL           *url.URL
 	URL           *url.URL
@@ -86,8 +136,9 @@ type JWTTransport struct {
 	UserAgent     string
 	UserAgent     string
 	// Transport is the underlying HTTP transport to use when making requests.
 	// Transport is the underlying HTTP transport to use when making requests.
 	// It will default to http.DefaultTransport if nil.
 	// It will default to http.DefaultTransport if nil.
-	Transport      http.RoundTripper
-	UpdateScenario func() ([]string, error)
+	Transport         http.RoundTripper
+	UpdateScenario    func() ([]string, error)
+	refreshTokenMutex sync.Mutex
 }
 }
 
 
 func (t *JWTTransport) refreshJwtToken() error {
 func (t *JWTTransport) refreshJwtToken() error {
@@ -123,7 +174,14 @@ func (t *JWTTransport) refreshJwtToken() error {
 		return errors.Wrap(err, "could not create request")
 		return errors.Wrap(err, "could not create request")
 	}
 	}
 	req.Header.Add("Content-Type", "application/json")
 	req.Header.Add("Content-Type", "application/json")
-	client := &http.Client{}
+	client := &http.Client{
+		Transport: &retryRoundTripper{
+			next:             http.DefaultTransport,
+			maxAttempts:      5,
+			withBackOff:      true,
+			retryStatusCodes: []int{http.StatusTooManyRequests, http.StatusServiceUnavailable, http.StatusGatewayTimeout, http.StatusInternalServerError},
+		},
+	}
 	if t.UserAgent != "" {
 	if t.UserAgent != "" {
 		req.Header.Add("User-Agent", t.UserAgent)
 		req.Header.Add("User-Agent", t.UserAgent)
 	}
 	}
@@ -149,6 +207,7 @@ func (t *JWTTransport) refreshJwtToken() error {
 
 
 	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
 	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
 		log.Debugf("received response status %q when fetching %v", resp.Status, req.URL)
 		log.Debugf("received response status %q when fetching %v", resp.Status, req.URL)
+
 		err = CheckResponse(resp)
 		err = CheckResponse(resp)
 		if err != nil {
 		if err != nil {
 			return err
 			return err
@@ -161,45 +220,51 @@ func (t *JWTTransport) refreshJwtToken() error {
 	if err := t.Expiration.UnmarshalText([]byte(response.Expire)); err != nil {
 	if err := t.Expiration.UnmarshalText([]byte(response.Expire)); err != nil {
 		return errors.Wrap(err, "unable to parse jwt expiration")
 		return errors.Wrap(err, "unable to parse jwt expiration")
 	}
 	}
-	t.token = response.Token
+	t.Token = response.Token
 
 
-	log.Debugf("token %s will expire on %s", t.token, t.Expiration.String())
+	log.Debugf("token %s will expire on %s", t.Token, t.Expiration.String())
 	return nil
 	return nil
 }
 }
 
 
 // RoundTrip implements the RoundTripper interface.
 // RoundTrip implements the RoundTripper interface.
 func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) {
 func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) {
-	if t.token == "" || t.Expiration.Add(-time.Minute).Before(time.Now().UTC()) {
+	// in a few occasions several goroutines will execute refreshJwtToken concurrently which is useless and will cause overload on CAPI
+	// we use a mutex to avoid this
+	t.refreshTokenMutex.Lock()
+	if t.Token == "" || t.Expiration.Add(-time.Minute).Before(time.Now().UTC()) {
 		if err := t.refreshJwtToken(); err != nil {
 		if err := t.refreshJwtToken(); err != nil {
+			t.refreshTokenMutex.Unlock()
 			return nil, err
 			return nil, err
 		}
 		}
 	}
 	}
+	t.refreshTokenMutex.Unlock()
+
+	if t.UserAgent != "" {
+		req.Header.Add("User-Agent", t.UserAgent)
+	}
+
+	req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", t.Token))
 
 
-	// We must make a copy of the Request so
-	// that we don't modify the Request we were given. This is required by the
-	// specification of http.RoundTripper.
-	req = cloneRequest(req)
-	req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", t.token))
-	log.Debugf("req-jwt: %s %s", req.Method, req.URL.String())
 	if log.GetLevel() >= log.TraceLevel {
 	if log.GetLevel() >= log.TraceLevel {
+		//requestToDump := cloneRequest(req)
 		dump, _ := httputil.DumpRequest(req, true)
 		dump, _ := httputil.DumpRequest(req, true)
 		log.Tracef("req-jwt: %s", string(dump))
 		log.Tracef("req-jwt: %s", string(dump))
 	}
 	}
-	if t.UserAgent != "" {
-		req.Header.Add("User-Agent", t.UserAgent)
-	}
+
 	// Make the HTTP request.
 	// Make the HTTP request.
 	resp, err := t.transport().RoundTrip(req)
 	resp, err := t.transport().RoundTrip(req)
 	if log.GetLevel() >= log.TraceLevel {
 	if log.GetLevel() >= log.TraceLevel {
 		dump, _ := httputil.DumpResponse(resp, true)
 		dump, _ := httputil.DumpResponse(resp, true)
 		log.Tracef("resp-jwt: %s (err:%v)", string(dump), err)
 		log.Tracef("resp-jwt: %s (err:%v)", string(dump), err)
 	}
 	}
-	if err != nil || resp.StatusCode == http.StatusForbidden || resp.StatusCode == http.StatusUnauthorized {
+	if err != nil {
 		/*we had an error (network error for example, or 401 because token is refused), reset the token ?*/
 		/*we had an error (network error for example, or 401 because token is refused), reset the token ?*/
-		t.token = ""
+		t.Token = ""
 		return resp, errors.Wrapf(err, "performing jwt auth")
 		return resp, errors.Wrapf(err, "performing jwt auth")
 	}
 	}
+
 	log.Debugf("resp-jwt: %d", resp.StatusCode)
 	log.Debugf("resp-jwt: %d", resp.StatusCode)
+
 	return resp, nil
 	return resp, nil
 }
 }
 
 
@@ -207,11 +272,39 @@ func (t *JWTTransport) Client() *http.Client {
 	return &http.Client{Transport: t}
 	return &http.Client{Transport: t}
 }
 }
 
 
+func (t *JWTTransport) ResetToken() {
+	log.Debug("resetting jwt token")
+	t.refreshTokenMutex.Lock()
+	t.Token = ""
+	t.refreshTokenMutex.Unlock()
+}
+
 func (t *JWTTransport) transport() http.RoundTripper {
 func (t *JWTTransport) transport() http.RoundTripper {
+	var transport http.RoundTripper
 	if t.Transport != nil {
 	if t.Transport != nil {
-		return t.Transport
+		transport = t.Transport
+	} else {
+		transport = http.DefaultTransport
+	}
+	// a round tripper that retries once when the status is unauthorized and 5 times when infrastructure is overloaded
+	return &retryRoundTripper{
+		next: &retryRoundTripper{
+			next:             transport,
+			maxAttempts:      5,
+			withBackOff:      true,
+			retryStatusCodes: []int{http.StatusTooManyRequests, http.StatusServiceUnavailable, http.StatusGatewayTimeout},
+		},
+		maxAttempts:      2,
+		withBackOff:      false,
+		retryStatusCodes: []int{http.StatusUnauthorized, http.StatusForbidden},
+		onBeforeRequest: func(attempt int) {
+			// reset the token only in the second attempt as this is when we know we had a 401 or 403
+			// the second attempt is supposed to refresh the token
+			if attempt > 0 {
+				t.ResetToken()
+			}
+		},
 	}
 	}
-	return http.DefaultTransport
 }
 }
 
 
 // cloneRequest returns a clone of the provided *http.Request. The clone is a
 // cloneRequest returns a clone of the provided *http.Request. The clone is a
@@ -225,5 +318,12 @@ func cloneRequest(r *http.Request) *http.Request {
 	for k, s := range r.Header {
 	for k, s := range r.Header {
 		r2.Header[k] = append([]string(nil), s...)
 		r2.Header[k] = append([]string(nil), s...)
 	}
 	}
+
+	if r.Body != nil {
+		var b bytes.Buffer
+		b.ReadFrom(r.Body)
+		r.Body = io.NopCloser(&b)
+		r2.Body = io.NopCloser(bytes.NewReader(b.Bytes()))
+	}
 	return r2
 	return r2
 }
 }

+ 1 - 1
pkg/apiclient/auth_service_test.go

@@ -234,5 +234,5 @@ func TestWatcherEnroll(t *testing.T) {
 	}
 	}
 
 
 	_, err = client.Auth.EnrollWatcher(context.Background(), "badkey", "", []string{}, false)
 	_, err = client.Auth.EnrollWatcher(context.Background(), "badkey", "", []string{}, false)
-	assert.Contains(t, err.Error(), "the attachment key provided is not valid")
+	assert.Contains(t, err.Error(), "the attachment key provided is not valid", "got %s", err.Error())
 }
 }

+ 2 - 2
pkg/apiclient/client.go

@@ -53,8 +53,8 @@ func NewClient(config *Config) (*ApiClient, error) {
 		UpdateScenario: config.UpdateScenario,
 		UpdateScenario: config.UpdateScenario,
 	}
 	}
 	tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify}
 	tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify}
+	tlsconfig.RootCAs = CaCertPool
 	if Cert != nil {
 	if Cert != nil {
-		tlsconfig.RootCAs = CaCertPool
 		tlsconfig.Certificates = []tls.Certificate{*Cert}
 		tlsconfig.Certificates = []tls.Certificate{*Cert}
 	}
 	}
 	http.DefaultTransport.(*http.Transport).TLSClientConfig = &tlsconfig
 	http.DefaultTransport.(*http.Transport).TLSClientConfig = &tlsconfig
@@ -75,8 +75,8 @@ func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *htt
 		client = &http.Client{}
 		client = &http.Client{}
 		if ht, ok := http.DefaultTransport.(*http.Transport); ok {
 		if ht, ok := http.DefaultTransport.(*http.Transport); ok {
 			tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify}
 			tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify}
+			tlsconfig.RootCAs = CaCertPool
 			if Cert != nil {
 			if Cert != nil {
-				tlsconfig.RootCAs = CaCertPool
 				tlsconfig.Certificates = []tls.Certificate{*Cert}
 				tlsconfig.Certificates = []tls.Certificate{*Cert}
 			}
 			}
 			ht.TLSClientConfig = &tlsconfig
 			ht.TLSClientConfig = &tlsconfig

+ 15 - 4
pkg/apiserver/apic.go

@@ -85,8 +85,8 @@ func (a *apic) FetchScenariosListFromDB() ([]string, error) {
 	return scenarios, nil
 	return scenarios, nil
 }
 }
 
 
-func alertToSignal(alert *models.Alert, scenarioTrust string) *models.AddSignalsRequestItem {
-	return &models.AddSignalsRequestItem{
+func alertToSignal(alert *models.Alert, scenarioTrust string, shareContext bool) *models.AddSignalsRequestItem {
+	signal := &models.AddSignalsRequestItem{
 		Message:         alert.Message,
 		Message:         alert.Message,
 		Scenario:        alert.Scenario,
 		Scenario:        alert.Scenario,
 		ScenarioHash:    alert.ScenarioHash,
 		ScenarioHash:    alert.ScenarioHash,
@@ -96,8 +96,19 @@ func alertToSignal(alert *models.Alert, scenarioTrust string) *models.AddSignals
 		StopAt:          alert.StopAt,
 		StopAt:          alert.StopAt,
 		CreatedAt:       alert.CreatedAt,
 		CreatedAt:       alert.CreatedAt,
 		MachineID:       alert.MachineID,
 		MachineID:       alert.MachineID,
-		ScenarioTrust:   &scenarioTrust,
+		ScenarioTrust:   scenarioTrust,
+	}
+	if shareContext {
+		signal.Context = make([]*models.AddSignalsRequestItemContextItems0, 0)
+		for _, meta := range alert.Meta {
+			contextItem := models.AddSignalsRequestItemContextItems0{
+				Key:   meta.Key,
+				Value: meta.Value,
+			}
+			signal.Context = append(signal.Context, &contextItem)
+		}
 	}
 	}
+	return signal
 }
 }
 
 
 func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, consoleConfig *csconfig.ConsoleConfig) (*apic, error) {
 func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, consoleConfig *csconfig.ConsoleConfig) (*apic, error) {
@@ -176,7 +187,7 @@ func (a *apic) Push() error {
 			var signals []*models.AddSignalsRequestItem
 			var signals []*models.AddSignalsRequestItem
 			for _, alert := range alerts {
 			for _, alert := range alerts {
 				if ok := shouldShareAlert(alert, a.consoleConfig); ok {
 				if ok := shouldShareAlert(alert, a.consoleConfig); ok {
-					signals = append(signals, alertToSignal(alert, getScenarioTrustOfAlert(alert)))
+					signals = append(signals, alertToSignal(alert, getScenarioTrustOfAlert(alert), *a.consoleConfig.ShareContext))
 				}
 				}
 			}
 			}
 			a.mu.Lock()
 			a.mu.Lock()

+ 1 - 0
pkg/apiserver/apic_test.go

@@ -58,6 +58,7 @@ func getAPIC(t *testing.T) *apic {
 			ShareManualDecisions:  types.BoolPtr(false),
 			ShareManualDecisions:  types.BoolPtr(false),
 			ShareTaintedScenarios: types.BoolPtr(false),
 			ShareTaintedScenarios: types.BoolPtr(false),
 			ShareCustomScenarios:  types.BoolPtr(false),
 			ShareCustomScenarios:  types.BoolPtr(false),
+			ShareContext:          types.BoolPtr(false),
 		},
 		},
 	}
 	}
 }
 }

+ 119 - 0
pkg/cache/cache.go

@@ -0,0 +1,119 @@
+package cache
+
+import (
+	"time"
+
+	"github.com/bluele/gcache"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
+)
+
+var Caches []gcache.Cache
+var CacheNames []string
+var CacheConfig []CacheCfg
+
+/*prometheus*/
+var CacheMetrics = prometheus.NewGaugeVec(
+	prometheus.GaugeOpts{
+		Name: "cs_cache_size",
+		Help: "Entries per cache.",
+	},
+	[]string{"name", "type"},
+)
+
+// UpdateCacheMetrics is called directly by the prom handler
+func UpdateCacheMetrics() {
+	CacheMetrics.Reset()
+	for i, name := range CacheNames {
+		CacheMetrics.With(prometheus.Labels{"name": name, "type": CacheConfig[i].Strategy}).Set(float64(Caches[i].Len(false)))
+	}
+}
+
+type CacheCfg struct {
+	Name     string
+	Size     int
+	TTL      time.Duration
+	Strategy string
+	LogLevel *log.Level
+	Logger   *log.Entry
+}
+
+func CacheInit(cfg CacheCfg) error {
+
+	for _, name := range CacheNames {
+		if name == cfg.Name {
+			log.Infof("Cache %s already exists", cfg.Name)
+		}
+	}
+	//get a default logger
+	if cfg.LogLevel == nil {
+		cfg.LogLevel = new(log.Level)
+		*cfg.LogLevel = log.InfoLevel
+	}
+	var clog = logrus.New()
+	if err := types.ConfigureLogger(clog); err != nil {
+		log.Fatalf("While creating cache logger : %s", err)
+	}
+	clog.SetLevel(*cfg.LogLevel)
+	cfg.Logger = clog.WithFields(log.Fields{
+		"cache": cfg.Name,
+	})
+
+	tmpCache := gcache.New(cfg.Size)
+	switch cfg.Strategy {
+	case "LRU":
+		tmpCache = tmpCache.LRU()
+	case "LFU":
+		tmpCache = tmpCache.LFU()
+	case "ARC":
+		tmpCache = tmpCache.ARC()
+	default:
+		cfg.Strategy = "LRU"
+		tmpCache = tmpCache.LRU()
+
+	}
+
+	CTICache := tmpCache.Build()
+	Caches = append(Caches, CTICache)
+	CacheNames = append(CacheNames, cfg.Name)
+	CacheConfig = append(CacheConfig, cfg)
+
+	return nil
+}
+
+func SetKey(cacheName string, key string, value string, expiration *time.Duration) error {
+
+	for i, name := range CacheNames {
+		if name == cacheName {
+			if expiration == nil {
+				expiration = &CacheConfig[i].TTL
+			}
+			CacheConfig[i].Logger.Debugf("Setting key %s to %s with expiration %v", key, value, *expiration)
+			if err := Caches[i].SetWithExpire(key, value, *expiration); err != nil {
+				CacheConfig[i].Logger.Warningf("While setting key %s in cache %s: %s", key, cacheName, err)
+			}
+		}
+	}
+	return nil
+}
+
+func GetKey(cacheName string, key string) (string, error) {
+	for i, name := range CacheNames {
+		if name == cacheName {
+			if value, err := Caches[i].Get(key); err != nil {
+				//do not warn or log if key not found
+				if err == gcache.KeyNotFoundError {
+					return "", nil
+				}
+				CacheConfig[i].Logger.Warningf("While getting key %s in cache %s: %s", key, cacheName, err)
+				return "", err
+			} else {
+				return value.(string), nil
+			}
+		}
+	}
+	log.Warningf("Cache %s not found", cacheName)
+	return "", nil
+}

+ 30 - 0
pkg/cache/cache_test.go

@@ -0,0 +1,30 @@
+package cache
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestCreateSetGet(t *testing.T) {
+	err := CacheInit(CacheCfg{Name: "test", Size: 100, TTL: 1 * time.Second})
+	assert.Empty(t, err)
+	//set & get
+	err = SetKey("test", "testkey0", "testvalue1", nil)
+	assert.Empty(t, err)
+
+	ret, err := GetKey("test", "testkey0")
+	assert.Equal(t, "testvalue1", ret)
+	assert.Empty(t, err)
+	//re-set
+	err = SetKey("test", "testkey0", "testvalue2", nil)
+	assert.Empty(t, err)
+	assert.Equal(t, "testvalue1", ret)
+	assert.Empty(t, err)
+	//expire
+	time.Sleep(1500 * time.Millisecond)
+	ret, err = GetKey("test", "testkey0")
+	assert.Equal(t, "", ret)
+	assert.Empty(t, err)
+}

+ 11 - 8
pkg/csconfig/api.go

@@ -82,7 +82,7 @@ func (l *LocalApiClientCfg) Load() error {
 		}
 		}
 	}
 	}
 
 
-	if l.Credentials.Login != "" && (l.Credentials.CACertPath != "" || l.Credentials.CertPath != "" || l.Credentials.KeyPath != "") {
+	if l.Credentials.Login != "" && (l.Credentials.CertPath != "" || l.Credentials.KeyPath != "") {
 		return fmt.Errorf("user/password authentication and TLS authentication are mutually exclusive")
 		return fmt.Errorf("user/password authentication and TLS authentication are mutually exclusive")
 	}
 	}
 
 
@@ -92,12 +92,7 @@ func (l *LocalApiClientCfg) Load() error {
 		apiclient.InsecureSkipVerify = *l.InsecureSkipVerify
 		apiclient.InsecureSkipVerify = *l.InsecureSkipVerify
 	}
 	}
 
 
-	if l.Credentials.CACertPath != "" && l.Credentials.CertPath != "" && l.Credentials.KeyPath != "" {
-		cert, err := tls.LoadX509KeyPair(l.Credentials.CertPath, l.Credentials.KeyPath)
-		if err != nil {
-			return errors.Wrapf(err, "failed to load api client certificate")
-		}
-
+	if l.Credentials.CACertPath != ""  {
 		caCert, err := os.ReadFile(l.Credentials.CACertPath)
 		caCert, err := os.ReadFile(l.Credentials.CACertPath)
 		if err != nil {
 		if err != nil {
 			return errors.Wrapf(err, "failed to load cacert")
 			return errors.Wrapf(err, "failed to load cacert")
@@ -105,10 +100,18 @@ func (l *LocalApiClientCfg) Load() error {
 
 
 		caCertPool := x509.NewCertPool()
 		caCertPool := x509.NewCertPool()
 		caCertPool.AppendCertsFromPEM(caCert)
 		caCertPool.AppendCertsFromPEM(caCert)
+		apiclient.CaCertPool = caCertPool
+	}
+
+	if l.Credentials.CertPath != "" && l.Credentials.KeyPath != "" {
+		cert, err := tls.LoadX509KeyPair(l.Credentials.CertPath, l.Credentials.KeyPath)
+		if err != nil {
+			return errors.Wrapf(err, "failed to load api client certificate")
+		}
 
 
 		apiclient.Cert = &cert
 		apiclient.Cert = &cert
-		apiclient.CaCertPool = caCertPool
 	}
 	}
+
 	return nil
 	return nil
 }
 }
 
 

+ 1 - 0
pkg/csconfig/api_test.go

@@ -213,6 +213,7 @@ func TestLoadAPIServer(t *testing.T) {
 					ShareManualDecisions:  types.BoolPtr(false),
 					ShareManualDecisions:  types.BoolPtr(false),
 					ShareTaintedScenarios: types.BoolPtr(true),
 					ShareTaintedScenarios: types.BoolPtr(true),
 					ShareCustomScenarios:  types.BoolPtr(true),
 					ShareCustomScenarios:  types.BoolPtr(true),
+					ShareContext:          types.BoolPtr(false),
 				},
 				},
 				LogDir:   LogDirFullPath,
 				LogDir:   LogDirFullPath,
 				LogMedia: "stdout",
 				LogMedia: "stdout",

+ 2 - 1
pkg/csconfig/config.go

@@ -46,8 +46,9 @@ func (c *Config) Dump() error {
 	return nil
 	return nil
 }
 }
 
 
-func NewConfig(configFile string, disableAgent bool, disableAPI bool) (*Config, error) {
+func NewConfig(configFile string, disableAgent bool, disableAPI bool, quiet bool) (*Config, error) {
 	patcher := yamlpatch.NewPatcher(configFile, ".local")
 	patcher := yamlpatch.NewPatcher(configFile, ".local")
+	patcher.SetQuiet(quiet)
 	fcontent, err := patcher.MergedPatchContent()
 	fcontent, err := patcher.MergedPatchContent()
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err

+ 3 - 3
pkg/csconfig/config_test.go

@@ -10,13 +10,13 @@ import (
 )
 )
 
 
 func TestNormalLoad(t *testing.T) {
 func TestNormalLoad(t *testing.T) {
-	_, err := NewConfig("./tests/config.yaml", false, false)
+	_, err := NewConfig("./tests/config.yaml", false, false, false)
 	require.NoError(t, err)
 	require.NoError(t, err)
 
 
-	_, err = NewConfig("./tests/xxx.yaml", false, false)
+	_, err = NewConfig("./tests/xxx.yaml", false, false, false)
 	assert.EqualError(t, err, "while reading yaml file: open ./tests/xxx.yaml: "+cstest.FileNotFoundMessage)
 	assert.EqualError(t, err, "while reading yaml file: open ./tests/xxx.yaml: "+cstest.FileNotFoundMessage)
 
 
-	_, err = NewConfig("./tests/simulation.yaml", false, false)
+	_, err = NewConfig("./tests/simulation.yaml", false, false, false)
 	assert.EqualError(t, err, "./tests/simulation.yaml: yaml: unmarshal errors:\n  line 1: field simulation not found in type csconfig.Config")
 	assert.EqualError(t, err, "./tests/simulation.yaml: yaml: unmarshal errors:\n  line 1: field simulation not found in type csconfig.Config")
 }
 }
 
 

+ 10 - 1
pkg/csconfig/console.go

@@ -14,9 +14,10 @@ const (
 	SEND_CUSTOM_SCENARIOS  = "custom"
 	SEND_CUSTOM_SCENARIOS  = "custom"
 	SEND_TAINTED_SCENARIOS = "tainted"
 	SEND_TAINTED_SCENARIOS = "tainted"
 	SEND_MANUAL_SCENARIOS  = "manual"
 	SEND_MANUAL_SCENARIOS  = "manual"
+	SEND_CONTEXT           = "context"
 )
 )
 
 
-var CONSOLE_CONFIGS = []string{SEND_CUSTOM_SCENARIOS, SEND_MANUAL_SCENARIOS, SEND_TAINTED_SCENARIOS}
+var CONSOLE_CONFIGS = []string{SEND_CUSTOM_SCENARIOS, SEND_MANUAL_SCENARIOS, SEND_TAINTED_SCENARIOS, SEND_CONTEXT}
 
 
 var DefaultConsoleConfigFilePath = DefaultConfigPath("console.yaml")
 var DefaultConsoleConfigFilePath = DefaultConfigPath("console.yaml")
 
 
@@ -24,6 +25,7 @@ type ConsoleConfig struct {
 	ShareManualDecisions  *bool `yaml:"share_manual_decisions"`
 	ShareManualDecisions  *bool `yaml:"share_manual_decisions"`
 	ShareTaintedScenarios *bool `yaml:"share_tainted"`
 	ShareTaintedScenarios *bool `yaml:"share_tainted"`
 	ShareCustomScenarios  *bool `yaml:"share_custom"`
 	ShareCustomScenarios  *bool `yaml:"share_custom"`
+	ShareContext          *bool `yaml:"share_context"`
 }
 }
 
 
 func (c *LocalApiServerCfg) LoadConsoleConfig() error {
 func (c *LocalApiServerCfg) LoadConsoleConfig() error {
@@ -33,6 +35,7 @@ func (c *LocalApiServerCfg) LoadConsoleConfig() error {
 		c.ConsoleConfig.ShareCustomScenarios = types.BoolPtr(true)
 		c.ConsoleConfig.ShareCustomScenarios = types.BoolPtr(true)
 		c.ConsoleConfig.ShareTaintedScenarios = types.BoolPtr(true)
 		c.ConsoleConfig.ShareTaintedScenarios = types.BoolPtr(true)
 		c.ConsoleConfig.ShareManualDecisions = types.BoolPtr(false)
 		c.ConsoleConfig.ShareManualDecisions = types.BoolPtr(false)
+		c.ConsoleConfig.ShareContext = types.BoolPtr(false)
 		return nil
 		return nil
 	}
 	}
 
 
@@ -57,6 +60,12 @@ func (c *LocalApiServerCfg) LoadConsoleConfig() error {
 		log.Debugf("no share_manual scenarios found, setting to false")
 		log.Debugf("no share_manual scenarios found, setting to false")
 		c.ConsoleConfig.ShareManualDecisions = types.BoolPtr(false)
 		c.ConsoleConfig.ShareManualDecisions = types.BoolPtr(false)
 	}
 	}
+
+	if c.ConsoleConfig.ShareContext == nil {
+		log.Debugf("no 'context' found, setting to false")
+		c.ConsoleConfig.ShareContext = types.BoolPtr(false)
+	}
+
 	log.Debugf("Console configuration '%s' loaded successfully", c.ConsoleConfigPath)
 	log.Debugf("Console configuration '%s' loaded successfully", c.ConsoleConfigPath)
 
 
 	return nil
 	return nil

+ 67 - 19
pkg/csconfig/crowdsec_service.go

@@ -7,31 +7,34 @@ import (
 
 
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
 	log "github.com/sirupsen/logrus"
+	"gopkg.in/yaml.v2"
 
 
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 )
 )
 
 
 // CrowdsecServiceCfg contains the location of parsers/scenarios/... and acquisition files
 // CrowdsecServiceCfg contains the location of parsers/scenarios/... and acquisition files
 type CrowdsecServiceCfg struct {
 type CrowdsecServiceCfg struct {
-	Enable              *bool  `yaml:"enable"`
-	AcquisitionFilePath string `yaml:"acquisition_path,omitempty"`
-	AcquisitionDirPath  string `yaml:"acquisition_dir,omitempty"`
-
-	AcquisitionFiles     []string          `yaml:"-"`
-	ParserRoutinesCount  int               `yaml:"parser_routines"`
-	BucketsRoutinesCount int               `yaml:"buckets_routines"`
-	OutputRoutinesCount  int               `yaml:"output_routines"`
-	SimulationConfig     *SimulationConfig `yaml:"-"`
-	LintOnly             bool              `yaml:"-"`                          // if set to true, exit after loading configs
-	BucketStateFile      string            `yaml:"state_input_file,omitempty"` // if we need to unserialize buckets at start
-	BucketStateDumpDir   string            `yaml:"state_output_dir,omitempty"` // if we need to unserialize buckets on shutdown
-	BucketsGCEnabled     bool              `yaml:"-"`                          // we need to garbage collect buckets when in forensic mode
-
-	HubDir             string `yaml:"-"`
-	DataDir            string `yaml:"-"`
-	ConfigDir          string `yaml:"-"`
-	HubIndexFile       string `yaml:"-"`
-	SimulationFilePath string `yaml:"-"`
+	Enable                    *bool             `yaml:"enable"`
+	AcquisitionFilePath       string            `yaml:"acquisition_path,omitempty"`
+	AcquisitionDirPath        string            `yaml:"acquisition_dir,omitempty"`
+	ConsoleContextPath        string            `yaml:"console_context_path"`
+	ConsoleContextValueLength int               `yaml:"console_context_value_length"`
+	AcquisitionFiles          []string          `yaml:"-"`
+	ParserRoutinesCount       int               `yaml:"parser_routines"`
+	BucketsRoutinesCount      int               `yaml:"buckets_routines"`
+	OutputRoutinesCount       int               `yaml:"output_routines"`
+	SimulationConfig          *SimulationConfig `yaml:"-"`
+	LintOnly                  bool              `yaml:"-"`                          // if set to true, exit after loading configs
+	BucketStateFile           string            `yaml:"state_input_file,omitempty"` // if we need to unserialize buckets at start
+	BucketStateDumpDir        string            `yaml:"state_output_dir,omitempty"` // if we need to unserialize buckets on shutdown
+	BucketsGCEnabled          bool              `yaml:"-"`                          // we need to garbage collect buckets when in forensic mode
+
+	HubDir             string              `yaml:"-"`
+	DataDir            string              `yaml:"-"`
+	ConfigDir          string              `yaml:"-"`
+	HubIndexFile       string              `yaml:"-"`
+	SimulationFilePath string              `yaml:"-"`
+	ContextToSend      map[string][]string `yaml:"-"`
 }
 }
 
 
 func (c *Config) LoadCrowdsec() error {
 func (c *Config) LoadCrowdsec() error {
@@ -152,5 +155,50 @@ func (c *Config) LoadCrowdsec() error {
 		return errors.Wrap(err, "while loading hub")
 		return errors.Wrap(err, "while loading hub")
 	}
 	}
 
 
+	c.Crowdsec.ContextToSend = make(map[string][]string, 0)
+	fallback := false
+	if c.Crowdsec.ConsoleContextPath == "" {
+		// fallback to default config file
+		c.Crowdsec.ConsoleContextPath = filepath.Join(c.Crowdsec.ConfigDir, "console", "context.yaml")
+		fallback = true
+	}
+
+	f, err := filepath.Abs(c.Crowdsec.ConsoleContextPath)
+	if err != nil {
+		return fmt.Errorf("fail to get absolute path of %s: %s", c.Crowdsec.ConsoleContextPath, err)
+	}
+
+	c.Crowdsec.ConsoleContextPath = f
+	yamlFile, err := os.ReadFile(c.Crowdsec.ConsoleContextPath)
+	if err != nil {
+		if fallback {
+			log.Debugf("Default context config file doesn't exist, will not use it")
+		} else {
+			return fmt.Errorf("failed to open context file: %s", err)
+		}
+	} else {
+		err = yaml.Unmarshal(yamlFile, c.Crowdsec.ContextToSend)
+		if err != nil {
+			return fmt.Errorf("unmarshaling labels console config file '%s': %s", c.Crowdsec.ConsoleContextPath, err)
+		}
+	}
+
+	return nil
+}
+
+func (c *CrowdsecServiceCfg) DumpContextConfigFile() error {
+	var out []byte
+	var err error
+
+	if out, err = yaml.Marshal(c.ContextToSend); err != nil {
+		return errors.Wrapf(err, "while marshaling ConsoleConfig (for %s)", c.ConsoleContextPath)
+	}
+
+	if err := os.WriteFile(c.ConsoleContextPath, out, 0600); err != nil {
+		return errors.Wrapf(err, "while dumping console config to %s", c.ConsoleContextPath)
+	}
+
+	log.Infof("%s file saved", c.ConsoleContextPath)
+
 	return nil
 	return nil
 }
 }

+ 64 - 39
pkg/csconfig/crowdsec_service_test.go

@@ -33,6 +33,9 @@ func TestLoadCrowdsec(t *testing.T) {
 	hubIndexFileFullPath, err := filepath.Abs("./hub/.index.json")
 	hubIndexFileFullPath, err := filepath.Abs("./hub/.index.json")
 	require.NoError(t, err)
 	require.NoError(t, err)
 
 
+	contextFileFullPath, err := filepath.Abs("./tests/context.yaml")
+	require.NoError(t, err)
+
 	tests := []struct {
 	tests := []struct {
 		name           string
 		name           string
 		input          *Config
 		input          *Config
@@ -53,23 +56,30 @@ func TestLoadCrowdsec(t *testing.T) {
 					},
 					},
 				},
 				},
 				Crowdsec: &CrowdsecServiceCfg{
 				Crowdsec: &CrowdsecServiceCfg{
-					AcquisitionFilePath: "./tests/acquis.yaml",
-					SimulationFilePath:  "./tests/simulation.yaml",
+					AcquisitionFilePath:       "./tests/acquis.yaml",
+					SimulationFilePath:        "./tests/simulation.yaml",
+					ConsoleContextPath:        "./tests/context.yaml",
+					ConsoleContextValueLength: 2500,
 				},
 				},
 			},
 			},
 			expectedResult: &CrowdsecServiceCfg{
 			expectedResult: &CrowdsecServiceCfg{
-				Enable:               types.BoolPtr(true),
-				AcquisitionDirPath:   "",
-				AcquisitionFilePath:  acquisFullPath,
-				ConfigDir:            configDirFullPath,
-				DataDir:              dataFullPath,
-				HubDir:               hubFullPath,
-				HubIndexFile:         hubIndexFileFullPath,
-				BucketsRoutinesCount: 1,
-				ParserRoutinesCount:  1,
-				OutputRoutinesCount:  1,
-				AcquisitionFiles:     []string{acquisFullPath},
-				SimulationFilePath:   "./tests/simulation.yaml",
+				Enable:                    types.BoolPtr(true),
+				AcquisitionDirPath:        "",
+				ConsoleContextPath:        contextFileFullPath,
+				AcquisitionFilePath:       acquisFullPath,
+				ConfigDir:                 configDirFullPath,
+				DataDir:                   dataFullPath,
+				HubDir:                    hubFullPath,
+				HubIndexFile:              hubIndexFileFullPath,
+				BucketsRoutinesCount:      1,
+				ParserRoutinesCount:       1,
+				OutputRoutinesCount:       1,
+				ConsoleContextValueLength: 2500,
+				AcquisitionFiles:          []string{acquisFullPath},
+				SimulationFilePath:        "./tests/simulation.yaml",
+				ContextToSend: map[string][]string{
+					"source_ip": {"evt.Parsed.source_ip"},
+				},
 				SimulationConfig: &SimulationConfig{
 				SimulationConfig: &SimulationConfig{
 					Simulation: &falseBoolPtr,
 					Simulation: &falseBoolPtr,
 				},
 				},
@@ -92,21 +102,27 @@ func TestLoadCrowdsec(t *testing.T) {
 					AcquisitionFilePath: "./tests/acquis.yaml",
 					AcquisitionFilePath: "./tests/acquis.yaml",
 					AcquisitionDirPath:  "./tests/acquis/",
 					AcquisitionDirPath:  "./tests/acquis/",
 					SimulationFilePath:  "./tests/simulation.yaml",
 					SimulationFilePath:  "./tests/simulation.yaml",
+					ConsoleContextPath:  "./tests/context.yaml",
 				},
 				},
 			},
 			},
 			expectedResult: &CrowdsecServiceCfg{
 			expectedResult: &CrowdsecServiceCfg{
-				Enable:               types.BoolPtr(true),
-				AcquisitionDirPath:   acquisDirFullPath,
-				AcquisitionFilePath:  acquisFullPath,
-				ConfigDir:            configDirFullPath,
-				HubIndexFile:         hubIndexFileFullPath,
-				DataDir:              dataFullPath,
-				HubDir:               hubFullPath,
-				BucketsRoutinesCount: 1,
-				ParserRoutinesCount:  1,
-				OutputRoutinesCount:  1,
-				AcquisitionFiles:     []string{acquisFullPath, acquisInDirFullPath},
-				SimulationFilePath:   "./tests/simulation.yaml",
+				Enable:                    types.BoolPtr(true),
+				AcquisitionDirPath:        acquisDirFullPath,
+				AcquisitionFilePath:       acquisFullPath,
+				ConsoleContextPath:        contextFileFullPath,
+				ConfigDir:                 configDirFullPath,
+				HubIndexFile:              hubIndexFileFullPath,
+				DataDir:                   dataFullPath,
+				HubDir:                    hubFullPath,
+				BucketsRoutinesCount:      1,
+				ParserRoutinesCount:       1,
+				OutputRoutinesCount:       1,
+				ConsoleContextValueLength: 0,
+				AcquisitionFiles:          []string{acquisFullPath, acquisInDirFullPath},
+				ContextToSend: map[string][]string{
+					"source_ip": {"evt.Parsed.source_ip"},
+				},
+				SimulationFilePath: "./tests/simulation.yaml",
 				SimulationConfig: &SimulationConfig{
 				SimulationConfig: &SimulationConfig{
 					Simulation: &falseBoolPtr,
 					Simulation: &falseBoolPtr,
 				},
 				},
@@ -125,21 +141,29 @@ func TestLoadCrowdsec(t *testing.T) {
 						CredentialsFilePath: "./tests/lapi-secrets.yaml",
 						CredentialsFilePath: "./tests/lapi-secrets.yaml",
 					},
 					},
 				},
 				},
-				Crowdsec: &CrowdsecServiceCfg{},
+				Crowdsec: &CrowdsecServiceCfg{
+					ConsoleContextPath:        contextFileFullPath,
+					ConsoleContextValueLength: 10,
+				},
 			},
 			},
 			expectedResult: &CrowdsecServiceCfg{
 			expectedResult: &CrowdsecServiceCfg{
-				Enable:               types.BoolPtr(true),
-				AcquisitionDirPath:   "",
-				AcquisitionFilePath:  "",
-				ConfigDir:            configDirFullPath,
-				HubIndexFile:         hubIndexFileFullPath,
-				DataDir:              dataFullPath,
-				HubDir:               hubFullPath,
-				BucketsRoutinesCount: 1,
-				ParserRoutinesCount:  1,
-				OutputRoutinesCount:  1,
-				AcquisitionFiles:     []string{},
-				SimulationFilePath:   "",
+				Enable:                    types.BoolPtr(true),
+				AcquisitionDirPath:        "",
+				AcquisitionFilePath:       "",
+				ConfigDir:                 configDirFullPath,
+				HubIndexFile:              hubIndexFileFullPath,
+				DataDir:                   dataFullPath,
+				HubDir:                    hubFullPath,
+				ConsoleContextPath:        contextFileFullPath,
+				BucketsRoutinesCount:      1,
+				ParserRoutinesCount:       1,
+				OutputRoutinesCount:       1,
+				ConsoleContextValueLength: 10,
+				AcquisitionFiles:          []string{},
+				SimulationFilePath:        "",
+				ContextToSend: map[string][]string{
+					"source_ip": {"evt.Parsed.source_ip"},
+				},
 				SimulationConfig: &SimulationConfig{
 				SimulationConfig: &SimulationConfig{
 					Simulation: &falseBoolPtr,
 					Simulation: &falseBoolPtr,
 				},
 				},
@@ -159,6 +183,7 @@ func TestLoadCrowdsec(t *testing.T) {
 					},
 					},
 				},
 				},
 				Crowdsec: &CrowdsecServiceCfg{
 				Crowdsec: &CrowdsecServiceCfg{
+					ConsoleContextPath:  "",
 					AcquisitionFilePath: "./tests/acquis_not_exist.yaml",
 					AcquisitionFilePath: "./tests/acquis_not_exist.yaml",
 				},
 				},
 			},
 			},

+ 2 - 0
pkg/csconfig/tests/context.yaml

@@ -0,0 +1,2 @@
+source_ip:
+ - evt.Parsed.source_ip

+ 50 - 0
pkg/cstest/utils.go

@@ -7,6 +7,8 @@ import (
 
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/require"
+
+	logtest "github.com/sirupsen/logrus/hooks/test"
 )
 )
 
 
 func AssertErrorContains(t *testing.T, err error, expectedErr string) {
 func AssertErrorContains(t *testing.T, err error, expectedErr string) {
@@ -20,6 +22,21 @@ func AssertErrorContains(t *testing.T, err error, expectedErr string) {
 	assert.NoError(t, err)
 	assert.NoError(t, err)
 }
 }
 
 
+func AssertErrorMessage(t *testing.T, err error, expectedErr string) {
+	t.Helper()
+
+	if expectedErr != "" {
+		errmsg := ""
+		if err != nil {
+			errmsg = err.Error()
+		}
+		assert.Equal(t, expectedErr, errmsg)
+		return
+	}
+
+	require.NoError(t, err)
+}
+
 func RequireErrorContains(t *testing.T, err error, expectedErr string) {
 func RequireErrorContains(t *testing.T, err error, expectedErr string) {
 	t.Helper()
 	t.Helper()
 
 
@@ -31,6 +48,39 @@ func RequireErrorContains(t *testing.T, err error, expectedErr string) {
 	require.NoError(t, err)
 	require.NoError(t, err)
 }
 }
 
 
+func RequireErrorMessage(t *testing.T, err error, expectedErr string) {
+	t.Helper()
+
+	if expectedErr != "" {
+		errmsg := ""
+		if err != nil {
+			errmsg = err.Error()
+		}
+		require.Equal(t, expectedErr, errmsg)
+		return
+	}
+
+	require.NoError(t, err)
+}
+
+func RequireLogContains(t *testing.T, hook *logtest.Hook, expected string) {
+	t.Helper()
+
+	// look for a log entry that matches the expected message
+	for _, entry := range hook.AllEntries() {
+		if strings.Contains(entry.Message, expected) {
+			return
+		}
+	}
+
+	// show all hook entries, in case the test fails we'll need them
+	for _, entry := range hook.AllEntries() {
+		t.Logf("log entry: %s", entry.Message)
+	}
+
+	require.Fail(t, "no log entry found with message", expected)
+}
+
 // Interpolate fills a string template with the given values, can be map or struct.
 // Interpolate fills a string template with the given values, can be map or struct.
 // example: Interpolate("{{.Name}}", map[string]string{"Name": "JohnDoe"})
 // example: Interpolate("{{.Name}}", map[string]string{"Name": "JohnDoe"})
 func Interpolate(s string, data interface{}) (string, error) {
 func Interpolate(s string, data interface{}) (string, error) {

+ 24 - 2
pkg/exprhelpers/exprlib.go

@@ -14,6 +14,7 @@ import (
 
 
 	"github.com/c-robinson/iplib"
 	"github.com/c-robinson/iplib"
 
 
+	"github.com/crowdsecurity/crowdsec/pkg/cache"
 	"github.com/crowdsecurity/crowdsec/pkg/database"
 	"github.com/crowdsecurity/crowdsec/pkg/database"
 	"github.com/davecgh/go-spew/spew"
 	"github.com/davecgh/go-spew/spew"
 	log "github.com/sirupsen/logrus"
 	log "github.com/sirupsen/logrus"
@@ -68,6 +69,9 @@ func GetExprEnv(ctx map[string]interface{}) map[string]interface{} {
 		"GetDecisionsCount":      GetDecisionsCount,
 		"GetDecisionsCount":      GetDecisionsCount,
 		"GetDecisionsSinceCount": GetDecisionsSinceCount,
 		"GetDecisionsSinceCount": GetDecisionsSinceCount,
 		"Sprintf":                fmt.Sprintf,
 		"Sprintf":                fmt.Sprintf,
+		"ParseUnix":              ParseUnix,
+		"GetFromStash":           cache.GetKey,
+		"SetInStash":             cache.SetKey,
 	}
 	}
 	for k, v := range ctx {
 	for k, v := range ctx {
 		ExprLib[k] = v
 		ExprLib[k] = v
@@ -283,10 +287,28 @@ func GetDecisionsSinceCount(value string, since string) int {
 }
 }
 
 
 func LookupHost(value string) []string {
 func LookupHost(value string) []string {
-	addresses , err := net.LookupHost(value)
+	addresses, err := net.LookupHost(value)
 	if err != nil {
 	if err != nil {
 		log.Errorf("Failed to lookup host '%s' : %s", value, err)
 		log.Errorf("Failed to lookup host '%s' : %s", value, err)
-		return []string{} 
+		return []string{}
 	}
 	}
 	return addresses
 	return addresses
 }
 }
+
+func ParseUnixTime(value string) (time.Time, error) {
+	//Splitting string here as some unix timestamp may have milliseconds and break ParseInt
+	i, err := strconv.ParseInt(strings.Split(value, ".")[0], 10, 64)
+	if err != nil || i <= 0 {
+		return time.Time{}, fmt.Errorf("unable to parse %s as unix timestamp", value)
+	}
+	return time.Unix(i, 0), nil
+}
+
+func ParseUnix(value string) string {
+	t, err := ParseUnixTime(value)
+	if err != nil {
+		log.Error(err)
+		return ""
+	}
+	return t.Format(time.RFC3339)
+}

+ 45 - 0
pkg/exprhelpers/exprlib_test.go

@@ -12,6 +12,7 @@ import (
 	"github.com/crowdsecurity/crowdsec/pkg/database"
 	"github.com/crowdsecurity/crowdsec/pkg/database"
 	"github.com/crowdsecurity/crowdsec/pkg/models"
 	"github.com/crowdsecurity/crowdsec/pkg/models"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
+	"github.com/crowdsecurity/crowdsec/pkg/cstest"
 	log "github.com/sirupsen/logrus"
 	log "github.com/sirupsen/logrus"
 
 
 	"testing"
 	"testing"
@@ -971,3 +972,47 @@ func TestGetDecisionsSinceCount(t *testing.T) {
 		log.Printf("test '%s' : OK", test.name)
 		log.Printf("test '%s' : OK", test.name)
 	}
 	}
 }
 }
+
+func TestParseUnixTime(t *testing.T) {
+	tests := []struct {
+		name        string
+		value       string
+		expected    time.Time
+		expectedErr string
+	}{
+		{
+			name: "ParseUnix() test: valid value with milli",
+			value: "1672239773.3590894",
+			expected: time.Date(2022, 12, 28, 15, 02, 53, 0, time.UTC),
+		},
+		{
+			name: "ParseUnix() test: valid value without milli",
+			value: "1672239773",
+			expected: time.Date(2022, 12, 28, 15, 02, 53, 0, time.UTC),
+		},
+		{
+			name: "ParseUnix() test: invalid input",
+			value: "AbcDefG!#",
+			expected: time.Time{},
+			expectedErr: "unable to parse AbcDefG!# as unix timestamp",
+		},
+		{
+			name: "ParseUnix() test: negative value",
+			value: "-1000",
+			expected: time.Time{},
+			expectedErr: "unable to parse -1000 as unix timestamp",
+		},
+	}
+
+	for _, tc := range tests {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			output, err := ParseUnixTime(tc.value)
+			cstest.RequireErrorContains(t, err, tc.expectedErr)
+			if tc.expectedErr != "" {
+				return
+			}
+			require.WithinDuration(t, tc.expected, output, time.Second)
+		})
+	}
+}

+ 19 - 0
pkg/fflag/crowdsec.go

@@ -0,0 +1,19 @@
+package fflag
+
+var Crowdsec = FeatureRegister{EnvPrefix: "CROWDSEC_FEATURE_"}
+
+var CscliSetup = &Feature{Name: "cscli_setup"}
+var DisableHttpRetryBackoff = &Feature{Name: "disable_http_retry_backoff", Description: "Disable http retry backoff"}
+
+func RegisterAllFeatures() error {
+	err := Crowdsec.RegisterFeature(CscliSetup)
+	if err != nil {
+		return err
+	}
+	err = Crowdsec.RegisterFeature(DisableHttpRetryBackoff)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}

+ 264 - 0
pkg/fflag/features.go

@@ -0,0 +1,264 @@
+// Package fflag provides a simple feature flag system.
+//
+// Feature names are lowercase and can only contain letters, numbers, undercores
+// and dots.
+//
+// good: "foo", "foo_bar", "foo.bar"
+// bad: "Foo", "foo-bar"
+//
+// A feature flag can be enabled by the user with an environment variable
+// or by adding it to {ConfigDir}/feature.yaml
+//
+// I.e. CROWDSEC_FEATURE_FOO_BAR=true
+// or in feature.yaml:
+// ---
+// - foo_bar
+//
+// If the variable is set to false, the feature can still be enabled
+// in feature.yaml. Features cannot be disabled in the file.
+//
+// A feature flag can be deprecated or retired. A deprecated feature flag is
+// still accepted but a warning is logged. A retired feature flag is ignored
+// and an error is logged.
+//
+// A specific deprecation message is used to inform the user of the behavior
+// that has been decided when the flag is/was finally retired.
+
+package fflag
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"regexp"
+	"sort"
+	"strings"
+
+	"github.com/goccy/go-yaml"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	ErrFeatureNameEmpty   = errors.New("name is empty")
+	ErrFeatureNameCase    = errors.New("name is not lowercase")
+	ErrFeatureNameInvalid = errors.New("invalid name (allowed a-z, 0-9, _, .)")
+	ErrFeatureUnknown     = errors.New("unknown feature")
+	ErrFeatureDeprecated  = errors.New("the flag is deprecated")
+	ErrFeatureRetired     = errors.New("the flag is retired")
+)
+
+const (
+	ActiveState     = iota // the feature can be enabled, and its description is logged (Info)
+	DeprecatedState        // the feature can be enabled, and a deprecation message is logged (Warning)
+	RetiredState           // the feature is ignored and a deprecation message is logged (Error)
+)
+
+type Feature struct {
+	Name  string
+	State int // active, deprecated, retired
+
+	// Description should be a short sentence, explaining the feature.
+	Description string
+
+	// DeprecationMessage is used to inform the user of the behavior that has
+	// been decided when the flag is/was finally retired.
+	DeprecationMsg string
+
+	enabled bool
+}
+
+func (f *Feature) IsEnabled() bool {
+	return f.enabled
+}
+
+// Set enables or disables a feature flag
+// It should not be called directly by the user, but by SetFromEnv or SetFromYaml
+func (f *Feature) Set(value bool) error {
+	// retired feature flags are ignored
+	if f.State == RetiredState {
+		return ErrFeatureRetired
+	}
+
+	f.enabled = value
+
+	// deprecated feature flags are still accepted, but a warning is triggered.
+	// We return an error but set the feature anyway.
+	if f.State == DeprecatedState {
+		return ErrFeatureDeprecated
+	}
+
+	return nil
+}
+
+// A register allows to enable features from the environment or a file
+type FeatureRegister struct {
+	EnvPrefix string
+	features  map[string]*Feature
+}
+
+var featureNameRexp = regexp.MustCompile(`^[a-z0-9_\.]+$`)
+
+func validateFeatureName(featureName string) error {
+	if featureName == "" {
+		return ErrFeatureNameEmpty
+	}
+
+	if featureName != strings.ToLower(featureName) {
+		return ErrFeatureNameCase
+	}
+
+	if !featureNameRexp.MatchString(featureName) {
+		return ErrFeatureNameInvalid
+	}
+
+	return nil
+}
+
+func (fr *FeatureRegister) RegisterFeature(feat *Feature) error {
+	if err := validateFeatureName(feat.Name); err != nil {
+		return fmt.Errorf("feature flag '%s': %w", feat.Name, err)
+	}
+
+	if fr.features == nil {
+		fr.features = make(map[string]*Feature)
+	}
+
+	fr.features[feat.Name] = feat
+
+	return nil
+}
+
+func (fr *FeatureRegister) GetFeature(featureName string) (*Feature, error) {
+	feat, ok := fr.features[featureName]
+	if !ok {
+		return feat, ErrFeatureUnknown
+	}
+
+	return feat, nil
+}
+
+func (fr *FeatureRegister) SetFromEnv(logger *logrus.Logger) error {
+	for _, e := range os.Environ() {
+		// ignore non-feature variables
+		if !strings.HasPrefix(e, fr.EnvPrefix) {
+			continue
+		}
+
+		// extract feature name and value
+		pair := strings.SplitN(e, "=", 2)
+		varName := pair[0]
+		featureName := strings.ToLower(varName[len(fr.EnvPrefix):])
+		value := pair[1]
+
+		var enable bool
+
+		switch value {
+		case "true":
+			enable = true
+		case "false":
+			enable = false
+		default:
+			logger.Errorf("Ignored envvar %s=%s: invalid value (must be 'true' or 'false')", varName, value)
+			continue
+		}
+
+		feat, err := fr.GetFeature(featureName)
+		if err != nil {
+			logger.Errorf("Ignored envvar '%s': %s.", varName, err)
+			continue
+		}
+
+		err = feat.Set(enable)
+
+		switch {
+		case errors.Is(err, ErrFeatureRetired):
+			logger.Errorf("Ignored envvar '%s': %s. %s", varName, err, feat.DeprecationMsg)
+			continue
+		case errors.Is(err, ErrFeatureDeprecated):
+			logger.Warningf("Envvar '%s': %s. %s", varName, err, feat.DeprecationMsg)
+		case err != nil:
+			return err
+		}
+
+		logger.Infof("Feature flag: %s=%t (from envvar). %s", featureName, enable, feat.Description)
+	}
+
+	return nil
+}
+
+func (fr *FeatureRegister) SetFromYaml(r io.Reader, logger *logrus.Logger) error {
+	var cfg []string
+
+	bys, err := io.ReadAll(r)
+	if err != nil {
+		return err
+	}
+
+	// parse config file
+	if err := yaml.Unmarshal(bys, &cfg); err != nil {
+		if !errors.Is(err, io.EOF) {
+			return fmt.Errorf("failed to parse feature flags: %w", err)
+		}
+
+		logger.Debug("No feature flags in config file")
+	}
+
+	// set features
+	for _, k := range cfg {
+		feat, err := fr.GetFeature(k)
+		if err != nil {
+			logger.Errorf("Ignored feature flag '%s': %s", k, err)
+			continue
+		}
+
+		err = feat.Set(true)
+
+		switch {
+		case errors.Is(err, ErrFeatureRetired):
+			logger.Errorf("Ignored feature flag '%s': %s. %s", k, err, feat.DeprecationMsg)
+			continue
+		case errors.Is(err, ErrFeatureDeprecated):
+			logger.Warningf("Feature '%s': %s. %s", k, err, feat.DeprecationMsg)
+		case err != nil:
+			return err
+		}
+
+		logger.Infof("Feature flag: %s=true (from config file). %s", k, feat.Description)
+	}
+
+	return nil
+}
+
+func (fr *FeatureRegister) SetFromYamlFile(path string, logger *logrus.Logger) error {
+	f, err := os.Open(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			logger.Debugf("Feature flags config file '%s' does not exist", path)
+
+			return nil
+		}
+
+		return fmt.Errorf("failed to open feature flags file: %w", err)
+	}
+	defer f.Close()
+
+	logger.Debugf("Reading feature flags from %s", path)
+
+	return fr.SetFromYaml(f, logger)
+}
+
+// GetEnabledFeatures returns the list of features that have been enabled by the user
+func (fr *FeatureRegister) GetEnabledFeatures() []string {
+	ret := make([]string, 0)
+
+	for k, feat := range fr.features {
+		if feat.IsEnabled() {
+			ret = append(ret, k)
+		}
+	}
+
+	sort.Strings(ret)
+
+	return ret
+}

+ 397 - 0
pkg/fflag/features_test.go

@@ -0,0 +1,397 @@
+package fflag_test
+
+import (
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/sirupsen/logrus"
+	logtest "github.com/sirupsen/logrus/hooks/test"
+	"github.com/stretchr/testify/require"
+
+	"github.com/crowdsecurity/crowdsec/pkg/cstest"
+	"github.com/crowdsecurity/crowdsec/pkg/fflag"
+)
+
+func TestRegisterFeature(t *testing.T) {
+	tests := []struct {
+		name        string
+		feature     fflag.Feature
+		expectedErr string
+	}{
+		{
+			name: "a plain feature",
+			feature: fflag.Feature{
+				Name: "plain",
+			},
+		},
+		{
+			name: "capitalized feature name",
+			feature: fflag.Feature{
+				Name: "Plain",
+			},
+			expectedErr: "feature flag 'Plain': name is not lowercase",
+		},
+		{
+			name: "empty feature name",
+			feature: fflag.Feature{
+				Name: "",
+			},
+			expectedErr: "feature flag '': name is empty",
+		},
+		{
+			name: "invalid feature name",
+			feature: fflag.Feature{
+				Name: "meh!",
+			},
+			expectedErr: "feature flag 'meh!': invalid name (allowed a-z, 0-9, _, .)",
+		},
+	}
+
+	for _, tc := range tests {
+		tc := tc
+
+		t.Run("", func(t *testing.T) {
+			fr := fflag.FeatureRegister{EnvPrefix: "FFLAG_TEST_"}
+			err := fr.RegisterFeature(&tc.feature)
+			cstest.RequireErrorContains(t, err, tc.expectedErr)
+		})
+	}
+}
+
+func setUp(t *testing.T) fflag.FeatureRegister {
+	t.Helper()
+
+	fr := fflag.FeatureRegister{EnvPrefix: "FFLAG_TEST_"}
+
+	err := fr.RegisterFeature(&fflag.Feature{Name: "experimental1"})
+	require.NoError(t, err)
+
+	err = fr.RegisterFeature(&fflag.Feature{
+		Name:        "some_feature",
+		Description: "A feature that does something, with a description",
+	})
+	require.NoError(t, err)
+
+	err = fr.RegisterFeature(&fflag.Feature{
+		Name:           "new_standard",
+		State:          fflag.DeprecatedState,
+		Description:    "This implements the new standard T34.256w",
+		DeprecationMsg: "In 2.0 we'll do T34.256w by default",
+	})
+	require.NoError(t, err)
+
+	err = fr.RegisterFeature(&fflag.Feature{
+		Name:           "was_adopted",
+		State:          fflag.RetiredState,
+		Description:    "This implements a new tricket",
+		DeprecationMsg: "The trinket was implemented in 1.5",
+	})
+	require.NoError(t, err)
+
+	return fr
+}
+
+func TestGetFeature(t *testing.T) {
+	tests := []struct {
+		name        string
+		feature     string
+		expectedErr string
+	}{
+		{
+			name:    "just a feature",
+			feature: "experimental1",
+		}, {
+			name:        "feature that does not exist",
+			feature:     "will_never_exist",
+			expectedErr: "unknown feature",
+		},
+	}
+
+	fr := setUp(t)
+
+	for _, tc := range tests {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			_, err := fr.GetFeature(tc.feature)
+			cstest.RequireErrorMessage(t, err, tc.expectedErr)
+			if tc.expectedErr != "" {
+				return
+			}
+		})
+	}
+}
+
+func TestIsEnabled(t *testing.T) {
+	tests := []struct {
+		name     string
+		feature  string
+		enable   bool
+		expected bool
+	}{
+		{
+			name:     "feature that was not enabled",
+			feature:  "experimental1",
+			expected: false,
+		}, {
+			name:     "feature that was enabled",
+			feature:  "experimental1",
+			enable:   true,
+			expected: true,
+		},
+	}
+
+	fr := setUp(t)
+
+	for _, tc := range tests {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			feat, err := fr.GetFeature(tc.feature)
+			require.NoError(t, err)
+
+			err = feat.Set(tc.enable)
+			require.NoError(t, err)
+
+			require.Equal(t, tc.expected, feat.IsEnabled())
+		})
+	}
+}
+
+func TestFeatureSet(t *testing.T) {
+	tests := []struct {
+		name           string // test description
+		feature        string // feature name
+		value          bool   // value for SetFeature
+		expected       bool   // expected value from IsEnabled
+		expectedSetErr string // error expected from SetFeature
+		expectedGetErr string // error expected from GetFeature
+	}{
+		{
+			name:     "enable a feature to try something new",
+			feature:  "experimental1",
+			value:    true,
+			expected: true,
+		}, {
+			// not useful in practice, unlikely to happen
+			name:     "disable the feature that was enabled",
+			feature:  "experimental1",
+			value:    false,
+			expected: false,
+		}, {
+			name:           "enable a feature that will be retired in v2",
+			feature:        "new_standard",
+			value:          true,
+			expected:       true,
+			expectedSetErr: "the flag is deprecated",
+		}, {
+			name:           "enable a feature that was retired in v1.5",
+			feature:        "was_adopted",
+			value:          true,
+			expected:       false,
+			expectedSetErr: "the flag is retired",
+		}, {
+			name:           "enable a feature that does not exist",
+			feature:        "will_never_exist",
+			value:          true,
+			expectedSetErr: "unknown feature",
+			expectedGetErr: "unknown feature",
+		},
+	}
+
+	// the tests are not independent because we don't instantiate a feature
+	// map for each one, but it simplified the code
+	fr := setUp(t)
+
+	for _, tc := range tests {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			feat, err := fr.GetFeature(tc.feature)
+			cstest.RequireErrorMessage(t, err, tc.expectedGetErr)
+			if tc.expectedGetErr != "" {
+				return
+			}
+
+			err = feat.Set(tc.value)
+			cstest.RequireErrorMessage(t, err, tc.expectedSetErr)
+			require.Equal(t, tc.expected, feat.IsEnabled())
+		})
+	}
+}
+
+func TestSetFromEnv(t *testing.T) {
+	tests := []struct {
+		name   string
+		envvar string
+		value  string
+		// expected bool
+		expectedLog []string
+		expectedErr string
+	}{
+		{
+			name:   "variable that does not start with FFLAG_TEST_",
+			envvar: "PATH",
+			value:  "/bin:/usr/bin/:/usr/local/bin",
+			// silently ignored
+		}, {
+			name:        "enable a feature flag",
+			envvar:      "FFLAG_TEST_EXPERIMENTAL1",
+			value:       "true",
+			expectedLog: []string{"Feature flag: experimental1=true (from envvar)"},
+		}, {
+			name:        "invalid value (not true or false)",
+			envvar:      "FFLAG_TEST_EXPERIMENTAL1",
+			value:       "maybe",
+			expectedLog: []string{"Ignored envvar FFLAG_TEST_EXPERIMENTAL1=maybe: invalid value (must be 'true' or 'false')"},
+		}, {
+			name:        "feature flag that is unknown",
+			envvar:      "FFLAG_TEST_WILL_NEVER_EXIST",
+			value:       "true",
+			expectedLog: []string{"Ignored envvar 'FFLAG_TEST_WILL_NEVER_EXIST': unknown feature"},
+		}, {
+			name:   "enable a feature flag with a description",
+			envvar: "FFLAG_TEST_SOME_FEATURE",
+			value:  "true",
+			expectedLog: []string{
+				"Feature flag: some_feature=true (from envvar). A feature that does something, with a description",
+			},
+		}, {
+			name:   "enable a deprecated feature",
+			envvar: "FFLAG_TEST_NEW_STANDARD",
+			value:  "true",
+			expectedLog: []string{
+				"Envvar 'FFLAG_TEST_NEW_STANDARD': the flag is deprecated. In 2.0 we'll do T34.256w by default",
+				"Feature flag: new_standard=true (from envvar). This implements the new standard T34.256w",
+			},
+		}, {
+			name:   "enable a feature that was retired in v1.5",
+			envvar: "FFLAG_TEST_WAS_ADOPTED",
+			value:  "true",
+			expectedLog: []string{
+				"Ignored envvar 'FFLAG_TEST_WAS_ADOPTED': the flag is retired. " +
+					"The trinket was implemented in 1.5",
+			},
+		}, {
+			// this could happen in theory, but only if environment variables
+			// are parsed after configuration files, which is not a good idea
+			// because they are more useful asap
+			name:   "disable a feature flag already set",
+			envvar: "FFLAG_TEST_EXPERIMENTAL1",
+			value:  "false",
+		},
+	}
+
+	fr := setUp(t)
+
+	for _, tc := range tests {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			logger, hook := logtest.NewNullLogger()
+			logger.SetLevel(logrus.InfoLevel)
+			t.Setenv(tc.envvar, tc.value)
+			err := fr.SetFromEnv(logger)
+			cstest.RequireErrorMessage(t, err, tc.expectedErr)
+			for _, expectedMessage := range tc.expectedLog {
+				cstest.RequireLogContains(t, hook, expectedMessage)
+			}
+		})
+	}
+}
+
+func TestSetFromYaml(t *testing.T) {
+	tests := []struct {
+		name        string
+		yml         string
+		expectedLog []string
+		expectedErr string
+	}{
+		{
+			name: "empty file",
+			yml:  "",
+			// no error
+		}, {
+			name:        "invalid yaml",
+			yml:         "bad! content, bad!",
+			expectedErr: "failed to parse feature flags: [1:1] string was used where sequence is expected\n    >  1 | bad! content, bad!\n           ^",
+		}, {
+			name:        "invalid feature flag name",
+			yml:         "- not_a_feature",
+			expectedLog: []string{"Ignored feature flag 'not_a_feature': unknown feature"},
+		}, {
+			name:        "invalid value (must be a list)",
+			yml:         "experimental1: true",
+			expectedErr: "failed to parse feature flags: [1:14] value was used where sequence is expected\n    >  1 | experimental1: true\n                        ^",
+		}, {
+			name:        "enable a feature flag",
+			yml:         "- experimental1",
+			expectedLog: []string{"Feature flag: experimental1=true (from config file)"},
+		}, {
+			name: "enable a deprecated feature",
+			yml:  "- new_standard",
+			expectedLog: []string{
+				"Feature 'new_standard': the flag is deprecated. In 2.0 we'll do T34.256w by default",
+				"Feature flag: new_standard=true (from config file). This implements the new standard T34.256w",
+			},
+		}, {
+			name: "enable a retired feature",
+			yml:  "- was_adopted",
+			expectedLog: []string{
+				"Ignored feature flag 'was_adopted': the flag is retired. The trinket was implemented in 1.5",
+			},
+		},
+	}
+
+	fr := setUp(t)
+
+	for _, tc := range tests {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			logger, hook := logtest.NewNullLogger()
+			logger.SetLevel(logrus.InfoLevel)
+			err := fr.SetFromYaml(strings.NewReader(tc.yml), logger)
+			cstest.RequireErrorMessage(t, err, tc.expectedErr)
+			for _, expectedMessage := range tc.expectedLog {
+				cstest.RequireLogContains(t, hook, expectedMessage)
+			}
+		})
+	}
+}
+
+func TestSetFromYamlFile(t *testing.T) {
+	tmpfile, err := os.CreateTemp("", "test")
+	require.NoError(t, err)
+
+	defer os.Remove(tmpfile.Name())
+
+	// write the config file
+	_, err = tmpfile.Write([]byte("- experimental1"))
+	require.NoError(t, err)
+	require.NoError(t, tmpfile.Close())
+
+	fr := setUp(t)
+	logger, hook := logtest.NewNullLogger()
+	logger.SetLevel(logrus.InfoLevel)
+
+	err = fr.SetFromYamlFile(tmpfile.Name(), logger)
+	require.NoError(t, err)
+
+	cstest.RequireLogContains(t, hook, "Feature flag: experimental1=true (from config file)")
+}
+
+func TestGetEnabledFeatures(t *testing.T) {
+	fr := setUp(t)
+
+	feat1, err := fr.GetFeature("new_standard")
+	require.NoError(t, err)
+	feat1.Set(true)
+
+	feat2, err := fr.GetFeature("experimental1")
+	require.NoError(t, err)
+	feat2.Set(true)
+
+	expected := []string{
+		"experimental1",
+		"new_standard",
+	}
+
+	require.Equal(t, expected, fr.GetEnabledFeatures())
+}

+ 27 - 0
pkg/hubtest/parser_assert.go

@@ -78,6 +78,7 @@ func (p *ParserAssert) LoadTest(filename string) error {
 }
 }
 
 
 func (p *ParserAssert) AssertFile(testFile string) error {
 func (p *ParserAssert) AssertFile(testFile string) error {
+
 	file, err := os.Open(p.File)
 	file, err := os.Open(p.File)
 
 
 	if err != nil {
 	if err != nil {
@@ -268,6 +269,32 @@ func LoadParserDump(filepath string) (*ParserResults, error) {
 	if err := yaml.Unmarshal(results, &pdump); err != nil {
 	if err := yaml.Unmarshal(results, &pdump); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
+
+	/* we know that some variables should always be set,
+	let's check if they're present in last parser output of last stage */
+	stages := make([]string, 0, len(pdump))
+	for k := range pdump {
+		stages = append(stages, k)
+	}
+	sort.Strings(stages)
+	/*the very last one is set to 'success' which is just a bool indicating if the line was successfully parsed*/
+	lastStage := stages[len(stages)-2]
+
+	parsers := make([]string, 0, len(pdump[lastStage]))
+	for k := range pdump[lastStage] {
+		parsers = append(parsers, k)
+	}
+	sort.Strings(parsers)
+	lastParser := parsers[len(parsers)-1]
+
+	for idx, result := range pdump[lastStage][lastParser] {
+		if result.Evt.StrTime == "" {
+			log.Warningf("Line %d/%d is missing evt.StrTime. It is most likely a mistake as it will prevent your logs to be processed in time-machine/forensic mode.", idx, len(pdump[lastStage][lastParser]))
+		} else {
+			log.Debugf("Line %d/%d has evt.StrTime set to '%s'", idx, len(pdump[lastStage][lastParser]), result.Evt.StrTime)
+		}
+	}
+
 	return &pdump, nil
 	return &pdump, nil
 }
 }
 
 

+ 25 - 11
pkg/leakybucket/bucket.go

@@ -61,16 +61,17 @@ type Leaky struct {
 	Duration     time.Duration
 	Duration     time.Duration
 	Pour         func(*Leaky, types.Event) `json:"-"`
 	Pour         func(*Leaky, types.Event) `json:"-"`
 	//Profiling when set to true enables profiling of bucket
 	//Profiling when set to true enables profiling of bucket
-	Profiling       bool
-	timedOverflow   bool
-	logger          *log.Entry
-	scopeType       types.ScopeType
-	hash            string
-	scenarioVersion string
-	tomb            *tomb.Tomb
-	wgPour          *sync.WaitGroup
-	wgDumpState     *sync.WaitGroup
-	mutex           *sync.Mutex //used only for TIMEMACHINE mode to allow garbage collection without races
+	Profiling           bool
+	timedOverflow       bool
+	conditionalOverflow bool
+	logger              *log.Entry
+	scopeType           types.ScopeType
+	hash                string
+	scenarioVersion     string
+	tomb                *tomb.Tomb
+	wgPour              *sync.WaitGroup
+	wgDumpState         *sync.WaitGroup
+	mutex               *sync.Mutex //used only for TIMEMACHINE mode to allow garbage collection without races
 }
 }
 
 
 var BucketsPour = prometheus.NewCounterVec(
 var BucketsPour = prometheus.NewCounterVec(
@@ -188,6 +189,10 @@ func FromFactory(bucketFactory BucketFactory) *Leaky {
 		l.timedOverflow = true
 		l.timedOverflow = true
 	}
 	}
 
 
+	if l.BucketConfig.Type == "conditional" {
+		l.conditionalOverflow = true
+		l.Duration = l.BucketConfig.leakspeed
+	}
 	return l
 	return l
 }
 }
 
 
@@ -247,6 +252,14 @@ func LeakRoutine(leaky *Leaky) error {
 			BucketsPour.With(prometheus.Labels{"name": leaky.Name, "source": msg.Line.Src, "type": msg.Line.Module}).Inc()
 			BucketsPour.With(prometheus.Labels{"name": leaky.Name, "source": msg.Line.Src, "type": msg.Line.Module}).Inc()
 
 
 			leaky.Pour(leaky, *msg) // glue for now
 			leaky.Pour(leaky, *msg) // glue for now
+
+			for _, processor := range processors {
+				msg = processor.AfterBucketPour(leaky.BucketConfig)(*msg, leaky)
+				if msg == nil {
+					goto End
+				}
+			}
+
 			//Clear cache on behalf of pour
 			//Clear cache on behalf of pour
 
 
 			// if durationTicker isn't initialized, then we're pouring our first event
 			// if durationTicker isn't initialized, then we're pouring our first event
@@ -337,7 +350,8 @@ func Pour(leaky *Leaky, msg types.Event) {
 		leaky.First_ts = time.Now().UTC()
 		leaky.First_ts = time.Now().UTC()
 	}
 	}
 	leaky.Last_ts = time.Now().UTC()
 	leaky.Last_ts = time.Now().UTC()
-	if leaky.Limiter.Allow() {
+
+	if leaky.Limiter.Allow() || leaky.conditionalOverflow {
 		leaky.Queue.Add(msg)
 		leaky.Queue.Add(msg)
 	} else {
 	} else {
 		leaky.Ovflw_ts = time.Now().UTC()
 		leaky.Ovflw_ts = time.Now().UTC()

+ 2 - 2
pkg/leakybucket/buckets_test.go

@@ -64,8 +64,8 @@ func TestBucket(t *testing.T) {
 	}
 	}
 }
 }
 
 
-//during tests, we're likely to have only one scenario, and thus only one holder.
-//we want to avoid the death of the tomb because all existing buckets have been destroyed.
+// during tests, we're likely to have only one scenario, and thus only one holder.
+// we want to avoid the death of the tomb because all existing buckets have been destroyed.
 func watchTomb(tomb *tomb.Tomb) {
 func watchTomb(tomb *tomb.Tomb) {
 	for {
 	for {
 		if tomb.Alive() == false {
 		if tomb.Alive() == false {

+ 61 - 0
pkg/leakybucket/conditional.go

@@ -0,0 +1,61 @@
+package leakybucket
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/antonmedv/expr"
+	"github.com/antonmedv/expr/vm"
+	"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+)
+
+type ConditionalOverflow struct {
+	ConditionalFilter        string
+	ConditionalFilterRuntime *vm.Program
+	DumbProcessor
+}
+
+func NewConditionalOverflow(g *BucketFactory) (*ConditionalOverflow, error) {
+	var err error
+
+	c := ConditionalOverflow{}
+	c.ConditionalFilter = g.ConditionalOverflow
+	c.ConditionalFilterRuntime, err = expr.Compile(c.ConditionalFilter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{
+		"queue": &Queue{}, "leaky": &Leaky{}})))
+	if err != nil {
+		g.logger.Errorf("Unable to compile condition expression for conditional bucket : %s", err)
+		return nil, fmt.Errorf("unable to compile condition expression for conditional bucket : %v", err)
+	}
+	return &c, nil
+}
+
+func (c *ConditionalOverflow) AfterBucketPour(b *BucketFactory) func(types.Event, *Leaky) *types.Event {
+	return func(msg types.Event, l *Leaky) *types.Event {
+		var condition, ok bool
+		if c.ConditionalFilterRuntime != nil {
+			l.logger.Debugf("Running condition expression : %s", c.ConditionalFilter)
+			ret, err := expr.Run(c.ConditionalFilterRuntime, exprhelpers.GetExprEnv(map[string]interface{}{"evt": &msg, "queue": l.Queue, "leaky": l}))
+			if err != nil {
+				l.logger.Errorf("unable to run conditional filter : %s", err)
+				return &msg
+			}
+
+			l.logger.Debugf("Conditional bucket expression returned : %v", ret)
+
+			if condition, ok = ret.(bool); !ok {
+				l.logger.Warningf("overflow condition, unexpected non-bool return : %T", ret)
+				return &msg
+			}
+
+			if condition {
+				l.logger.Debugf("Conditional bucket overflow")
+				l.Ovflw_ts = time.Now().UTC()
+				l.Out <- l.Queue
+				return nil
+			}
+		}
+
+		return &msg
+	}
+}

+ 76 - 42
pkg/leakybucket/manager_load.go

@@ -11,6 +11,8 @@ import (
 	"sync"
 	"sync"
 	"time"
 	"time"
 
 
+	"github.com/crowdsecurity/crowdsec/pkg/alertcontext"
+
 	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
 	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
 	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
 	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
 	"github.com/crowdsecurity/crowdsec/pkg/cwversion"
 	"github.com/crowdsecurity/crowdsec/pkg/cwversion"
@@ -32,49 +34,50 @@ import (
 // BucketFactory struct holds all fields for any bucket configuration. This is to have a
 // BucketFactory struct holds all fields for any bucket configuration. This is to have a
 // generic struct for buckets. This can be seen as a bucket factory.
 // generic struct for buckets. This can be seen as a bucket factory.
 type BucketFactory struct {
 type BucketFactory struct {
-	FormatVersion   string                    `yaml:"format"`
-	Author          string                    `yaml:"author"`
-	Description     string                    `yaml:"description"`
-	References      []string                  `yaml:"references"`
-	Type            string                    `yaml:"type"`                //Type can be : leaky, counter, trigger. It determines the main bucket characteristics
-	Name            string                    `yaml:"name"`                //Name of the bucket, used later in log and user-messages. Should be unique
-	Capacity        int                       `yaml:"capacity"`            //Capacity is applicable to leaky buckets and determines the "burst" capacity
-	LeakSpeed       string                    `yaml:"leakspeed"`           //Leakspeed is a float representing how many events per second leak out of the bucket
-	Duration        string                    `yaml:"duration"`            //Duration allows 'counter' buckets to have a fixed life-time
-	Filter          string                    `yaml:"filter"`              //Filter is an expr that determines if an event is elligible for said bucket. Filter is evaluated against the Event struct
-	GroupBy         string                    `yaml:"groupby,omitempty"`   //groupy is an expr that allows to determine the partitions of the bucket. A common example is the source_ip
-	Distinct        string                    `yaml:"distinct"`            //Distinct, when present, adds a `Pour()` processor that will only pour uniq items (based on distinct expr result)
-	Debug           bool                      `yaml:"debug"`               //Debug, when set to true, will enable debugging for _this_ scenario specifically
-	Labels          map[string]string         `yaml:"labels"`              //Labels is K:V list aiming at providing context the overflow
-	Blackhole       string                    `yaml:"blackhole,omitempty"` //Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration
-	logger          *log.Entry                `yaml:"-"`                   //logger is bucket-specific logger (used by Debug as well)
-	Reprocess       bool                      `yaml:"reprocess"`           //Reprocess, if true, will for the bucket to be re-injected into processing chain
-	CacheSize       int                       `yaml:"cache_size"`          //CacheSize, if > 0, limits the size of in-memory cache of the bucket
-	Profiling       bool                      `yaml:"profiling"`           //Profiling, if true, will make the bucket record pours/overflows/etc.
-	OverflowFilter  string                    `yaml:"overflow_filter"`     //OverflowFilter if present, is a filter that must return true for the overflow to go through
-	ScopeType       types.ScopeType           `yaml:"scope,omitempty"`     //to enforce a different remediation than blocking an IP. Will default this to IP
-	BucketName      string                    `yaml:"-"`
-	Filename        string                    `yaml:"-"`
-	RunTimeFilter   *vm.Program               `json:"-"`
-	ExprDebugger    *exprhelpers.ExprDebugger `yaml:"-" json:"-"` // used to debug expression by printing the content of each variable of the expression
-	RunTimeGroupBy  *vm.Program               `json:"-"`
-	Data            []*types.DataSource       `yaml:"data,omitempty"`
-	DataDir         string                    `yaml:"-"`
-	CancelOnFilter  string                    `yaml:"cancel_on,omitempty"` //a filter that, if matched, kills the bucket
-	leakspeed       time.Duration             //internal representation of `Leakspeed`
-	duration        time.Duration             //internal representation of `Duration`
-	ret             chan types.Event          //the bucket-specific output chan for overflows
-	processors      []Processor               //processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.)
-	output          bool                      //??
-	ScenarioVersion string                    `yaml:"version,omitempty"`
-	hash            string                    `yaml:"-"`
-	Simulated       bool                      `yaml:"simulated"` //Set to true if the scenario instantiating the bucket was in the exclusion list
-	tomb            *tomb.Tomb                `yaml:"-"`
-	wgPour          *sync.WaitGroup           `yaml:"-"`
-	wgDumpState     *sync.WaitGroup           `yaml:"-"`
+	FormatVersion       string                    `yaml:"format"`
+	Author              string                    `yaml:"author"`
+	Description         string                    `yaml:"description"`
+	References          []string                  `yaml:"references"`
+	Type                string                    `yaml:"type"`                //Type can be : leaky, counter, trigger. It determines the main bucket characteristics
+	Name                string                    `yaml:"name"`                //Name of the bucket, used later in log and user-messages. Should be unique
+	Capacity            int                       `yaml:"capacity"`            //Capacity is applicable to leaky buckets and determines the "burst" capacity
+	LeakSpeed           string                    `yaml:"leakspeed"`           //Leakspeed is a float representing how many events per second leak out of the bucket
+	Duration            string                    `yaml:"duration"`            //Duration allows 'counter' buckets to have a fixed life-time
+	Filter              string                    `yaml:"filter"`              //Filter is an expr that determines if an event is eligible for said bucket. Filter is evaluated against the Event struct
+	GroupBy             string                    `yaml:"groupby,omitempty"`   //groupby is an expr that allows to determine the partitions of the bucket. A common example is the source_ip
+	Distinct            string                    `yaml:"distinct"`            //Distinct, when present, adds a `Pour()` processor that will only pour uniq items (based on distinct expr result)
+	Debug               bool                      `yaml:"debug"`               //Debug, when set to true, will enable debugging for _this_ scenario specifically
+	Labels              map[string]string         `yaml:"labels"`              //Labels is K:V list aiming at providing context the overflow
+	Blackhole           string                    `yaml:"blackhole,omitempty"` //Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration
+	logger              *log.Entry                `yaml:"-"`                   //logger is bucket-specific logger (used by Debug as well)
+	Reprocess           bool                      `yaml:"reprocess"`           //Reprocess, if true, will force the bucket to be re-injected into processing chain
+	CacheSize           int                       `yaml:"cache_size"`          //CacheSize, if > 0, limits the size of in-memory cache of the bucket
+	Profiling           bool                      `yaml:"profiling"`           //Profiling, if true, will make the bucket record pours/overflows/etc.
+	OverflowFilter      string                    `yaml:"overflow_filter"`     //OverflowFilter if present, is a filter that must return true for the overflow to go through
+	ConditionalOverflow string                    `yaml:"condition"`           //condition if present, is an expression that must return true for the bucket to overflow
+	ScopeType           types.ScopeType           `yaml:"scope,omitempty"`     //to enforce a different remediation than blocking an IP. Will default this to IP
+	BucketName          string                    `yaml:"-"`
+	Filename            string                    `yaml:"-"`
+	RunTimeFilter       *vm.Program               `json:"-"`
+	ExprDebugger        *exprhelpers.ExprDebugger `yaml:"-" json:"-"` // used to debug expression by printing the content of each variable of the expression
+	RunTimeGroupBy      *vm.Program               `json:"-"`
+	Data                []*types.DataSource       `yaml:"data,omitempty"`
+	DataDir             string                    `yaml:"-"`
+	CancelOnFilter      string                    `yaml:"cancel_on,omitempty"` //a filter that, if matched, kills the bucket
+	leakspeed           time.Duration             //internal representation of `Leakspeed`
+	duration            time.Duration             //internal representation of `Duration`
+	ret                 chan types.Event          //the bucket-specific output chan for overflows
+	processors          []Processor               //processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.)
+	output              bool                      //??
+	ScenarioVersion     string                    `yaml:"version,omitempty"`
+	hash                string                    `yaml:"-"`
+	Simulated           bool                      `yaml:"simulated"` //Set to true if the scenario instantiating the bucket was in the exclusion list
+	tomb                *tomb.Tomb                `yaml:"-"`
+	wgPour              *sync.WaitGroup           `yaml:"-"`
+	wgDumpState         *sync.WaitGroup           `yaml:"-"`
 }
 }
 
 
-//we use one NameGenerator for all the future buckets
+// we use one NameGenerator for all the future buckets
 var seed namegenerator.Generator = namegenerator.NewNameGenerator(time.Now().UTC().UnixNano())
 var seed namegenerator.Generator = namegenerator.NewNameGenerator(time.Now().UTC().UnixNano())
 
 
 func ValidateFactory(bucketFactory *BucketFactory) error {
 func ValidateFactory(bucketFactory *BucketFactory) error {
@@ -96,7 +99,7 @@ func ValidateFactory(bucketFactory *BucketFactory) error {
 		}
 		}
 	} else if bucketFactory.Type == "counter" {
 	} else if bucketFactory.Type == "counter" {
 		if bucketFactory.Duration == "" {
 		if bucketFactory.Duration == "" {
-			return fmt.Errorf("duration ca't be empty for counter")
+			return fmt.Errorf("duration can't be empty for counter")
 		}
 		}
 		if bucketFactory.duration == 0 {
 		if bucketFactory.duration == 0 {
 			return fmt.Errorf("bad duration for counter bucket '%d'", bucketFactory.duration)
 			return fmt.Errorf("bad duration for counter bucket '%d'", bucketFactory.duration)
@@ -108,6 +111,19 @@ func ValidateFactory(bucketFactory *BucketFactory) error {
 		if bucketFactory.Capacity != 0 {
 		if bucketFactory.Capacity != 0 {
 			return fmt.Errorf("trigger bucket must have 0 capacity")
 			return fmt.Errorf("trigger bucket must have 0 capacity")
 		}
 		}
+	} else if bucketFactory.Type == "conditional" {
+		if bucketFactory.ConditionalOverflow == "" {
+			return fmt.Errorf("conditional bucket must have a condition")
+		}
+		if bucketFactory.Capacity != -1 {
+			bucketFactory.logger.Warnf("Using a value different than -1 as capacity for conditional bucket, this may lead to unexpected overflows")
+		}
+		if bucketFactory.LeakSpeed == "" {
+			return fmt.Errorf("leakspeed can't be empty for conditional bucket")
+		}
+		if bucketFactory.leakspeed == 0 {
+			return fmt.Errorf("bad leakspeed for conditional bucket '%s'", bucketFactory.LeakSpeed)
+		}
 	} else {
 	} else {
 		return fmt.Errorf("unknown bucket type '%s'", bucketFactory.Type)
 		return fmt.Errorf("unknown bucket type '%s'", bucketFactory.Type)
 	}
 	}
@@ -225,6 +241,11 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, files []string, tomb *tomb.
 			ret = append(ret, bucketFactory)
 			ret = append(ret, bucketFactory)
 		}
 		}
 	}
 	}
+
+	if err := alertcontext.NewAlertContext(cscfg.ContextToSend, cscfg.ConsoleContextValueLength); err != nil {
+		return nil, nil, fmt.Errorf("unable to load alert context: %s", err)
+	}
+
 	log.Warningf("Loaded %d scenarios", len(ret))
 	log.Warningf("Loaded %d scenarios", len(ret))
 	return ret, response, nil
 	return ret, response, nil
 }
 }
@@ -297,6 +318,8 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error {
 		bucketFactory.processors = append(bucketFactory.processors, &Trigger{})
 		bucketFactory.processors = append(bucketFactory.processors, &Trigger{})
 	case "counter":
 	case "counter":
 		bucketFactory.processors = append(bucketFactory.processors, &DumbProcessor{})
 		bucketFactory.processors = append(bucketFactory.processors, &DumbProcessor{})
+	case "conditional":
+		bucketFactory.processors = append(bucketFactory.processors, &DumbProcessor{})
 	default:
 	default:
 		return fmt.Errorf("invalid type '%s' in %s : %v", bucketFactory.Type, bucketFactory.Filename, err)
 		return fmt.Errorf("invalid type '%s' in %s : %v", bucketFactory.Type, bucketFactory.Filename, err)
 	}
 	}
@@ -331,6 +354,16 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error {
 		bucketFactory.processors = append(bucketFactory.processors, blackhole)
 		bucketFactory.processors = append(bucketFactory.processors, blackhole)
 	}
 	}
 
 
+	if bucketFactory.ConditionalOverflow != "" {
+		bucketFactory.logger.Tracef("Adding conditional overflow.")
+		condovflw, err := NewConditionalOverflow(bucketFactory)
+		if err != nil {
+			bucketFactory.logger.Errorf("Error creating conditional overflow : %s", err)
+			return fmt.Errorf("error creating conditional overflow : %s", err)
+		}
+		bucketFactory.processors = append(bucketFactory.processors, condovflw)
+	}
+
 	if len(bucketFactory.Data) > 0 {
 	if len(bucketFactory.Data) > 0 {
 		for _, data := range bucketFactory.Data {
 		for _, data := range bucketFactory.Data {
 			if data.DestPath == "" {
 			if data.DestPath == "" {
@@ -349,6 +382,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error {
 		return fmt.Errorf("invalid bucket from %s : %v", bucketFactory.Filename, err)
 		return fmt.Errorf("invalid bucket from %s : %v", bucketFactory.Filename, err)
 	}
 	}
 	bucketFactory.tomb = tomb
 	bucketFactory.tomb = tomb
+
 	return nil
 	return nil
 
 
 }
 }

+ 10 - 4
pkg/leakybucket/overflows.go

@@ -6,6 +6,7 @@ import (
 	"sort"
 	"sort"
 	"strconv"
 	"strconv"
 
 
+	"github.com/crowdsecurity/crowdsec/pkg/alertcontext"
 	"github.com/crowdsecurity/crowdsec/pkg/models"
 	"github.com/crowdsecurity/crowdsec/pkg/models"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 	"github.com/davecgh/go-spew/spew"
 	"github.com/davecgh/go-spew/spew"
@@ -17,7 +18,7 @@ import (
 	"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
 	"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
 )
 )
 
 
-//SourceFromEvent extracts and formats a valid models.Source object from an Event
+// SourceFromEvent extracts and formats a valid models.Source object from an Event
 func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, error) {
 func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, error) {
 	srcs := make(map[string]models.Source)
 	srcs := make(map[string]models.Source)
 	/*if it's already an overflow, we have properly formatted sources.
 	/*if it's already an overflow, we have properly formatted sources.
@@ -160,7 +161,7 @@ func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, e
 	return srcs, nil
 	return srcs, nil
 }
 }
 
 
-//EventsFromQueue iterates the queue to collect & prepare meta-datas from alert
+// EventsFromQueue iterates the queue to collect & prepare meta-datas from alert
 func EventsFromQueue(queue *Queue) []*models.Event {
 func EventsFromQueue(queue *Queue) []*models.Event {
 
 
 	events := []*models.Event{}
 	events := []*models.Event{}
@@ -207,7 +208,7 @@ func EventsFromQueue(queue *Queue) []*models.Event {
 	return events
 	return events
 }
 }
 
 
-//alertFormatSource iterates over the queue to collect sources
+// alertFormatSource iterates over the queue to collect sources
 func alertFormatSource(leaky *Leaky, queue *Queue) (map[string]models.Source, string, error) {
 func alertFormatSource(leaky *Leaky, queue *Queue) (map[string]models.Source, string, error) {
 	var sources map[string]models.Source = make(map[string]models.Source)
 	var sources map[string]models.Source = make(map[string]models.Source)
 	var source_type string
 	var source_type string
@@ -233,7 +234,7 @@ func alertFormatSource(leaky *Leaky, queue *Queue) (map[string]models.Source, st
 	return sources, source_type, nil
 	return sources, source_type, nil
 }
 }
 
 
-//NewAlert will generate a RuntimeAlert and its APIAlert(s) from a bucket that overflowed
+// NewAlert will generate a RuntimeAlert and its APIAlert(s) from a bucket that overflowed
 func NewAlert(leaky *Leaky, queue *Queue) (types.RuntimeAlert, error) {
 func NewAlert(leaky *Leaky, queue *Queue) (types.RuntimeAlert, error) {
 	var runtimeAlert types.RuntimeAlert
 	var runtimeAlert types.RuntimeAlert
 
 
@@ -293,6 +294,11 @@ func NewAlert(leaky *Leaky, queue *Queue) (types.RuntimeAlert, error) {
 	*apiAlert.Message = fmt.Sprintf("%s %s performed '%s' (%d events over %s) at %s", source_scope, sourceStr, leaky.Name, leaky.Total_count, leaky.Ovflw_ts.Sub(leaky.First_ts), leaky.Last_ts)
 	*apiAlert.Message = fmt.Sprintf("%s %s performed '%s' (%d events over %s) at %s", source_scope, sourceStr, leaky.Name, leaky.Total_count, leaky.Ovflw_ts.Sub(leaky.First_ts), leaky.Last_ts)
 	//Get the events from Leaky/Queue
 	//Get the events from Leaky/Queue
 	apiAlert.Events = EventsFromQueue(queue)
 	apiAlert.Events = EventsFromQueue(queue)
+	var warnings []error
+	apiAlert.Meta, warnings = alertcontext.EventToContext(leaky.Queue.GetQueue())
+	for _, w := range warnings {
+		log.Warningf("while extracting context from bucket %s : %s", leaky.Name, w)
+	}
 
 
 	//Loop over the Sources and generate appropriate number of ApiAlerts
 	//Loop over the Sources and generate appropriate number of ApiAlerts
 	for _, srcValue := range sources {
 	for _, srcValue := range sources {

+ 7 - 0
pkg/leakybucket/processor.go

@@ -6,6 +6,8 @@ type Processor interface {
 	OnBucketInit(Bucket *BucketFactory) error
 	OnBucketInit(Bucket *BucketFactory) error
 	OnBucketPour(Bucket *BucketFactory) func(types.Event, *Leaky) *types.Event
 	OnBucketPour(Bucket *BucketFactory) func(types.Event, *Leaky) *types.Event
 	OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, types.RuntimeAlert, *Queue) (types.RuntimeAlert, *Queue)
 	OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, types.RuntimeAlert, *Queue) (types.RuntimeAlert, *Queue)
+
+	AfterBucketPour(Bucket *BucketFactory) func(types.Event, *Leaky) *types.Event
 }
 }
 
 
 type DumbProcessor struct {
 type DumbProcessor struct {
@@ -25,5 +27,10 @@ func (d *DumbProcessor) OnBucketOverflow(b *BucketFactory) func(*Leaky, types.Ru
 	return func(leaky *Leaky, alert types.RuntimeAlert, queue *Queue) (types.RuntimeAlert, *Queue) {
 	return func(leaky *Leaky, alert types.RuntimeAlert, queue *Queue) (types.RuntimeAlert, *Queue) {
 		return alert, queue
 		return alert, queue
 	}
 	}
+}
 
 
+func (d *DumbProcessor) AfterBucketPour(bucketFactory *BucketFactory) func(types.Event, *Leaky) *types.Event {
+	return func(msg types.Event, leaky *Leaky) *types.Event {
+		return &msg
+	}
 }
 }

+ 6 - 0
pkg/leakybucket/reset_filter.go

@@ -64,6 +64,12 @@ func (u *CancelOnFilter) OnBucketOverflow(bucketFactory *BucketFactory) func(*Le
 	}
 	}
 }
 }
 
 
+func (u *CancelOnFilter) AfterBucketPour(bucketFactory *BucketFactory) func(types.Event, *Leaky) *types.Event {
+	return func(msg types.Event, leaky *Leaky) *types.Event {
+		return &msg
+	}
+}
+
 func (u *CancelOnFilter) OnBucketInit(bucketFactory *BucketFactory) error {
 func (u *CancelOnFilter) OnBucketInit(bucketFactory *BucketFactory) error {
 	var err error
 	var err error
 	var compiledExpr struct {
 	var compiledExpr struct {

+ 11 - 0
pkg/leakybucket/tests/conditional-bucket/bucket.yaml

@@ -0,0 +1,11 @@
+type: conditional
+name: test/conditional
+#debug: true
+description: "conditional bucket"
+filter: "evt.Meta.log_type == 'http_access-log'"
+groupby: evt.Meta.source_ip
+condition: any(queue.Queue, {.Meta.http_path == "/"}) and any(queue.Queue, {.Meta.http_path == "/foo"})
+leakspeed: 1s
+capacity: -1
+labels:
+  type: overflow_1

+ 1 - 0
pkg/leakybucket/tests/conditional-bucket/scenarios.yaml

@@ -0,0 +1 @@
+ - filename: {{.TestDirectory}}/bucket.yaml

+ 50 - 0
pkg/leakybucket/tests/conditional-bucket/test.json

@@ -0,0 +1,50 @@
+{
+	"lines": [
+	   {
+		  "Line": {
+			 "Labels": {
+				"type": "nginx"
+			 },
+			 "Raw": "don't care"
+		  },
+		  "MarshaledTime": "2020-01-01T10:00:00.000Z",
+		  "Meta": {
+			 "source_ip": "2a00:1450:4007:816::200e",
+			 "log_type": "http_access-log",
+			 "http_path": "/"
+		  }
+	   },
+	   {
+		"Line": {
+		   "Labels": {
+			  "type": "nginx"
+		   },
+		   "Raw": "don't care"
+		},
+		"MarshaledTime": "2020-01-01T10:00:00.000Z",
+		"Meta": {
+		   "source_ip": "2a00:1450:4007:816::200e",
+		   "log_type": "http_access-log",
+		   "http_path": "/foo"
+		}
+	 }
+	],
+	"results": [
+	  {
+		"Type" : 1,
+		"Alert": {
+		  "sources" : {
+			"2a00:1450:4007:816::200e": {
+			  "ip": "2a00:1450:4007:816::200e",
+			  "scope": "Ip",
+			  "value": "2a00:1450:4007:816::200e"
+			}
+		  },
+		  "Alert" : {
+			"scenario": "test/conditional",
+			"events_count": 2
+		  }
+		}
+	  }
+	]
+  }

+ 5 - 2
pkg/leakybucket/timemachine.go

@@ -4,7 +4,6 @@ import (
 	"time"
 	"time"
 
 
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
-	"github.com/davecgh/go-spew/spew"
 	log "github.com/sirupsen/logrus"
 	log "github.com/sirupsen/logrus"
 )
 )
 
 
@@ -14,7 +13,11 @@ func TimeMachinePour(l *Leaky, msg types.Event) {
 		err error
 		err error
 	)
 	)
 	if msg.MarshaledTime == "" {
 	if msg.MarshaledTime == "" {
-		log.Warningf("Trying to time-machine event without timestamp : %s", spew.Sdump(msg))
+		log.WithFields(log.Fields{
+			"evt_type": msg.Line.Labels["type"],
+			"evt_src":  msg.Line.Src,
+			"scenario": l.Name,
+		}).Warningf("Trying to process event without evt.StrTime. Event cannot be poured to scenario")
 		return
 		return
 	}
 	}
 
 

+ 6 - 0
pkg/leakybucket/uniq.go

@@ -53,6 +53,12 @@ func (u *Uniq) OnBucketOverflow(bucketFactory *BucketFactory) func(*Leaky, types
 	}
 	}
 }
 }
 
 
+func (u *Uniq) AfterBucketPour(bucketFactory *BucketFactory) func(types.Event, *Leaky) *types.Event {
+	return func(msg types.Event, leaky *Leaky) *types.Event {
+		return &msg
+	}
+}
+
 func (u *Uniq) OnBucketInit(bucketFactory *BucketFactory) error {
 func (u *Uniq) OnBucketInit(bucketFactory *BucketFactory) error {
 	var err error
 	var err error
 	var compiledExpr *vm.Program
 	var compiledExpr *vm.Program

+ 102 - 15
pkg/models/add_signals_request_item.go

@@ -7,6 +7,7 @@ package models
 
 
 import (
 import (
 	"context"
 	"context"
+	"strconv"
 
 
 	"github.com/go-openapi/errors"
 	"github.com/go-openapi/errors"
 	"github.com/go-openapi/strfmt"
 	"github.com/go-openapi/strfmt"
@@ -19,6 +20,12 @@ import (
 // swagger:model AddSignalsRequestItem
 // swagger:model AddSignalsRequestItem
 type AddSignalsRequestItem struct {
 type AddSignalsRequestItem struct {
 
 
+	// alert id
+	AlertID int64 `json:"alert_id,omitempty"`
+
+	// context
+	Context []*AddSignalsRequestItemContextItems0 `json:"context"`
+
 	// created at
 	// created at
 	CreatedAt string `json:"created_at,omitempty"`
 	CreatedAt string `json:"created_at,omitempty"`
 
 
@@ -38,8 +45,7 @@ type AddSignalsRequestItem struct {
 	ScenarioHash *string `json:"scenario_hash"`
 	ScenarioHash *string `json:"scenario_hash"`
 
 
 	// scenario trust
 	// scenario trust
-	// Required: true
-	ScenarioTrust *string `json:"scenario_trust"`
+	ScenarioTrust string `json:"scenario_trust,omitempty"`
 
 
 	// scenario version
 	// scenario version
 	// Required: true
 	// Required: true
@@ -62,19 +68,19 @@ type AddSignalsRequestItem struct {
 func (m *AddSignalsRequestItem) Validate(formats strfmt.Registry) error {
 func (m *AddSignalsRequestItem) Validate(formats strfmt.Registry) error {
 	var res []error
 	var res []error
 
 
-	if err := m.validateMessage(formats); err != nil {
+	if err := m.validateContext(formats); err != nil {
 		res = append(res, err)
 		res = append(res, err)
 	}
 	}
 
 
-	if err := m.validateScenario(formats); err != nil {
+	if err := m.validateMessage(formats); err != nil {
 		res = append(res, err)
 		res = append(res, err)
 	}
 	}
 
 
-	if err := m.validateScenarioHash(formats); err != nil {
+	if err := m.validateScenario(formats); err != nil {
 		res = append(res, err)
 		res = append(res, err)
 	}
 	}
 
 
-	if err := m.validateScenarioTrust(formats); err != nil {
+	if err := m.validateScenarioHash(formats); err != nil {
 		res = append(res, err)
 		res = append(res, err)
 	}
 	}
 
 
@@ -100,36 +106,53 @@ func (m *AddSignalsRequestItem) Validate(formats strfmt.Registry) error {
 	return nil
 	return nil
 }
 }
 
 
-func (m *AddSignalsRequestItem) validateMessage(formats strfmt.Registry) error {
+func (m *AddSignalsRequestItem) validateContext(formats strfmt.Registry) error {
+	if swag.IsZero(m.Context) { // not required
+		return nil
+	}
+
+	for i := 0; i < len(m.Context); i++ {
+		if swag.IsZero(m.Context[i]) { // not required
+			continue
+		}
+
+		if m.Context[i] != nil {
+			if err := m.Context[i].Validate(formats); err != nil {
+				if ve, ok := err.(*errors.Validation); ok {
+					return ve.ValidateName("context" + "." + strconv.Itoa(i))
+				} else if ce, ok := err.(*errors.CompositeError); ok {
+					return ce.ValidateName("context" + "." + strconv.Itoa(i))
+				}
+				return err
+			}
+		}
 
 
-	if err := validate.Required("message", "body", m.Message); err != nil {
-		return err
 	}
 	}
 
 
 	return nil
 	return nil
 }
 }
 
 
-func (m *AddSignalsRequestItem) validateScenario(formats strfmt.Registry) error {
+func (m *AddSignalsRequestItem) validateMessage(formats strfmt.Registry) error {
 
 
-	if err := validate.Required("scenario", "body", m.Scenario); err != nil {
+	if err := validate.Required("message", "body", m.Message); err != nil {
 		return err
 		return err
 	}
 	}
 
 
 	return nil
 	return nil
 }
 }
 
 
-func (m *AddSignalsRequestItem) validateScenarioHash(formats strfmt.Registry) error {
+func (m *AddSignalsRequestItem) validateScenario(formats strfmt.Registry) error {
 
 
-	if err := validate.Required("scenario_hash", "body", m.ScenarioHash); err != nil {
+	if err := validate.Required("scenario", "body", m.Scenario); err != nil {
 		return err
 		return err
 	}
 	}
 
 
 	return nil
 	return nil
 }
 }
 
 
-func (m *AddSignalsRequestItem) validateScenarioTrust(formats strfmt.Registry) error {
+func (m *AddSignalsRequestItem) validateScenarioHash(formats strfmt.Registry) error {
 
 
-	if err := validate.Required("scenario_trust", "body", m.ScenarioTrust); err != nil {
+	if err := validate.Required("scenario_hash", "body", m.ScenarioHash); err != nil {
 		return err
 		return err
 	}
 	}
 
 
@@ -187,6 +210,10 @@ func (m *AddSignalsRequestItem) validateStopAt(formats strfmt.Registry) error {
 func (m *AddSignalsRequestItem) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
 func (m *AddSignalsRequestItem) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
 	var res []error
 	var res []error
 
 
+	if err := m.contextValidateContext(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
 	if err := m.contextValidateSource(ctx, formats); err != nil {
 	if err := m.contextValidateSource(ctx, formats); err != nil {
 		res = append(res, err)
 		res = append(res, err)
 	}
 	}
@@ -197,6 +224,26 @@ func (m *AddSignalsRequestItem) ContextValidate(ctx context.Context, formats str
 	return nil
 	return nil
 }
 }
 
 
+func (m *AddSignalsRequestItem) contextValidateContext(ctx context.Context, formats strfmt.Registry) error {
+
+	for i := 0; i < len(m.Context); i++ {
+
+		if m.Context[i] != nil {
+			if err := m.Context[i].ContextValidate(ctx, formats); err != nil {
+				if ve, ok := err.(*errors.Validation); ok {
+					return ve.ValidateName("context" + "." + strconv.Itoa(i))
+				} else if ce, ok := err.(*errors.CompositeError); ok {
+					return ce.ValidateName("context" + "." + strconv.Itoa(i))
+				}
+				return err
+			}
+		}
+
+	}
+
+	return nil
+}
+
 func (m *AddSignalsRequestItem) contextValidateSource(ctx context.Context, formats strfmt.Registry) error {
 func (m *AddSignalsRequestItem) contextValidateSource(ctx context.Context, formats strfmt.Registry) error {
 
 
 	if m.Source != nil {
 	if m.Source != nil {
@@ -230,3 +277,43 @@ func (m *AddSignalsRequestItem) UnmarshalBinary(b []byte) error {
 	*m = res
 	*m = res
 	return nil
 	return nil
 }
 }
+
+// AddSignalsRequestItemContextItems0 add signals request item context items0
+//
+// swagger:model AddSignalsRequestItemContextItems0
+type AddSignalsRequestItemContextItems0 struct {
+
+	// key
+	Key string `json:"key,omitempty"`
+
+	// value
+	Value string `json:"value,omitempty"`
+}
+
+// Validate validates this add signals request item context items0
+func (m *AddSignalsRequestItemContextItems0) Validate(formats strfmt.Registry) error {
+	return nil
+}
+
+// ContextValidate validates this add signals request item context items0 based on context it is used
+func (m *AddSignalsRequestItemContextItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *AddSignalsRequestItemContextItems0) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *AddSignalsRequestItemContextItems0) UnmarshalBinary(b []byte) error {
+	var res AddSignalsRequestItemContextItems0
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}

+ 16 - 10
pkg/parser/enrich_date.go

@@ -3,6 +3,7 @@ package parser
 import (
 import (
 	"time"
 	"time"
 
 
+	expr "github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 	log "github.com/sirupsen/logrus"
 	log "github.com/sirupsen/logrus"
 )
 )
@@ -59,20 +60,25 @@ func ParseDate(in string, p *types.Event, x interface{}, plog *log.Entry) (map[s
 	var ret map[string]string = make(map[string]string)
 	var ret map[string]string = make(map[string]string)
 	var strDate string
 	var strDate string
 	var parsedDate time.Time
 	var parsedDate time.Time
-
-	if p.StrTimeFormat != "" {
-		strDate, parsedDate = parseDateWithFormat(in, p.StrTimeFormat)
+	if in != "" {
+		if p.StrTimeFormat != "" {
+			strDate, parsedDate = parseDateWithFormat(in, p.StrTimeFormat)
+			if !parsedDate.IsZero() {
+				ret["MarshaledTime"] = strDate
+				return ret, nil
+			}
+			plog.Debugf("unable to parse '%s' with layout '%s'", in, p.StrTimeFormat)
+		}
+		strDate, parsedDate = GenDateParse(in)
 		if !parsedDate.IsZero() {
 		if !parsedDate.IsZero() {
 			ret["MarshaledTime"] = strDate
 			ret["MarshaledTime"] = strDate
 			return ret, nil
 			return ret, nil
-		} else {
-			plog.Debugf("unable to parse '%s' with layout '%s'", in, p.StrTimeFormat)
 		}
 		}
-	}
-	strDate, parsedDate = GenDateParse(in)
-	if !parsedDate.IsZero() {
-		ret["MarshaledTime"] = strDate
-		return ret, nil
+		strDate = expr.ParseUnix(in)
+		if strDate != "" {
+			ret["MarshaledTime"] = strDate
+			return ret, nil
+		}
 	}
 	}
 	plog.Debugf("no suitable date format found for '%s', falling back to now", in)
 	plog.Debugf("no suitable date format found for '%s', falling back to now", in)
 	now := time.Now().UTC()
 	now := time.Now().UTC()

+ 103 - 4
pkg/parser/node.go

@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"fmt"
 	"net"
 	"net"
 	"strings"
 	"strings"
+	"time"
 
 
 	"github.com/antonmedv/expr"
 	"github.com/antonmedv/expr"
 	"github.com/crowdsecurity/grokky"
 	"github.com/crowdsecurity/grokky"
@@ -11,6 +12,7 @@ import (
 	yaml "gopkg.in/yaml.v2"
 	yaml "gopkg.in/yaml.v2"
 
 
 	"github.com/antonmedv/expr/vm"
 	"github.com/antonmedv/expr/vm"
+	"github.com/crowdsecurity/crowdsec/pkg/cache"
 	"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
 	"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 	"github.com/davecgh/go-spew/spew"
 	"github.com/davecgh/go-spew/spew"
@@ -57,6 +59,8 @@ type Node struct {
 	Grok types.GrokPattern `yaml:"grok,omitempty"`
 	Grok types.GrokPattern `yaml:"grok,omitempty"`
 	//Statics can be present in any type of node and is executed last
 	//Statics can be present in any type of node and is executed last
 	Statics []types.ExtraField `yaml:"statics,omitempty"`
 	Statics []types.ExtraField `yaml:"statics,omitempty"`
+	//Stash allows to capture data from the log line and store it in an accessible cache
+	Stash []types.DataCapture `yaml:"stash,omitempty"`
 	//Whitelists
 	//Whitelists
 	Whitelist Whitelist           `yaml:"whitelist,omitempty"`
 	Whitelist Whitelist           `yaml:"whitelist,omitempty"`
 	Data      []*types.DataSource `yaml:"data,omitempty"`
 	Data      []*types.DataSource `yaml:"data,omitempty"`
@@ -103,6 +107,25 @@ func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error {
 			}
 			}
 		}
 		}
 	}
 	}
+
+	for idx, stash := range n.Stash {
+		if stash.Name == "" {
+			return fmt.Errorf("stash %d : name must be set", idx)
+		}
+		if stash.Value == "" {
+			return fmt.Errorf("stash %s : value expression must be set", stash.Name)
+		}
+		if stash.Key == "" {
+			return fmt.Errorf("stash %s : key expression must be set", stash.Name)
+		}
+		if stash.TTL == "" {
+			return fmt.Errorf("stash %s : ttl must be set", stash.Name)
+		}
+		//should be configurable
+		if stash.MaxMapSize == 0 {
+			stash.MaxMapSize = 100
+		}
+	}
 	return nil
 	return nil
 }
 }
 
 
@@ -285,6 +308,50 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
 		clog.Tracef("! No grok pattern : %p", n.Grok.RunTimeRegexp)
 		clog.Tracef("! No grok pattern : %p", n.Grok.RunTimeRegexp)
 	}
 	}
 
 
+	//Process the stash (data collection) if : a grok was present and succeeded, or if there is no grok
+	if NodeHasOKGrok || n.Grok.RunTimeRegexp == nil {
+		for idx, stash := range n.Stash {
+			var value string
+			var key string
+			if stash.ValueExpression == nil {
+				clog.Warningf("Stash %d has no value expression, skipping", idx)
+				continue
+			}
+			if stash.KeyExpression == nil {
+				clog.Warningf("Stash %d has no key expression, skipping", idx)
+				continue
+			}
+			//collect the data
+			output, err := expr.Run(stash.ValueExpression, cachedExprEnv)
+			if err != nil {
+				clog.Warningf("Error while running stash val expression : %v", err)
+			}
+			//can we expect anything else than a string ?
+			switch output := output.(type) {
+			case string:
+				value = output
+			default:
+				clog.Warningf("unexpected type %t (%v) while running '%s'", output, output, stash.Value)
+				continue
+			}
+
+			//collect the key
+			output, err = expr.Run(stash.KeyExpression, cachedExprEnv)
+			if err != nil {
+				clog.Warningf("Error while running stash key expression : %v", err)
+			}
+			//can we expect anything else than a string ?
+			switch output := output.(type) {
+			case string:
+				key = output
+			default:
+				clog.Warningf("unexpected type %t (%v) while running '%s'", output, output, stash.Key)
+				continue
+			}
+			cache.SetKey(stash.Name, key, value, &stash.TTLVal)
+		}
+	}
+
 	//Iterate on leafs
 	//Iterate on leafs
 	if len(n.LeavesNodes) > 0 {
 	if len(n.LeavesNodes) > 0 {
 		for _, leaf := range n.LeavesNodes {
 		for _, leaf := range n.LeavesNodes {
@@ -434,10 +501,10 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
 		n.Logger.Tracef("+ Regexp Compilation '%s'", n.Grok.RegexpName)
 		n.Logger.Tracef("+ Regexp Compilation '%s'", n.Grok.RegexpName)
 		n.Grok.RunTimeRegexp, err = pctx.Grok.Get(n.Grok.RegexpName)
 		n.Grok.RunTimeRegexp, err = pctx.Grok.Get(n.Grok.RegexpName)
 		if err != nil {
 		if err != nil {
-			return fmt.Errorf("Unable to find grok '%s' : %v", n.Grok.RegexpName, err)
+			return fmt.Errorf("unable to find grok '%s' : %v", n.Grok.RegexpName, err)
 		}
 		}
 		if n.Grok.RunTimeRegexp == nil {
 		if n.Grok.RunTimeRegexp == nil {
-			return fmt.Errorf("Empty grok '%s'", n.Grok.RegexpName)
+			return fmt.Errorf("empty grok '%s'", n.Grok.RegexpName)
 		}
 		}
 		n.Logger.Tracef("%s regexp: %s", n.Grok.RegexpName, n.Grok.RunTimeRegexp.Regexp.String())
 		n.Logger.Tracef("%s regexp: %s", n.Grok.RegexpName, n.Grok.RunTimeRegexp.Regexp.String())
 		valid = true
 		valid = true
@@ -447,11 +514,11 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
 		}
 		}
 		n.Grok.RunTimeRegexp, err = pctx.Grok.Compile(n.Grok.RegexpValue)
 		n.Grok.RunTimeRegexp, err = pctx.Grok.Compile(n.Grok.RegexpValue)
 		if err != nil {
 		if err != nil {
-			return fmt.Errorf("Failed to compile grok '%s': %v\n", n.Grok.RegexpValue, err)
+			return fmt.Errorf("failed to compile grok '%s': %v", n.Grok.RegexpValue, err)
 		}
 		}
 		if n.Grok.RunTimeRegexp == nil {
 		if n.Grok.RunTimeRegexp == nil {
 			// We shouldn't be here because compilation succeeded, so regexp shouldn't be nil
 			// We shouldn't be here because compilation succeeded, so regexp shouldn't be nil
-			return fmt.Errorf("Grok compilation failure: %s", n.Grok.RegexpValue)
+			return fmt.Errorf("grok compilation failure: %s", n.Grok.RegexpValue)
 		}
 		}
 		n.Logger.Tracef("%s regexp : %s", n.Grok.RegexpValue, n.Grok.RunTimeRegexp.Regexp.String())
 		n.Logger.Tracef("%s regexp : %s", n.Grok.RegexpValue, n.Grok.RunTimeRegexp.Regexp.String())
 		valid = true
 		valid = true
@@ -480,6 +547,38 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
 		}
 		}
 		valid = true
 		valid = true
 	}
 	}
+
+	/* load data capture (stash) */
+	for i, stash := range n.Stash {
+		n.Stash[i].ValueExpression, err = expr.Compile(stash.Value,
+			expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
+		if err != nil {
+			return errors.Wrap(err, "while compiling stash value expression")
+		}
+
+		n.Stash[i].KeyExpression, err = expr.Compile(stash.Key,
+			expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
+		if err != nil {
+			return errors.Wrap(err, "while compiling stash key expression")
+		}
+
+		n.Stash[i].TTLVal, err = time.ParseDuration(stash.TTL)
+		if err != nil {
+			return errors.Wrap(err, "while parsing stash ttl")
+		}
+
+		logLvl := n.Logger.Logger.GetLevel()
+		//init the cache, does it make sense to create it here just to be sure everything is fine ?
+		if err := cache.CacheInit(cache.CacheCfg{
+			Size:     n.Stash[i].MaxMapSize,
+			TTL:      n.Stash[i].TTLVal,
+			Name:     n.Stash[i].Name,
+			LogLevel: &logLvl,
+		}); err != nil {
+			return errors.Wrap(err, "while initializing cache")
+		}
+	}
+
 	/* compile leafs if present */
 	/* compile leafs if present */
 	if len(n.LeavesNodes) > 0 {
 	if len(n.LeavesNodes) > 0 {
 		for idx := range n.LeavesNodes {
 		for idx := range n.LeavesNodes {

+ 4 - 3
pkg/parser/parsing_test.go

@@ -138,7 +138,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing
 	return nil
 	return nil
 }
 }
 
 
-//prepTests is going to do the initialisation of parser : it's going to load enrichment plugins and load the patterns. This is done here so that we don't redo it for each test
+// prepTests is going to do the initialisation of parser : it's going to load enrichment plugins and load the patterns. This is done here so that we don't redo it for each test
 func prepTests() (*UnixParserCtx, EnricherCtx, error) {
 func prepTests() (*UnixParserCtx, EnricherCtx, error) {
 	var (
 	var (
 		err  error
 		err  error
@@ -252,6 +252,7 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo
 					if debug {
 					if debug {
 						retInfo = append(retInfo, fmt.Sprintf("mismatch %s[%s] %s != %s", outLabels[mapIdx], expKey, expVal, outVal))
 						retInfo = append(retInfo, fmt.Sprintf("mismatch %s[%s] %s != %s", outLabels[mapIdx], expKey, expVal, outVal))
 					}
 					}
+					valid = false
 					goto checkFinished
 					goto checkFinished
 				}
 				}
 			} else { //missing entry
 			} else { //missing entry
@@ -266,11 +267,11 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo
 checkFinished:
 checkFinished:
 	if valid {
 	if valid {
 		if debug {
 		if debug {
-			retInfo = append(retInfo, fmt.Sprintf("OK ! %s", strings.Join(retInfo, "/")))
+			retInfo = append(retInfo, fmt.Sprintf("OK ! \n\t%s", strings.Join(retInfo, "\n\t")))
 		}
 		}
 	} else {
 	} else {
 		if debug {
 		if debug {
-			retInfo = append(retInfo, fmt.Sprintf("KO ! %s", strings.Join(retInfo, "/")))
+			retInfo = append(retInfo, fmt.Sprintf("KO ! \n\t%s", strings.Join(retInfo, "\n\t")))
 		}
 		}
 	}
 	}
 	return retInfo, valid
 	return retInfo, valid

BIN
pkg/parser/test_data/GeoLite2-ASN.mmdb


+ 31 - 0
pkg/parser/tests/base-grok-stash/base-grok-stash.yaml

@@ -0,0 +1,31 @@
+filter: "evt.Line.Labels.type == 'testlog'"
+debug: true
+onsuccess: next_stage
+name: tests/base-grok-stash
+pattern_syntax:
+  TEST_START: start %{DATA:program} thing with pid %{NUMBER:pid}
+  TEST_CONTINUED: pid %{NUMBER:pid} did a forbidden thing
+nodes:
+  - #name: tests/base-grok-stash-sub-start
+    grok:
+      name: "TEST_START"
+      apply_on: Line.Raw
+      statics:
+        - meta: log_type
+          value: test_start
+    stash:
+      - name: test_program_pid_assoc
+        key: evt.Parsed.pid
+        value: evt.Parsed.program
+        ttl: 30s
+        size: 10
+  - #name: tests/base-grok-stash-sub-cont
+    grok:
+      name: "TEST_CONTINUED"
+      apply_on: Line.Raw
+      statics:
+        - meta: log_type
+          value: test_continue
+        - meta: associated_prog_name
+          expression: GetFromStash("test_program_pid_assoc", evt.Parsed.pid)
+

+ 2 - 0
pkg/parser/tests/base-grok-stash/parsers.yaml

@@ -0,0 +1,2 @@
+ - filename: {{.TestDirectory}}/base-grok-stash.yaml
+   stage: s00-raw

+ 63 - 0
pkg/parser/tests/base-grok-stash/test.yaml

@@ -0,0 +1,63 @@
+#these are the events we input into parser
+lines:
+  - Line:
+      Labels:
+        type: testlog
+      Raw: start foobar thing with pid 12
+  - Line:
+      Labels:
+        type: testlog
+      Raw: start toto thing with pid 42
+  - Line:
+      Labels:
+        type: testlog
+      Raw: pid 12 did a forbidden thing
+  - Line:
+      Labels:
+        type: testlog
+      Raw: pid 42 did a forbidden thing
+  - Line:
+      Labels:
+        type: testlog
+      Raw: pid 45 did a forbidden thing
+#these are the results we expect from the parser
+results:
+
+  - Meta:
+      log_type: test_start
+    Parsed:
+      program: foobar
+      pid: "12"
+    Process: true
+    Stage: s00-raw
+
+  - Meta:
+      log_type: test_start
+    Parsed:
+      program: toto
+      pid: "42"
+    Process: true
+    Stage: s00-raw
+
+  - Meta:
+      log_type: test_continue
+      associated_prog_name: foobar
+    Parsed:
+      pid: "12"
+    Process: true
+    Stage: s00-raw
+
+  - Meta:
+      log_type: test_continue
+      associated_prog_name: toto
+    Parsed:
+      pid: "42"
+    Process: true
+    Stage: s00-raw
+
+  - Meta:
+      log_type: test_continue
+    Parsed:
+      pid: "45"
+    Process: true
+    Stage: s00-raw

+ 1 - 0
pkg/parser/tests/geoip-enrich/base-grok.yaml

@@ -1,5 +1,6 @@
 filter: "'source_ip' in evt.Meta"
 filter: "'source_ip' in evt.Meta"
 name: tests/geoip-enrich
 name: tests/geoip-enrich
+debug: true
 description: "Populate event with geoloc info : as, country, coords, source range."
 description: "Populate event with geoloc info : as, country, coords, source range."
 statics:
 statics:
   - method: GeoIpCity
   - method: GeoIpCity

+ 3 - 4
pkg/parser/tests/geoip-enrich/test.yaml

@@ -2,7 +2,7 @@
 lines:
 lines:
   - Meta:
   - Meta:
       test: test1
       test: test1
-      source_ip: 8.8.8.8
+      source_ip: 1.0.0.1
   - Meta:
   - Meta:
       test: test2
       test: test2
       source_ip: 192.168.0.1
       source_ip: 192.168.0.1
@@ -10,11 +10,10 @@ lines:
 results:
 results:
   - Process: true
   - Process: true
     Enriched:
     Enriched:
-      IsoCode: US
       IsInEU: false
       IsInEU: false
-      ASNOrg: Google LLC
+      ASNOrg: "Google Inc."
     Meta:
     Meta:
-      source_ip: 8.8.8.8
+      source_ip: 1.0.0.1
   - Process: true
   - Process: true
     Enriched:
     Enriched:
       IsInEU: false
       IsInEU: false

+ 41 - 0
pkg/parser/unix_parser.go

@@ -4,9 +4,11 @@ import (
 	"fmt"
 	"fmt"
 	"os"
 	"os"
 	"path"
 	"path"
+	"sort"
 	"strings"
 	"strings"
 
 
 	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
 	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
+	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
 
 
 	"github.com/crowdsecurity/grokky"
 	"github.com/crowdsecurity/grokky"
 	log "github.com/sirupsen/logrus"
 	log "github.com/sirupsen/logrus"
@@ -50,6 +52,45 @@ func Init(c map[string]interface{}) (*UnixParserCtx, error) {
 	return &r, nil
 	return &r, nil
 }
 }
 
 
+// Return new parsers
+// nodes and povfwnodes are already initialized in parser.LoadStages
+func NewParsers() *Parsers {
+	parsers := &Parsers{
+		Ctx:             &UnixParserCtx{},
+		Povfwctx:        &UnixParserCtx{},
+		StageFiles:      make([]Stagefile, 0),
+		PovfwStageFiles: make([]Stagefile, 0),
+	}
+	for _, itemType := range []string{cwhub.PARSERS, cwhub.PARSERS_OVFLW} {
+		for _, hubParserItem := range cwhub.GetItemMap(itemType) {
+			if hubParserItem.Installed {
+				stagefile := Stagefile{
+					Filename: hubParserItem.LocalPath,
+					Stage:    hubParserItem.Stage,
+				}
+				if itemType == cwhub.PARSERS {
+					parsers.StageFiles = append(parsers.StageFiles, stagefile)
+				}
+				if itemType == cwhub.PARSERS_OVFLW {
+					parsers.PovfwStageFiles = append(parsers.PovfwStageFiles, stagefile)
+				}
+			}
+		}
+	}
+	if parsers.StageFiles != nil {
+		sort.Slice(parsers.StageFiles, func(i, j int) bool {
+			return parsers.StageFiles[i].Filename < parsers.StageFiles[j].Filename
+		})
+	}
+	if parsers.PovfwStageFiles != nil {
+		sort.Slice(parsers.PovfwStageFiles, func(i, j int) bool {
+			return parsers.PovfwStageFiles[i].Filename < parsers.PovfwStageFiles[j].Filename
+		})
+	}
+
+	return parsers
+}
+
 func LoadParsers(cConfig *csconfig.Config, parsers *Parsers) (*Parsers, error) {
 func LoadParsers(cConfig *csconfig.Config, parsers *Parsers) (*Parsers, error) {
 	var err error
 	var err error
 
 

+ 14 - 1
pkg/types/grok_pattern.go

@@ -1,11 +1,13 @@
 package types
 package types
 
 
 import (
 import (
+	"time"
+
 	"github.com/antonmedv/expr/vm"
 	"github.com/antonmedv/expr/vm"
 	"github.com/crowdsecurity/grokky"
 	"github.com/crowdsecurity/grokky"
 )
 )
 
 
-//Used mostly for statics
+// Used mostly for statics
 type ExtraField struct {
 type ExtraField struct {
 	//if the target is indicated by name Struct.Field etc,
 	//if the target is indicated by name Struct.Field etc,
 	TargetByName string `yaml:"target,omitempty"`
 	TargetByName string `yaml:"target,omitempty"`
@@ -39,3 +41,14 @@ type GrokPattern struct {
 	//a grok can contain statics that apply if pattern is successful
 	//a grok can contain statics that apply if pattern is successful
 	Statics []ExtraField `yaml:"statics,omitempty"`
 	Statics []ExtraField `yaml:"statics,omitempty"`
 }
 }
+
+type DataCapture struct {
+	Name            string        `yaml:"name,omitempty"`
+	Key             string        `yaml:"key,omitempty"`
+	KeyExpression   *vm.Program   `yaml:"-"`
+	Value           string        `yaml:"value,omitempty"`
+	ValueExpression *vm.Program   `yaml:"-"`
+	TTL             string        `yaml:"ttl,omitempty"`
+	TTLVal          time.Duration `yaml:"-"`
+	MaxMapSize      int           `yaml:"size,omitempty"`
+}

+ 24 - 5
pkg/yamlpatch/patcher.go

@@ -13,15 +13,24 @@ import (
 type Patcher struct {
 type Patcher struct {
 	BaseFilePath  string
 	BaseFilePath  string
 	PatchFilePath string
 	PatchFilePath string
+	quiet bool
 }
 }
 
 
 func NewPatcher(filePath string, suffix string) *Patcher {
 func NewPatcher(filePath string, suffix string) *Patcher {
 	return &Patcher{
 	return &Patcher{
 		BaseFilePath:  filePath,
 		BaseFilePath:  filePath,
 		PatchFilePath: filePath + suffix,
 		PatchFilePath: filePath + suffix,
+		quiet: false,
 	}
 	}
 }
 }
 
 
+
+// SetQuiet sets the quiet flag, which will log as DEBUG_LEVEL instead of INFO
+func (p *Patcher) SetQuiet(quiet bool) {
+	p.quiet = quiet
+}
+
+
 // read a single YAML file, check for errors (the merge package doesn't) then return the content as bytes.
 // read a single YAML file, check for errors (the merge package doesn't) then return the content as bytes.
 func readYAML(filePath string) ([]byte, error) {
 func readYAML(filePath string) ([]byte, error) {
 	var content []byte
 	var content []byte
@@ -55,13 +64,19 @@ func (p *Patcher) MergedPatchContent() ([]byte, error) {
 	var over []byte
 	var over []byte
 
 
 	over, err = readYAML(p.PatchFilePath)
 	over, err = readYAML(p.PatchFilePath)
-	// optional file, ignore if it does not exist
-	if err != nil && !errors.Is(err, os.ErrNotExist) {
+	if errors.Is(err, os.ErrNotExist) {
+		return base, nil
+	}
+
+	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	if err == nil {
-		log.Infof("Patching yaml: '%s' with '%s'", p.BaseFilePath, p.PatchFilePath)
+
+	logf := log.Infof
+	if p.quiet {
+		logf = log.Debugf
 	}
 	}
+	logf("Patching yaml: '%s' with '%s'", p.BaseFilePath, p.PatchFilePath)
 
 
 	var patched *bytes.Buffer
 	var patched *bytes.Buffer
 
 
@@ -138,7 +153,11 @@ func (p *Patcher) PrependedPatchContent() ([]byte, error) {
 		if err = decodeDocuments(patchFile, &result, true); err != nil {
 		if err = decodeDocuments(patchFile, &result, true); err != nil {
 			return nil, err
 			return nil, err
 		}
 		}
-		log.Infof("Prepending yaml: '%s' with '%s'", p.BaseFilePath, p.PatchFilePath)
+		logf := log.Infof
+		if p.quiet {
+			logf = log.Debugf
+		}
+		logf("Prepending yaml: '%s' with '%s'", p.BaseFilePath, p.PatchFilePath)
 	}
 	}
 
 
 	baseFile, err = os.Open(p.BaseFilePath)
 	baseFile, err = os.Open(p.BaseFilePath)

+ 1 - 1
rpm/SOURCES/crowdsec.unit.patch

@@ -10,4 +10,4 @@
 +ExecStart=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml
 +ExecStart=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml
  #ExecStartPost=/bin/sleep 0.1
  #ExecStartPost=/bin/sleep 0.1
  ExecReload=/bin/kill -HUP $MAINPID
  ExecReload=/bin/kill -HUP $MAINPID
- 
+ Restart=always

+ 3 - 0
rpm/SPECS/crowdsec.spec

@@ -45,6 +45,7 @@ sed -i "s#/usr/local/lib/crowdsec/plugins/#%{_libdir}/%{name}/plugins/#g" config
 rm -rf %{buildroot}
 rm -rf %{buildroot}
 mkdir -p %{buildroot}/etc/crowdsec/hub
 mkdir -p %{buildroot}/etc/crowdsec/hub
 mkdir -p %{buildroot}/etc/crowdsec/patterns
 mkdir -p %{buildroot}/etc/crowdsec/patterns
+mkdir -p %{buildroot}/etc/crowdsec/console/
 mkdir -p %{buildroot}%{_sharedstatedir}/%{name}/data
 mkdir -p %{buildroot}%{_sharedstatedir}/%{name}/data
 mkdir -p %{buildroot}%{_presetdir}
 mkdir -p %{buildroot}%{_presetdir}
 
 
@@ -62,6 +63,7 @@ install -m 600 -D config/config.yaml %{buildroot}%{_sysconfdir}/crowdsec
 install -m 644 -D config/simulation.yaml %{buildroot}%{_sysconfdir}/crowdsec
 install -m 644 -D config/simulation.yaml %{buildroot}%{_sysconfdir}/crowdsec
 install -m 644 -D config/profiles.yaml %{buildroot}%{_sysconfdir}/crowdsec
 install -m 644 -D config/profiles.yaml %{buildroot}%{_sysconfdir}/crowdsec
 install -m 644 -D config/console.yaml %{buildroot}%{_sysconfdir}/crowdsec
 install -m 644 -D config/console.yaml %{buildroot}%{_sysconfdir}/crowdsec
+install -m 644 -D config/context.yaml %{buildroot}%{_sysconfdir}/crowdsec/console/
 install -m 750 -D config/%{name}.cron.daily %{buildroot}%{_sysconfdir}/cron.daily/%{name}
 install -m 750 -D config/%{name}.cron.daily %{buildroot}%{_sysconfdir}/cron.daily/%{name}
 install -m 644 -D %{SOURCE1} %{buildroot}%{_presetdir}
 install -m 644 -D %{SOURCE1} %{buildroot}%{_presetdir}
 
 
@@ -115,6 +117,7 @@ rm -rf %{buildroot}
 %config(noreplace) %{_sysconfdir}/%{name}/simulation.yaml
 %config(noreplace) %{_sysconfdir}/%{name}/simulation.yaml
 %config(noreplace) %{_sysconfdir}/%{name}/profiles.yaml
 %config(noreplace) %{_sysconfdir}/%{name}/profiles.yaml
 %config(noreplace) %{_sysconfdir}/%{name}/console.yaml
 %config(noreplace) %{_sysconfdir}/%{name}/console.yaml
+%config(noreplace) %{_sysconfdir}/%{name}/console/context.yaml
 %config(noreplace) %{_presetdir}/80-%{name}.preset
 %config(noreplace) %{_presetdir}/80-%{name}.preset
 %config(noreplace) %{_sysconfdir}/%{name}/notifications/http.yaml
 %config(noreplace) %{_sysconfdir}/%{name}/notifications/http.yaml
 %config(noreplace) %{_sysconfdir}/%{name}/notifications/slack.yaml
 %config(noreplace) %{_sysconfdir}/%{name}/notifications/slack.yaml

Some files were not shown because too many files changed in this diff