Merge branch 'master' into matrix_version

This commit is contained in:
Manuel Sabban 2023-01-12 16:14:44 +01:00 committed by GitHub
commit c8ce9049f7
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
130 changed files with 4685 additions and 1713 deletions

View file

@ -10,6 +10,7 @@ on:
env:
PREFIX_TEST_NAMES_WITH_FILE: true
jobs:
build:

View file

@ -11,6 +11,18 @@ on:
- 'README.md'
jobs:
get_latest_release:
name: get_latest_release
runs-on: ubuntu-latest
permissions:
contents: read
steps:
- id: get_latest_release
uses: cardinalby/git-get-release-action@cedef2faf69cb7c55b285bad07688d04430b7ada
env:
GITHUB_TOKEN: ${{ github.token }}
with:
latest: true
build:
name: Build
@ -23,13 +35,8 @@ jobs:
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- id: get_latest_release
uses: pozetroninc/github-action-get-latest-release@master
with:
repository: crowdsecurity/crowdsec
excludes: draft
- id: set_release_in_env
run: echo "BUILD_VERSION=${{ steps.get_latest_release.outputs.release }}" >> $env:GITHUB_ENV
run: echo "BUILD_VERSION=${{ jobs.get_latest_release.outputs.tag_name }}" >> $env:GITHUB_ENV
- name: Build
run: make windows_installer
- name: Upload MSI

View file

@ -2,23 +2,33 @@ name: Dispatch to hub when creating pre-release
on:
release:
types: prereleased
types:
- prereleased
jobs:
get_latest_release:
name: get_latest_release
runs-on: ubuntu-latest
permissions:
contents: read
steps:
- id: get_latest_release
uses: cardinalby/git-get-release-action@cedef2faf69cb7c55b285bad07688d04430b7ada
env:
GITHUB_TOKEN: ${{ github.token }}
with:
latest: true
draft: false
prerelease: false
dispatch:
name: dispatch to hub-tests
runs-on: ubuntu-latest
steps:
- id: keydb
uses: pozetroninc/github-action-get-latest-release@master
with:
owner: crowdsecurity
repo: crowdsec
excludes: prerelease, draft
- name: Repository Dispatch
uses: peter-evans/repository-dispatch@v1
with:
token: ${{ secrets.DISPATCH_TOKEN }}
event-type: create_branch
repository: crowdsecurity/hub
client-payload: '{"version": "${{ steps.keydb.outputs.release }}"}'
client-payload: '{"version": "${{ jobs.get_latest_release.outputs.tag_name }}"}'

View file

@ -2,23 +2,33 @@ name: Dispatch to hub when deleting pre-release
on:
release:
types: deleted
types:
- deleted
jobs:
get_latest_release:
name: get_latest_release
runs-on: ubuntu-latest
permissions:
contents: read
steps:
- id: get_latest_release
uses: cardinalby/git-get-release-action@cedef2faf69cb7c55b285bad07688d04430b7ada
env:
GITHUB_TOKEN: ${{ github.token }}
with:
latest: true
draft: false
prerelease: false
dispatch:
name: dispatch to hub-tests
runs-on: ubuntu-latest
steps:
- id: keydb
uses: pozetroninc/github-action-get-latest-release@master
with:
owner: crowdsecurity
repo: crowdsec
excludes: prerelease, draft
- name: Repository Dispatch
uses: peter-evans/repository-dispatch@v1
with:
token: ${{ secrets.DISPATCH_TOKEN }}
event-type: delete_branch
repository: crowdsecurity/hub
client-payload: '{"version": "${{ steps.keydb.outputs.release }}"}'
client-payload: '{"version": "${{ jobs.get_latest_release.outputs.tag_name }}"}'

View file

@ -16,6 +16,7 @@ on:
env:
RICHGO_FORCE_COLOR: 1
CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF: true
jobs:

View file

@ -23,7 +23,6 @@ on:
env:
RICHGO_FORCE_COLOR: 1
AWS_HOST: localstack
SERVICES: cloudwatch,logs,kinesis
# these are to mimic aws config
AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
@ -31,29 +30,27 @@ env:
# and to override our endpoint in aws sdk
AWS_ENDPOINT_FORCE: http://localhost:4566
KINESIS_INITIALIZE_STREAMS: "stream-1-shard:1,stream-2-shards:2"
CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF: true
jobs:
build:
strategy:
matrix:
go-version: [1.18, 1.x]
go-version: ["1.19", "1.20.0-rc.1"]
name: "Build + tests"
runs-on: ubuntu-latest
services:
localstack:
image: localstack/localstack:0.13.3
image: localstack/localstack:1.3.0
ports:
- 4566:4566 # Localstack exposes all services on the same port
env:
SERVICES: ${{ env.SERVICES }}
DEBUG: ""
DATA_DIR: ""
LAMBDA_EXECUTOR: ""
KINESIS_ERROR_PROBABILITY: ""
DOCKER_HOST: unix:///var/run/docker.sock
HOST_TMP_FOLDER: "/tmp"
KINESIS_INITIALIZE_STREAMS: ${{ env.KINESIS_INITIALIZE_STREAMS }}
HOSTNAME_EXTERNAL: ${{ env.AWS_HOST }} # Required so that resource urls are provided properly
# e.g sqs url will get localhost if we don't set this env to map our service
@ -64,7 +61,7 @@ jobs:
--health-timeout=5s
--health-retries=3
zoo1:
image: confluentinc/cp-zookeeper:7.1.1
image: confluentinc/cp-zookeeper:7.3.0
ports:
- "2181:2181"
env:
@ -128,6 +125,18 @@ jobs:
fetch-depth: 0
submodules: false
- name: Cache Go modules
uses: actions/cache@v2
with:
path: |
~/go/pkg/mod
~/.cache/go-build
~/Library/Caches/go-build
%LocalAppData%\go-build
key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-${{ matrix.go-version }}-go-
- name: Build and run tests
run: |
go install github.com/ory/go-acc@v0.2.8

View file

@ -3,46 +3,47 @@ name: build
on:
release:
types: prereleased
types:
- prereleased
jobs:
build:
name: Build and upload binary package
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.19
uses: actions/setup-go@v3
with:
go-version: 1.19
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v3
- name: Build the binaries
run: make release
- name: Upload to release
uses: JasonEtco/upload-to-release@master
with:
args: crowdsec-release.tgz application/x-gzip
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Go 1.19
uses: actions/setup-go@v3
with:
go-version: 1.19
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v3
- name: Build the binaries
run: make release
- name: Upload to release
uses: JasonEtco/upload-to-release@master
with:
args: crowdsec-release.tgz application/x-gzip
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
build_static:
name: Build and upload binary package
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.19
uses: actions/setup-go@v3
with:
go-version: 1.19
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v3
- name: Build the binaries
run: |
make release BUILD_STATIC=yes
mv crowdsec-release.tgz crowdsec-release-static.tgz
- name: Upload to release
uses: JasonEtco/upload-to-release@master
with:
args: crowdsec-release-static.tgz application/x-gzip
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Go 1.19
uses: actions/setup-go@v3
with:
go-version: 1.19
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v3
- name: Build the binaries
run: |
make release BUILD_STATIC=yes
mv crowdsec-release.tgz crowdsec-release-static.tgz
- name: Upload to release
uses: JasonEtco/upload-to-release@master
with:
args: crowdsec-release-static.tgz application/x-gzip
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View file

@ -37,19 +37,19 @@ jobs:
echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
-
name: Set up QEMU
uses: docker/setup-qemu-action@v1
uses: docker/setup-qemu-action@v2
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v2
-
name: Login to DockerHub
uses: docker/login-action@v1
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Build and push
uses: docker/build-push-action@v2
uses: docker/build-push-action@v3
with:
context: .
file: ./Dockerfile.debian

View file

@ -11,11 +11,9 @@ jobs:
name: Push Docker image to Docker Hub
runs-on: ubuntu-latest
steps:
-
name: Check out the repo
- name: Check out the repo
uses: actions/checkout@v3
-
name: Prepare
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=crowdsecurity/crowdsec
@ -32,21 +30,18 @@ jobs:
TAGS_SLIM="${DOCKER_IMAGE}:${VERSION}-slim"
if [[ ${{ github.event.action }} == released ]]; then
TAGS=$TAGS,${DOCKER_IMAGE}:latest,${GHCR_IMAGE}:latest
TAGS_SLIM=$TAGS,${DOCKER_IMAGE}:slim
TAGS_SLIM=$TAGS_SLIM,${DOCKER_IMAGE}:slim
fi
echo ::set-output name=version::${VERSION}
echo ::set-output name=tags::${TAGS}
echo ::set-output name=tags_slim::${TAGS_SLIM}
echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
-
name: Set up QEMU
uses: docker/setup-qemu-action@v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
-
name: Login to DockerHub
uses: docker/login-action@v1
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
@ -58,9 +53,8 @@ jobs:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
-
name: Build and push slim image
uses: docker/build-push-action@v2
- name: Build and push slim image
uses: docker/build-push-action@v3
with:
context: .
file: ./Dockerfile
@ -74,9 +68,8 @@ jobs:
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
org.opencontainers.image.revision=${{ github.sha }}
-
name: Build and push full image
uses: docker/build-push-action@v2
- name: Build and push full image
uses: docker/build-push-action@v3
with:
context: .
file: ./Dockerfile

View file

@ -10,87 +10,30 @@ COPY . .
# wizard.sh requires GNU coreutils
RUN apk add --no-cache git gcc libc-dev make bash gettext binutils-gold coreutils && \
echo "githubciXXXXXXXXXXXXXXXXXXXXXXXX" > /etc/machine-id && \
SYSTEM="docker" make clean release && \
cd crowdsec-v* && \
./wizard.sh --docker-mode && \
cd - && \
cd - >/dev/null && \
cscli hub update && \
cscli collections install crowdsecurity/linux && \
cscli parsers install crowdsecurity/whitelists
cscli parsers install crowdsecurity/whitelists && \
go install github.com/mikefarah/yq/v4@v4.30.6
FROM alpine:latest as build-slim
RUN apk add --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/community tzdata yq bash && \
RUN apk add --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/community tzdata bash && \
mkdir -p /staging/etc/crowdsec && \
mkdir -p /staging/var/lib/crowdsec && \
mkdir -p /var/lib/crowdsec/data \
yq -n '.url="http://0.0.0.0:8080"' | install -m 0600 /dev/stdin /staging/etc/crowdsec/local_api_credentials.yaml
mkdir -p /var/lib/crowdsec/data
COPY --from=build /go/bin/yq /usr/local/bin/yq
COPY --from=build /etc/crowdsec /staging/etc/crowdsec
COPY --from=build /usr/local/bin/crowdsec /usr/local/bin/crowdsec
COPY --from=build /usr/local/bin/cscli /usr/local/bin/cscli
COPY --from=build /go/src/crowdsec/docker/docker_start.sh /
COPY --from=build /go/src/crowdsec/docker/config.yaml /staging/etc/crowdsec/config.yaml
# NOTE: setting default values here will overwrite the ones set in config.yaml
# every time the container is started. We set the default in docker/config.yaml
# and document them in docker/README.md, but keep the variables empty here.
ENV CONFIG_FILE=/etc/crowdsec/config.yaml
ENV LOCAL_API_URL=
ENV CUSTOM_HOSTNAME=localhost
ENV PLUGIN_DIR=
ENV DISABLE_AGENT=false
ENV DISABLE_LOCAL_API=false
ENV DISABLE_ONLINE_API=false
ENV DSN=
ENV TYPE=
ENV TEST_MODE=false
ENV USE_WAL=
# register to app.crowdsec.net
ENV ENROLL_INSTANCE_NAME=
ENV ENROLL_KEY=
ENV ENROLL_TAGS=
# log verbosity
ENV LEVEL_TRACE=
ENV LEVEL_DEBUG=
ENV LEVEL_INFO=
# TLS setup ----------------------------------- #
ENV AGENT_USERNAME=
ENV AGENT_PASSWORD=
# TLS setup ----------------------------------- #
ENV USE_TLS=false
ENV CACERT_FILE=
ENV CERT_FILE=
ENV KEY_FILE=
# comma-separated list of allowed OU values for TLS bouncer certificates
ENV BOUNCERS_ALLOWED_OU=
# comma-separated list of allowed OU values for TLS agent certificates
ENV AGENTS_ALLOWED_OU=
# Install the following hub items --------------#
ENV COLLECTIONS=
ENV PARSERS=
ENV SCENARIOS=
ENV POSTOVERFLOWS=
# Uninstall the following hub items ------------#
ENV DISABLE_COLLECTIONS=
ENV DISABLE_PARSERS=
ENV DISABLE_SCENARIOS=
ENV DISABLE_POSTOVERFLOWS=
ENV METRICS_PORT=
RUN yq -n '.url="http://0.0.0.0:8080"' | install -m 0600 /dev/stdin /staging/etc/crowdsec/local_api_credentials.yaml
ENTRYPOINT /bin/bash docker_start.sh

View file

@ -14,17 +14,21 @@ ENV DEBCONF_NOWARNINGS="yes"
# wizard.sh requires GNU coreutils
RUN apt-get update && \
apt-get install -y -q git gcc libc-dev make bash gettext binutils-gold coreutils tzdata && \
SYSTEM="docker" make release && \
echo "githubciXXXXXXXXXXXXXXXXXXXXXXXX" > /etc/machine-id && \
SYSTEM="docker" make clean release && \
cd crowdsec-v* && \
./wizard.sh --docker-mode && \
cd - && \
cd - >/dev/null && \
cscli hub update && \
cscli collections install crowdsecurity/linux && \
cscli parsers install crowdsecurity/whitelists && \
go install github.com/mikefarah/yq/v4@v4.30.5
go install github.com/mikefarah/yq/v4@v4.30.6
FROM debian:bullseye-slim as build-slim
ENV DEBIAN_FRONTEND=noninteractive
ENV DEBCONF_NOWARNINGS="yes"
RUN apt-get update && \
apt-get install -y -q --install-recommends --no-install-suggests \
procps \
@ -35,8 +39,7 @@ RUN apt-get update && \
tzdata && \
mkdir -p /staging/etc/crowdsec && \
mkdir -p /staging/var/lib/crowdsec && \
mkdir -p /var/lib/crowdsec/data \
yq -n '.url="http://0.0.0.0:8080"' | install -m 0600 /dev/stdin /staging/etc/crowdsec/local_api_credentials.yaml
mkdir -p /var/lib/crowdsec/data
COPY --from=build /go/bin/yq /usr/local/bin/yq
COPY --from=build /etc/crowdsec /staging/etc/crowdsec
@ -44,67 +47,9 @@ COPY --from=build /usr/local/bin/crowdsec /usr/local/bin/crowdsec
COPY --from=build /usr/local/bin/cscli /usr/local/bin/cscli
COPY --from=build /go/src/crowdsec/docker/docker_start.sh /
COPY --from=build /go/src/crowdsec/docker/config.yaml /staging/etc/crowdsec/config.yaml
RUN yq eval -i ".plugin_config.group = \"nogroup\"" /staging/etc/crowdsec/config.yaml
RUN yq -n '.url="http://0.0.0.0:8080"' | install -m 0600 /dev/stdin /staging/etc/crowdsec/local_api_credentials.yaml && \
yq eval -i ".plugin_config.group = \"nogroup\"" /staging/etc/crowdsec/config.yaml
# NOTE: setting default values here will overwrite the ones set in config.yaml
# every time the container is started. We set the default in docker/config.yaml
# and document them in docker/README.md, but keep the variables empty here.
ENV CONFIG_FILE=/etc/crowdsec/config.yaml
ENV LOCAL_API_URL=
ENV CUSTOM_HOSTNAME=localhost
ENV PLUGIN_DIR=
ENV DISABLE_AGENT=false
ENV DISABLE_LOCAL_API=false
ENV DISABLE_ONLINE_API=false
ENV DSN=
ENV TYPE=
ENV TEST_MODE=false
ENV USE_WAL=
# register to app.crowdsec.net
ENV ENROLL_INSTANCE_NAME=
ENV ENROLL_KEY=
ENV ENROLL_TAGS=
# log verbosity
ENV LEVEL_TRACE=
ENV LEVEL_DEBUG=
ENV LEVEL_INFO=
# TLS setup ----------------------------------- #
ENV AGENT_USERNAME=
ENV AGENT_PASSWORD=
# TLS setup ----------------------------------- #
ENV USE_TLS=false
ENV CACERT_FILE=
ENV CERT_FILE=
ENV KEY_FILE=
# comma-separated list of allowed OU values for TLS bouncer certificates
ENV BOUNCERS_ALLOWED_OU=
# comma-separated list of allowed OU values for TLS agent certificates
ENV AGENTS_ALLOWED_OU=
# Install the following hub items --------------#
ENV COLLECTIONS=
ENV PARSERS=
ENV SCENARIOS=
ENV POSTOVERFLOWS=
# Uninstall the following hub items ------------#
ENV DISABLE_COLLECTIONS=
ENV DISABLE_PARSERS=
ENV DISABLE_SCENARIOS=
ENV DISABLE_POSTOVERFLOWS=
ENV METRICS_PORT=
ENTRYPOINT /bin/bash docker_start.sh

View file

@ -45,7 +45,7 @@ CSCLI_BIN = cscli$(EXT)
BUILD_CMD = build
MINIMUM_SUPPORTED_GO_MAJOR_VERSION = 1
MINIMUM_SUPPORTED_GO_MINOR_VERSION = 18
MINIMUM_SUPPORTED_GO_MINOR_VERSION = 19
go_major_minor = $(subst ., ,$(BUILD_GOVERSION))
GO_MAJOR_VERSION = $(word 1, $(go_major_minor))

View file

@ -7,6 +7,7 @@ import (
"fmt"
"net/url"
"os"
"sort"
"strconv"
"strings"
@ -112,6 +113,29 @@ func DisplayOneAlert(alert *models.Alert, withDetail bool) error {
alertDecisionsTable(color.Output, alert)
if len(alert.Meta) > 0 {
fmt.Printf("\n - Context :\n")
sort.Slice(alert.Meta, func(i, j int) bool {
return alert.Meta[i].Key < alert.Meta[j].Key
})
table := newTable(color.Output)
table.SetRowLines(false)
table.SetHeaders("Key", "Value")
for _, meta := range alert.Meta {
var valSlice []string
if err := json.Unmarshal([]byte(meta.Value), &valSlice); err != nil {
return fmt.Errorf("unknown context value type '%s' : %s", meta.Value, err)
}
for _, value := range valSlice {
table.AddRow(
meta.Key,
value,
)
}
}
table.Render()
}
if withDetail {
fmt.Printf("\n - Events :\n")
for _, event := range alert.Events {
@ -419,9 +443,6 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`,
if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI {
log.Fatal("Local API is disabled, please run this command on the local API machine")
}
if err := csConfig.LoadDBConfig(); err != nil {
log.Fatal(err)
}
dbClient, err = database.NewClient(csConfig.DbConfig)
if err != nil {
log.Fatalf("unable to create new database client: %s", err)

View file

@ -18,10 +18,6 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/types"
)
var keyIP string
var keyLength int
var key string
func getBouncers(out io.Writer, dbClient *database.Client) error {
bouncers, err := dbClient.ListBouncers()
if err != nil {
@ -59,33 +55,8 @@ func getBouncers(out io.Writer, dbClient *database.Client) error {
return nil
}
func NewBouncersCmd() *cobra.Command {
/* ---- DECISIONS COMMAND */
var cmdBouncers = &cobra.Command{
Use: "bouncers [action]",
Short: "Manage bouncers [requires local API]",
Long: `To list/add/delete bouncers.
Note: This command requires database direct access, so is intended to be run on Local API/master.
`,
Args: cobra.MinimumNArgs(1),
Aliases: []string{"bouncer"},
DisableAutoGenTag: true,
PersistentPreRun: func(cmd *cobra.Command, args []string) {
var err error
if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI {
log.Fatal("Local API is disabled, please run this command on the local API machine")
}
if err := csConfig.LoadDBConfig(); err != nil {
log.Fatal(err)
}
dbClient, err = database.NewClient(csConfig.DbConfig)
if err != nil {
log.Fatalf("unable to create new database client: %s", err)
}
},
}
var cmdBouncersList = &cobra.Command{
func NewBouncersListCmd() *cobra.Command {
cmdBouncersList := &cobra.Command{
Use: "list",
Short: "List bouncers",
Long: `List bouncers`,
@ -99,9 +70,61 @@ Note: This command requires database direct access, so is intended to be run on
}
},
}
cmdBouncers.AddCommand(cmdBouncersList)
var cmdBouncersAdd = &cobra.Command{
return cmdBouncersList
}
func runBouncersAdd(cmd *cobra.Command, args []string) error {
flags := cmd.Flags()
keyLength, err := flags.GetInt("length")
if err != nil {
return err
}
key, err := flags.GetString("key")
if err != nil {
return err
}
keyName := args[0]
var apiKey string
if keyName == "" {
log.Fatalf("Please provide a name for the api key")
}
apiKey = key
if key == "" {
apiKey, err = middlewares.GenerateAPIKey(keyLength)
}
if err != nil {
log.Fatalf("unable to generate api key: %s", err)
}
_, err = dbClient.CreateBouncer(keyName, "", middlewares.HashSHA512(apiKey), types.ApiKeyAuthType)
if err != nil {
log.Fatalf("unable to create bouncer: %s", err)
}
if csConfig.Cscli.Output == "human" {
fmt.Printf("Api key for '%s':\n\n", keyName)
fmt.Printf(" %s\n\n", apiKey)
fmt.Print("Please keep this key since you will not be able to retrieve it!\n")
} else if csConfig.Cscli.Output == "raw" {
fmt.Printf("%s", apiKey)
} else if csConfig.Cscli.Output == "json" {
j, err := json.Marshal(apiKey)
if err != nil {
log.Fatalf("unable to marshal api key")
}
fmt.Printf("%s", string(j))
}
return nil
}
func NewBouncersAddCmd() *cobra.Command {
cmdBouncersAdd := &cobra.Command{
Use: "add MyBouncerName [--length 16]",
Short: "add bouncer",
Long: `add bouncer`,
@ -110,45 +133,33 @@ cscli bouncers add MyBouncerName -l 24
cscli bouncers add MyBouncerName -k %s`, generatePassword(32)),
Args: cobra.ExactArgs(1),
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, arg []string) {
keyName := arg[0]
var apiKey string
var err error
if keyName == "" {
log.Fatalf("Please provide a name for the api key")
}
apiKey = key
if key == "" {
apiKey, err = middlewares.GenerateAPIKey(keyLength)
}
if err != nil {
log.Fatalf("unable to generate api key: %s", err)
}
_, err = dbClient.CreateBouncer(keyName, keyIP, middlewares.HashSHA512(apiKey), types.ApiKeyAuthType)
if err != nil {
log.Fatalf("unable to create bouncer: %s", err)
}
if csConfig.Cscli.Output == "human" {
fmt.Printf("Api key for '%s':\n\n", keyName)
fmt.Printf(" %s\n\n", apiKey)
fmt.Print("Please keep this key since you will not be able to retrieve it!\n")
} else if csConfig.Cscli.Output == "raw" {
fmt.Printf("%s", apiKey)
} else if csConfig.Cscli.Output == "json" {
j, err := json.Marshal(apiKey)
if err != nil {
log.Fatalf("unable to marshal api key")
}
fmt.Printf("%s", string(j))
}
},
RunE: runBouncersAdd,
}
cmdBouncersAdd.Flags().IntVarP(&keyLength, "length", "l", 16, "length of the api key")
cmdBouncersAdd.Flags().StringVarP(&key, "key", "k", "", "api key for the bouncer")
cmdBouncers.AddCommand(cmdBouncersAdd)
var cmdBouncersDelete = &cobra.Command{
flags := cmdBouncersAdd.Flags()
flags.IntP("length", "l", 16, "length of the api key")
flags.StringP("key", "k", "", "api key for the bouncer")
return cmdBouncersAdd
}
func runBouncersDelete(cmd *cobra.Command, args []string) error {
for _, bouncerID := range args {
err := dbClient.DeleteBouncer(bouncerID)
if err != nil {
log.Fatalf("unable to delete bouncer '%s': %s", bouncerID, err)
}
log.Infof("bouncer '%s' deleted successfully", bouncerID)
}
return nil
}
func NewBouncersDeleteCmd() *cobra.Command {
cmdBouncersDelete := &cobra.Command{
Use: "delete MyBouncerName",
Short: "delete bouncer",
Args: cobra.MinimumNArgs(1),
@ -173,16 +184,38 @@ cscli bouncers add MyBouncerName -k %s`, generatePassword(32)),
}
return ret, cobra.ShellCompDirectiveNoFileComp
},
Run: func(cmd *cobra.Command, args []string) {
for _, bouncerID := range args {
err := dbClient.DeleteBouncer(bouncerID)
if err != nil {
log.Fatalf("unable to delete bouncer '%s': %s", bouncerID, err)
}
log.Infof("bouncer '%s' deleted successfully", bouncerID)
RunE: runBouncersDelete,
}
return cmdBouncersDelete
}
func NewBouncersCmd() *cobra.Command {
/* ---- DECISIONS COMMAND */
var cmdBouncers = &cobra.Command{
Use: "bouncers [action]",
Short: "Manage bouncers [requires local API]",
Long: `To list/add/delete bouncers.
Note: This command requires database direct access, so is intended to be run on Local API/master.
`,
Args: cobra.MinimumNArgs(1),
Aliases: []string{"bouncer"},
DisableAutoGenTag: true,
PersistentPreRun: func(cmd *cobra.Command, args []string) {
var err error
if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI {
log.Fatal("Local API is disabled, please run this command on the local API machine")
}
dbClient, err = database.NewClient(csConfig.DbConfig)
if err != nil {
log.Fatalf("unable to create new database client: %s", err)
}
},
}
cmdBouncers.AddCommand(cmdBouncersDelete)
cmdBouncers.AddCommand(NewBouncersListCmd())
cmdBouncers.AddCommand(NewBouncersAddCmd())
cmdBouncers.AddCommand(NewBouncersDeleteCmd())
return cmdBouncers
}

View file

@ -22,6 +22,7 @@ import (
var CAPIURLPrefix string = "v2"
var CAPIBaseURL string = "https://api.crowdsec.net/"
var capiUserPrefix string
var outputFile string
func NewCapiCmd() *cobra.Command {
var cmdCapi = &cobra.Command{

View file

@ -1,506 +1,21 @@
package main
import (
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"github.com/antonmedv/expr"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
"github.com/crowdsecurity/crowdsec/pkg/types"
)
type OldAPICfg struct {
MachineID string `json:"machine_id"`
Password string `json:"password"`
}
/* Backup crowdsec configurations to directory <dirPath> :
- Main config (config.yaml)
- Profiles config (profiles.yaml)
- Simulation config (simulation.yaml)
- Backup of API credentials (local API and online API)
- List of scenarios, parsers, postoverflows and collections that are up-to-date
- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections
*/
func backupConfigToDirectory(dirPath string) error {
var err error
if dirPath == "" {
return fmt.Errorf("directory path can't be empty")
}
log.Infof("Starting configuration backup")
/*if parent directory doesn't exist, bail out. create final dir with Mkdir*/
parentDir := filepath.Dir(dirPath)
if _, err := os.Stat(parentDir); err != nil {
return errors.Wrapf(err, "while checking parent directory %s existence", parentDir)
}
if err = os.Mkdir(dirPath, 0700); err != nil {
return errors.Wrapf(err, "while creating %s", dirPath)
}
if csConfig.ConfigPaths.SimulationFilePath != "" {
backupSimulation := filepath.Join(dirPath, "simulation.yaml")
if err = types.CopyFile(csConfig.ConfigPaths.SimulationFilePath, backupSimulation); err != nil {
return errors.Wrapf(err, "failed copy %s to %s", csConfig.ConfigPaths.SimulationFilePath, backupSimulation)
}
log.Infof("Saved simulation to %s", backupSimulation)
}
/*
- backup AcquisitionFilePath
- backup the other files of acquisition directory
*/
if csConfig.Crowdsec != nil && csConfig.Crowdsec.AcquisitionFilePath != "" {
backupAcquisition := filepath.Join(dirPath, "acquis.yaml")
if err = types.CopyFile(csConfig.Crowdsec.AcquisitionFilePath, backupAcquisition); err != nil {
return fmt.Errorf("failed copy %s to %s : %s", csConfig.Crowdsec.AcquisitionFilePath, backupAcquisition, err)
}
}
acquisBackupDir := filepath.Join(dirPath, "acquis")
if err = os.Mkdir(acquisBackupDir, 0700); err != nil {
return fmt.Errorf("error while creating %s : %s", acquisBackupDir, err)
}
if csConfig.Crowdsec != nil && len(csConfig.Crowdsec.AcquisitionFiles) > 0 {
for _, acquisFile := range csConfig.Crowdsec.AcquisitionFiles {
/*if it was the default one, it was already backup'ed*/
if csConfig.Crowdsec.AcquisitionFilePath == acquisFile {
continue
}
targetFname, err := filepath.Abs(filepath.Join(acquisBackupDir, filepath.Base(acquisFile)))
if err != nil {
return errors.Wrapf(err, "while saving %s to %s", acquisFile, acquisBackupDir)
}
if err = types.CopyFile(acquisFile, targetFname); err != nil {
return fmt.Errorf("failed copy %s to %s : %s", acquisFile, targetFname, err)
}
log.Infof("Saved acquis %s to %s", acquisFile, targetFname)
}
}
if ConfigFilePath != "" {
backupMain := fmt.Sprintf("%s/config.yaml", dirPath)
if err = types.CopyFile(ConfigFilePath, backupMain); err != nil {
return fmt.Errorf("failed copy %s to %s : %s", ConfigFilePath, backupMain, err)
}
log.Infof("Saved default yaml to %s", backupMain)
}
if csConfig.API != nil && csConfig.API.Server != nil && csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.CredentialsFilePath != "" {
backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath)
if err = types.CopyFile(csConfig.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds); err != nil {
return fmt.Errorf("failed copy %s to %s : %s", csConfig.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds, err)
}
log.Infof("Saved online API credentials to %s", backupCAPICreds)
}
if csConfig.API != nil && csConfig.API.Client != nil && csConfig.API.Client.CredentialsFilePath != "" {
backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath)
if err = types.CopyFile(csConfig.API.Client.CredentialsFilePath, backupLAPICreds); err != nil {
return fmt.Errorf("failed copy %s to %s : %s", csConfig.API.Client.CredentialsFilePath, backupLAPICreds, err)
}
log.Infof("Saved local API credentials to %s", backupLAPICreds)
}
if csConfig.API != nil && csConfig.API.Server != nil && csConfig.API.Server.ProfilesPath != "" {
backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath)
if err = types.CopyFile(csConfig.API.Server.ProfilesPath, backupProfiles); err != nil {
return fmt.Errorf("failed copy %s to %s : %s", csConfig.API.Server.ProfilesPath, backupProfiles, err)
}
log.Infof("Saved profiles to %s", backupProfiles)
}
if err = BackupHub(dirPath); err != nil {
return fmt.Errorf("failed to backup hub config : %s", err)
}
return nil
}
/* Restore crowdsec configurations to directory <dirPath> :
- Main config (config.yaml)
- Profiles config (profiles.yaml)
- Simulation config (simulation.yaml)
- Backup of API credentials (local API and online API)
- List of scenarios, parsers, postoverflows and collections that are up-to-date
- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections
*/
func restoreConfigFromDirectory(dirPath string) error {
var err error
if !restoreOldBackup {
backupMain := fmt.Sprintf("%s/config.yaml", dirPath)
if _, err = os.Stat(backupMain); err == nil {
if csConfig.ConfigPaths != nil && csConfig.ConfigPaths.ConfigDir != "" {
if err = types.CopyFile(backupMain, fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir)); err != nil {
return fmt.Errorf("failed copy %s to %s : %s", backupMain, csConfig.ConfigPaths.ConfigDir, err)
}
}
}
// Now we have config.yaml, we should regenerate config struct to have rights paths etc
ConfigFilePath = fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir)
initConfig()
backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath)
if _, err = os.Stat(backupCAPICreds); err == nil {
if err = types.CopyFile(backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath); err != nil {
return fmt.Errorf("failed copy %s to %s : %s", backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath, err)
}
}
backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath)
if _, err = os.Stat(backupLAPICreds); err == nil {
if err = types.CopyFile(backupLAPICreds, csConfig.API.Client.CredentialsFilePath); err != nil {
return fmt.Errorf("failed copy %s to %s : %s", backupLAPICreds, csConfig.API.Client.CredentialsFilePath, err)
}
}
backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath)
if _, err = os.Stat(backupProfiles); err == nil {
if err = types.CopyFile(backupProfiles, csConfig.API.Server.ProfilesPath); err != nil {
return fmt.Errorf("failed copy %s to %s : %s", backupProfiles, csConfig.API.Server.ProfilesPath, err)
}
}
} else {
var oldAPICfg OldAPICfg
backupOldAPICfg := fmt.Sprintf("%s/api_creds.json", dirPath)
jsonFile, err := os.Open(backupOldAPICfg)
if err != nil {
log.Warningf("failed to open %s : %s", backupOldAPICfg, err)
} else {
byteValue, _ := io.ReadAll(jsonFile)
err = json.Unmarshal(byteValue, &oldAPICfg)
if err != nil {
return fmt.Errorf("failed to load json file %s : %s", backupOldAPICfg, err)
}
apiCfg := csconfig.ApiCredentialsCfg{
Login: oldAPICfg.MachineID,
Password: oldAPICfg.Password,
URL: CAPIBaseURL,
}
apiConfigDump, err := yaml.Marshal(apiCfg)
if err != nil {
return fmt.Errorf("unable to dump api credentials: %s", err)
}
apiConfigDumpFile := fmt.Sprintf("%s/online_api_credentials.yaml", csConfig.ConfigPaths.ConfigDir)
if csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.CredentialsFilePath != "" {
apiConfigDumpFile = csConfig.API.Server.OnlineClient.CredentialsFilePath
}
err = os.WriteFile(apiConfigDumpFile, apiConfigDump, 0644)
if err != nil {
return fmt.Errorf("write api credentials in '%s' failed: %s", apiConfigDumpFile, err)
}
log.Infof("Saved API credentials to %s", apiConfigDumpFile)
}
}
backupSimulation := fmt.Sprintf("%s/simulation.yaml", dirPath)
if _, err = os.Stat(backupSimulation); err == nil {
if err = types.CopyFile(backupSimulation, csConfig.ConfigPaths.SimulationFilePath); err != nil {
return fmt.Errorf("failed copy %s to %s : %s", backupSimulation, csConfig.ConfigPaths.SimulationFilePath, err)
}
}
/*if there is a acquisition dir, restore its content*/
if csConfig.Crowdsec.AcquisitionDirPath != "" {
if err = os.Mkdir(csConfig.Crowdsec.AcquisitionDirPath, 0700); err != nil {
return fmt.Errorf("error while creating %s : %s", csConfig.Crowdsec.AcquisitionDirPath, err)
}
}
//if there was a single one
backupAcquisition := fmt.Sprintf("%s/acquis.yaml", dirPath)
if _, err = os.Stat(backupAcquisition); err == nil {
log.Debugf("restoring backup'ed %s", backupAcquisition)
if err = types.CopyFile(backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath); err != nil {
return fmt.Errorf("failed copy %s to %s : %s", backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath, err)
}
}
//if there is files in the acquis backup dir, restore them
acquisBackupDir := filepath.Join(dirPath, "acquis", "*.yaml")
if acquisFiles, err := filepath.Glob(acquisBackupDir); err == nil {
for _, acquisFile := range acquisFiles {
targetFname, err := filepath.Abs(csConfig.Crowdsec.AcquisitionDirPath + "/" + filepath.Base(acquisFile))
if err != nil {
return errors.Wrapf(err, "while saving %s to %s", acquisFile, targetFname)
}
log.Debugf("restoring %s to %s", acquisFile, targetFname)
if err = types.CopyFile(acquisFile, targetFname); err != nil {
return fmt.Errorf("failed copy %s to %s : %s", acquisFile, targetFname, err)
}
}
}
if csConfig.Crowdsec != nil && len(csConfig.Crowdsec.AcquisitionFiles) > 0 {
for _, acquisFile := range csConfig.Crowdsec.AcquisitionFiles {
log.Infof("backup filepath from dir -> %s", acquisFile)
/*if it was the default one, it was already backup'ed*/
if csConfig.Crowdsec.AcquisitionFilePath == acquisFile {
log.Infof("skip this one")
continue
}
targetFname, err := filepath.Abs(filepath.Join(acquisBackupDir, filepath.Base(acquisFile)))
if err != nil {
return errors.Wrapf(err, "while saving %s to %s", acquisFile, acquisBackupDir)
}
if err = types.CopyFile(acquisFile, targetFname); err != nil {
return fmt.Errorf("failed copy %s to %s : %s", acquisFile, targetFname, err)
}
log.Infof("Saved acquis %s to %s", acquisFile, targetFname)
}
}
if err = RestoreHub(dirPath); err != nil {
return fmt.Errorf("failed to restore hub config : %s", err)
}
return nil
}
func NewConfigCmd() *cobra.Command {
var cmdConfig = &cobra.Command{
cmdConfig := &cobra.Command{
Use: "config [command]",
Short: "Allows to view current config",
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
}
var key string
type Env struct {
Config *csconfig.Config
}
var cmdConfigShow = &cobra.Command{
Use: "show",
Short: "Displays current config",
Long: `Displays the current cli configuration.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
if key != "" {
program, err := expr.Compile(key, expr.Env(Env{}))
if err != nil {
log.Fatal(err)
}
output, err := expr.Run(program, Env{Config: csConfig})
if err != nil {
log.Fatal(err)
}
switch csConfig.Cscli.Output {
case "human", "raw":
switch output.(type) {
case string:
fmt.Printf("%s\n", output)
case int:
fmt.Printf("%d\n", output)
default:
fmt.Printf("%v\n", output)
}
case "json":
data, err := json.MarshalIndent(output, "", " ")
if err != nil {
log.Fatalf("failed to marshal configuration: %s", err)
}
fmt.Printf("%s\n", string(data))
}
return
}
switch csConfig.Cscli.Output {
case "human":
fmt.Printf("Global:\n")
if csConfig.ConfigPaths != nil {
fmt.Printf(" - Configuration Folder : %s\n", csConfig.ConfigPaths.ConfigDir)
fmt.Printf(" - Data Folder : %s\n", csConfig.ConfigPaths.DataDir)
fmt.Printf(" - Hub Folder : %s\n", csConfig.ConfigPaths.HubDir)
fmt.Printf(" - Simulation File : %s\n", csConfig.ConfigPaths.SimulationFilePath)
}
if csConfig.Common != nil {
fmt.Printf(" - Log Folder : %s\n", csConfig.Common.LogDir)
fmt.Printf(" - Log level : %s\n", csConfig.Common.LogLevel)
fmt.Printf(" - Log Media : %s\n", csConfig.Common.LogMedia)
}
if csConfig.Crowdsec != nil {
fmt.Printf("Crowdsec:\n")
fmt.Printf(" - Acquisition File : %s\n", csConfig.Crowdsec.AcquisitionFilePath)
fmt.Printf(" - Parsers routines : %d\n", csConfig.Crowdsec.ParserRoutinesCount)
if csConfig.Crowdsec.AcquisitionDirPath != "" {
fmt.Printf(" - Acquisition Folder : %s\n", csConfig.Crowdsec.AcquisitionDirPath)
}
}
if csConfig.Cscli != nil {
fmt.Printf("cscli:\n")
fmt.Printf(" - Output : %s\n", csConfig.Cscli.Output)
fmt.Printf(" - Hub Branch : %s\n", csConfig.Cscli.HubBranch)
fmt.Printf(" - Hub Folder : %s\n", csConfig.Cscli.HubDir)
}
if csConfig.API != nil {
if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil {
fmt.Printf("API Client:\n")
fmt.Printf(" - URL : %s\n", csConfig.API.Client.Credentials.URL)
fmt.Printf(" - Login : %s\n", csConfig.API.Client.Credentials.Login)
fmt.Printf(" - Credentials File : %s\n", csConfig.API.Client.CredentialsFilePath)
}
if csConfig.API.Server != nil {
fmt.Printf("Local API Server:\n")
fmt.Printf(" - Listen URL : %s\n", csConfig.API.Server.ListenURI)
fmt.Printf(" - Profile File : %s\n", csConfig.API.Server.ProfilesPath)
if csConfig.API.Server.TLS != nil {
if csConfig.API.Server.TLS.CertFilePath != "" {
fmt.Printf(" - Cert File : %s\n", csConfig.API.Server.TLS.CertFilePath)
}
if csConfig.API.Server.TLS.KeyFilePath != "" {
fmt.Printf(" - Key File : %s\n", csConfig.API.Server.TLS.KeyFilePath)
}
if csConfig.API.Server.TLS.CACertPath != "" {
fmt.Printf(" - CA Cert : %s\n", csConfig.API.Server.TLS.CACertPath)
}
if csConfig.API.Server.TLS.CRLPath != "" {
fmt.Printf(" - CRL : %s\n", csConfig.API.Server.TLS.CRLPath)
}
if csConfig.API.Server.TLS.CacheExpiration != nil {
fmt.Printf(" - Cache Expiration : %s\n", csConfig.API.Server.TLS.CacheExpiration)
}
if csConfig.API.Server.TLS.ClientVerification != "" {
fmt.Printf(" - Client Verification : %s\n", csConfig.API.Server.TLS.ClientVerification)
}
if csConfig.API.Server.TLS.AllowedAgentsOU != nil {
for _, ou := range csConfig.API.Server.TLS.AllowedAgentsOU {
fmt.Printf(" - Allowed Agents OU : %s\n", ou)
}
}
if csConfig.API.Server.TLS.AllowedBouncersOU != nil {
for _, ou := range csConfig.API.Server.TLS.AllowedBouncersOU {
fmt.Printf(" - Allowed Bouncers OU : %s\n", ou)
}
}
}
fmt.Printf(" - Trusted IPs: \n")
for _, ip := range csConfig.API.Server.TrustedIPs {
fmt.Printf(" - %s\n", ip)
}
if csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.Credentials != nil {
fmt.Printf("Central API:\n")
fmt.Printf(" - URL : %s\n", csConfig.API.Server.OnlineClient.Credentials.URL)
fmt.Printf(" - Login : %s\n", csConfig.API.Server.OnlineClient.Credentials.Login)
fmt.Printf(" - Credentials File : %s\n", csConfig.API.Server.OnlineClient.CredentialsFilePath)
}
}
}
if csConfig.DbConfig != nil {
fmt.Printf(" - Database:\n")
fmt.Printf(" - Type : %s\n", csConfig.DbConfig.Type)
switch csConfig.DbConfig.Type {
case "sqlite":
fmt.Printf(" - Path : %s\n", csConfig.DbConfig.DbPath)
default:
fmt.Printf(" - Host : %s\n", csConfig.DbConfig.Host)
fmt.Printf(" - Port : %d\n", csConfig.DbConfig.Port)
fmt.Printf(" - User : %s\n", csConfig.DbConfig.User)
fmt.Printf(" - DB Name : %s\n", csConfig.DbConfig.DbName)
}
if csConfig.DbConfig.Flush != nil {
if *csConfig.DbConfig.Flush.MaxAge != "" {
fmt.Printf(" - Flush age : %s\n", *csConfig.DbConfig.Flush.MaxAge)
}
if *csConfig.DbConfig.Flush.MaxItems != 0 {
fmt.Printf(" - Flush size : %d\n", *csConfig.DbConfig.Flush.MaxItems)
}
}
}
case "json":
data, err := json.MarshalIndent(csConfig, "", " ")
if err != nil {
log.Fatalf("failed to marshal configuration: %s", err)
}
fmt.Printf("%s\n", string(data))
case "raw":
data, err := yaml.Marshal(csConfig)
if err != nil {
log.Fatalf("failed to marshal configuration: %s", err)
}
fmt.Printf("%s\n", string(data))
}
},
}
cmdConfigShow.Flags().StringVar(&key, "key", "", "Display only this value (Config.API.Server.ListenURI)")
cmdConfig.AddCommand(cmdConfigShow)
var cmdConfigBackup = &cobra.Command{
Use: `backup "directory"`,
Short: "Backup current config",
Long: `Backup the current crowdsec configuration including :
- Main config (config.yaml)
- Simulation config (simulation.yaml)
- Profiles config (profiles.yaml)
- List of scenarios, parsers, postoverflows and collections that are up-to-date
- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections
- Backup of API credentials (local API and online API)`,
Example: `cscli config backup ./my-backup`,
Args: cobra.ExactArgs(1),
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
var err error
if err := csConfig.LoadHub(); err != nil {
log.Fatal(err)
}
if err = cwhub.GetHubIdx(csConfig.Hub); err != nil {
log.Info("Run 'sudo cscli hub update' to get the hub index")
log.Fatalf("Failed to get Hub index : %v", err)
}
if err = backupConfigToDirectory(args[0]); err != nil {
log.Fatalf("Failed to backup configurations: %s", err)
}
},
}
cmdConfig.AddCommand(cmdConfigBackup)
var cmdConfigRestore = &cobra.Command{
Use: `restore "directory"`,
Short: `Restore config in backup "directory"`,
Long: `Restore the crowdsec configuration from specified backup "directory" including:
- Main config (config.yaml)
- Simulation config (simulation.yaml)
- Profiles config (profiles.yaml)
- List of scenarios, parsers, postoverflows and collections that are up-to-date
- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections
- Backup of API credentials (local API and online API)`,
Args: cobra.ExactArgs(1),
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
var err error
if err := csConfig.LoadHub(); err != nil {
log.Fatal(err)
}
if err = cwhub.GetHubIdx(csConfig.Hub); err != nil {
log.Info("Run 'sudo cscli hub update' to get the hub index")
log.Fatalf("Failed to get Hub index : %v", err)
}
if err := restoreConfigFromDirectory(args[0]); err != nil {
log.Fatalf("failed restoring configurations from %s : %s", args[0], err)
}
},
}
cmdConfigRestore.PersistentFlags().BoolVar(&restoreOldBackup, "old-backup", false, "To use when you are upgrading crowdsec v0.X to v1.X and you need to restore backup from v0.X")
cmdConfig.AddCommand(cmdConfigRestore)
cmdConfig.AddCommand(NewConfigShowCmd())
cmdConfig.AddCommand(NewConfigBackupCmd())
cmdConfig.AddCommand(NewConfigRestoreCmd())
return cmdConfig
}

View file

@ -0,0 +1,170 @@
package main
import (
"fmt"
"os"
"path/filepath"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
)
// backupConfigToDirectory snapshots the running crowdsec configuration into
// dirPath :
//   - Main config (config.yaml)
//   - Profiles config (profiles.yaml)
//   - Simulation config (simulation.yaml)
//   - Backup of API credentials (local API and online API)
//   - List of scenarios, parsers, postoverflows and collections that are up-to-date
//   - Tainted/local/out-of-date scenarios, parsers, postoverflows and collections
//
// The parent of dirPath must already exist; dirPath itself must not, as it is
// created here (owner-only, since it will hold credential files).
func backupConfigToDirectory(dirPath string) error {
	var err error

	if dirPath == "" {
		return fmt.Errorf("directory path can't be empty")
	}

	log.Infof("Starting configuration backup")

	/*if parent directory doesn't exist, bail out. create final dir with Mkdir*/
	parentDir := filepath.Dir(dirPath)
	if _, err := os.Stat(parentDir); err != nil {
		return errors.Wrapf(err, "while checking parent directory %s existence", parentDir)
	}

	// 0o700: the backup contains API credentials, keep it owner-only.
	if err = os.Mkdir(dirPath, 0o700); err != nil {
		return errors.Wrapf(err, "while creating %s", dirPath)
	}

	if csConfig.ConfigPaths.SimulationFilePath != "" {
		backupSimulation := filepath.Join(dirPath, "simulation.yaml")
		if err = types.CopyFile(csConfig.ConfigPaths.SimulationFilePath, backupSimulation); err != nil {
			return errors.Wrapf(err, "failed copy %s to %s", csConfig.ConfigPaths.SimulationFilePath, backupSimulation)
		}
		log.Infof("Saved simulation to %s", backupSimulation)
	}

	/*
	   - backup AcquisitionFilePath
	   - backup the other files of acquisition directory
	*/
	if csConfig.Crowdsec != nil && csConfig.Crowdsec.AcquisitionFilePath != "" {
		backupAcquisition := filepath.Join(dirPath, "acquis.yaml")
		if err = types.CopyFile(csConfig.Crowdsec.AcquisitionFilePath, backupAcquisition); err != nil {
			return fmt.Errorf("failed copy %s to %s : %s", csConfig.Crowdsec.AcquisitionFilePath, backupAcquisition, err)
		}
	}

	// Additional acquisition files are stored under <dirPath>/acquis/.
	acquisBackupDir := filepath.Join(dirPath, "acquis")
	if err = os.Mkdir(acquisBackupDir, 0o700); err != nil {
		return fmt.Errorf("error while creating %s : %s", acquisBackupDir, err)
	}

	if csConfig.Crowdsec != nil && len(csConfig.Crowdsec.AcquisitionFiles) > 0 {
		for _, acquisFile := range csConfig.Crowdsec.AcquisitionFiles {
			/*if it was the default one, it was already backup'ed*/
			if csConfig.Crowdsec.AcquisitionFilePath == acquisFile {
				continue
			}
			targetFname, err := filepath.Abs(filepath.Join(acquisBackupDir, filepath.Base(acquisFile)))
			if err != nil {
				return errors.Wrapf(err, "while saving %s to %s", acquisFile, acquisBackupDir)
			}
			if err = types.CopyFile(acquisFile, targetFname); err != nil {
				return fmt.Errorf("failed copy %s to %s : %s", acquisFile, targetFname, err)
			}
			log.Infof("Saved acquis %s to %s", acquisFile, targetFname)
		}
	}

	// ConfigFilePath is the path to the main config.yaml currently in use.
	if ConfigFilePath != "" {
		backupMain := fmt.Sprintf("%s/config.yaml", dirPath)
		if err = types.CopyFile(ConfigFilePath, backupMain); err != nil {
			return fmt.Errorf("failed copy %s to %s : %s", ConfigFilePath, backupMain, err)
		}
		log.Infof("Saved default yaml to %s", backupMain)
	}

	if csConfig.API != nil && csConfig.API.Server != nil && csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.CredentialsFilePath != "" {
		backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath)
		if err = types.CopyFile(csConfig.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds); err != nil {
			return fmt.Errorf("failed copy %s to %s : %s", csConfig.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds, err)
		}
		log.Infof("Saved online API credentials to %s", backupCAPICreds)
	}

	if csConfig.API != nil && csConfig.API.Client != nil && csConfig.API.Client.CredentialsFilePath != "" {
		backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath)
		if err = types.CopyFile(csConfig.API.Client.CredentialsFilePath, backupLAPICreds); err != nil {
			return fmt.Errorf("failed copy %s to %s : %s", csConfig.API.Client.CredentialsFilePath, backupLAPICreds, err)
		}
		log.Infof("Saved local API credentials to %s", backupLAPICreds)
	}

	if csConfig.API != nil && csConfig.API.Server != nil && csConfig.API.Server.ProfilesPath != "" {
		backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath)
		if err = types.CopyFile(csConfig.API.Server.ProfilesPath, backupProfiles); err != nil {
			return fmt.Errorf("failed copy %s to %s : %s", csConfig.API.Server.ProfilesPath, backupProfiles, err)
		}
		log.Infof("Saved profiles to %s", backupProfiles)
	}

	// Hub state (scenarios, parsers, collections...) is backed up separately.
	if err = BackupHub(dirPath); err != nil {
		return fmt.Errorf("failed to backup hub config : %s", err)
	}

	return nil
}
// runConfigBackup implements "cscli config backup <directory>": it loads the
// hub state, then snapshots the current configuration into args[0].
func runConfigBackup(cmd *cobra.Command, args []string) error {
	var err error

	if err = csConfig.LoadHub(); err != nil {
		return err
	}

	if err = cwhub.GetHubIdx(csConfig.Hub); err != nil {
		log.Info("Run 'sudo cscli hub update' to get the hub index")
		return fmt.Errorf("failed to get Hub index: %w", err)
	}

	err = backupConfigToDirectory(args[0])
	if err != nil {
		return fmt.Errorf("failed to backup config: %w", err)
	}

	return nil
}
// NewConfigBackupCmd builds the "config backup" subcommand; the work is done
// by runConfigBackup.
func NewConfigBackupCmd() *cobra.Command {
	return &cobra.Command{
		Use:   `backup "directory"`,
		Short: "Backup current config",
		Long: `Backup the current crowdsec configuration including :
- Main config (config.yaml)
- Simulation config (simulation.yaml)
- Profiles config (profiles.yaml)
- List of scenarios, parsers, postoverflows and collections that are up-to-date
- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections
- Backup of API credentials (local API and online API)`,
		Example:           `cscli config backup ./my-backup`,
		Args:              cobra.ExactArgs(1),
		DisableAutoGenTag: true,
		RunE:              runConfigBackup,
	}
}

View file

@ -0,0 +1,225 @@
package main
import (
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
)
// OldAPICfg mirrors the layout of the pre-v1 api_creds.json credentials file.
// It is only read when restoring a v0.X backup (--old-backup), where it is
// converted into the current online API credentials format.
type OldAPICfg struct {
	MachineID string `json:"machine_id"`
	Password  string `json:"password"`
}
// restoreConfigFromDirectory restores a crowdsec configuration from the backup
// found in dirPath :
//   - Main config (config.yaml)
//   - Profiles config (profiles.yaml)
//   - Simulation config (simulation.yaml)
//   - Backup of API credentials (local API and online API)
//   - List of scenarios, parsers, postoverflows and collections that are up-to-date
//   - Tainted/local/out-of-date scenarios, parsers, postoverflows and collections
//
// When oldBackup is true the backup comes from crowdsec v0.X: only the legacy
// api_creds.json is converted into online API credentials; config.yaml,
// profiles and LAPI credentials are not restored from such a backup.
func restoreConfigFromDirectory(dirPath string, oldBackup bool) error {
	var err error

	if !oldBackup {
		backupMain := fmt.Sprintf("%s/config.yaml", dirPath)
		if _, err = os.Stat(backupMain); err == nil {
			if csConfig.ConfigPaths != nil && csConfig.ConfigPaths.ConfigDir != "" {
				if err = types.CopyFile(backupMain, fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir)); err != nil {
					return fmt.Errorf("failed copy %s to %s : %s", backupMain, csConfig.ConfigPaths.ConfigDir, err)
				}
			}
		}

		// Now we have config.yaml, we should regenerate config struct to have rights paths etc
		ConfigFilePath = fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir)
		initConfig()

		backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath)
		if _, err = os.Stat(backupCAPICreds); err == nil {
			if err = types.CopyFile(backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath); err != nil {
				return fmt.Errorf("failed copy %s to %s : %s", backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath, err)
			}
		}

		backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath)
		if _, err = os.Stat(backupLAPICreds); err == nil {
			if err = types.CopyFile(backupLAPICreds, csConfig.API.Client.CredentialsFilePath); err != nil {
				return fmt.Errorf("failed copy %s to %s : %s", backupLAPICreds, csConfig.API.Client.CredentialsFilePath, err)
			}
		}

		backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath)
		if _, err = os.Stat(backupProfiles); err == nil {
			if err = types.CopyFile(backupProfiles, csConfig.API.Server.ProfilesPath); err != nil {
				return fmt.Errorf("failed copy %s to %s : %s", backupProfiles, csConfig.API.Server.ProfilesPath, err)
			}
		}
	} else {
		// v0.X backup: convert the legacy JSON credentials file into the
		// current online API credentials YAML.
		var oldAPICfg OldAPICfg
		backupOldAPICfg := fmt.Sprintf("%s/api_creds.json", dirPath)

		jsonFile, err := os.Open(backupOldAPICfg)
		if err != nil {
			// Best effort: a missing legacy credentials file is not fatal.
			log.Warningf("failed to open %s : %s", backupOldAPICfg, err)
		} else {
			byteValue, _ := io.ReadAll(jsonFile)
			err = json.Unmarshal(byteValue, &oldAPICfg)
			if err != nil {
				return fmt.Errorf("failed to load json file %s : %s", backupOldAPICfg, err)
			}

			apiCfg := csconfig.ApiCredentialsCfg{
				Login:    oldAPICfg.MachineID,
				Password: oldAPICfg.Password,
				URL:      CAPIBaseURL,
			}
			apiConfigDump, err := yaml.Marshal(apiCfg)
			if err != nil {
				return fmt.Errorf("unable to dump api credentials: %s", err)
			}

			// Prefer the configured credentials path, fall back to the default
			// location under the config dir.
			apiConfigDumpFile := fmt.Sprintf("%s/online_api_credentials.yaml", csConfig.ConfigPaths.ConfigDir)
			if csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.CredentialsFilePath != "" {
				apiConfigDumpFile = csConfig.API.Server.OnlineClient.CredentialsFilePath
			}

			err = os.WriteFile(apiConfigDumpFile, apiConfigDump, 0o644)
			if err != nil {
				return fmt.Errorf("write api credentials in '%s' failed: %s", apiConfigDumpFile, err)
			}
			log.Infof("Saved API credentials to %s", apiConfigDumpFile)
		}
	}

	backupSimulation := fmt.Sprintf("%s/simulation.yaml", dirPath)
	if _, err = os.Stat(backupSimulation); err == nil {
		if err = types.CopyFile(backupSimulation, csConfig.ConfigPaths.SimulationFilePath); err != nil {
			return fmt.Errorf("failed copy %s to %s : %s", backupSimulation, csConfig.ConfigPaths.SimulationFilePath, err)
		}
	}

	/*if there is a acquisition dir, restore its content*/
	if csConfig.Crowdsec.AcquisitionDirPath != "" {
		if err = os.Mkdir(csConfig.Crowdsec.AcquisitionDirPath, 0o700); err != nil {
			return fmt.Errorf("error while creating %s : %s", csConfig.Crowdsec.AcquisitionDirPath, err)
		}
	}

	// if there was a single one
	backupAcquisition := fmt.Sprintf("%s/acquis.yaml", dirPath)
	if _, err = os.Stat(backupAcquisition); err == nil {
		log.Debugf("restoring backup'ed %s", backupAcquisition)
		if err = types.CopyFile(backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath); err != nil {
			return fmt.Errorf("failed copy %s to %s : %s", backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath, err)
		}
	}

	// if there is files in the acquis backup dir, restore them
	acquisBackupDir := filepath.Join(dirPath, "acquis", "*.yaml")
	if acquisFiles, err := filepath.Glob(acquisBackupDir); err == nil {
		for _, acquisFile := range acquisFiles {
			targetFname, err := filepath.Abs(csConfig.Crowdsec.AcquisitionDirPath + "/" + filepath.Base(acquisFile))
			if err != nil {
				return errors.Wrapf(err, "while saving %s to %s", acquisFile, targetFname)
			}
			log.Debugf("restoring %s to %s", acquisFile, targetFname)
			if err = types.CopyFile(acquisFile, targetFname); err != nil {
				return fmt.Errorf("failed copy %s to %s : %s", acquisFile, targetFname, err)
			}
		}
	}

	if csConfig.Crowdsec != nil && len(csConfig.Crowdsec.AcquisitionFiles) > 0 {
		for _, acquisFile := range csConfig.Crowdsec.AcquisitionFiles {
			log.Infof("backup filepath from dir -> %s", acquisFile)
			// if it was the default one, it has already been backed up
			if csConfig.Crowdsec.AcquisitionFilePath == acquisFile {
				log.Infof("skip this one")
				continue
			}
			// NOTE(review): this copies the *live* acquisition files into a path
			// built from the glob pattern (<dirPath>/acquis/*.yaml) during a
			// restore; it looks pasted from the backup routine — confirm the
			// intended copy direction.
			targetFname, err := filepath.Abs(filepath.Join(acquisBackupDir, filepath.Base(acquisFile)))
			if err != nil {
				return errors.Wrapf(err, "while saving %s to %s", acquisFile, acquisBackupDir)
			}
			if err = types.CopyFile(acquisFile, targetFname); err != nil {
				return fmt.Errorf("failed copy %s to %s : %s", acquisFile, targetFname, err)
			}
			log.Infof("Saved acquis %s to %s", acquisFile, targetFname)
		}
	}

	// Hub state (scenarios, parsers, collections...) is restored separately.
	if err = RestoreHub(dirPath); err != nil {
		return fmt.Errorf("failed to restore hub config : %s", err)
	}

	return nil
}
// runConfigRestore implements "cscli config restore <directory>": it loads the
// hub state, then restores the configuration from args[0], honouring the
// --old-backup flag for v0.X backups.
func runConfigRestore(cmd *cobra.Command, args []string) error {
	oldBackup, err := cmd.Flags().GetBool("old-backup")
	if err != nil {
		return err
	}

	if err = csConfig.LoadHub(); err != nil {
		return err
	}

	if err = cwhub.GetHubIdx(csConfig.Hub); err != nil {
		log.Info("Run 'sudo cscli hub update' to get the hub index")
		return fmt.Errorf("failed to get Hub index: %w", err)
	}

	if err = restoreConfigFromDirectory(args[0], oldBackup); err != nil {
		return fmt.Errorf("failed to restore config from %s: %w", args[0], err)
	}

	return nil
}
// NewConfigRestoreCmd builds the "config restore" subcommand and registers its
// --old-backup flag; the work is done by runConfigRestore.
func NewConfigRestoreCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   `restore "directory"`,
		Short: `Restore config in backup "directory"`,
		Long: `Restore the crowdsec configuration from specified backup "directory" including:
- Main config (config.yaml)
- Simulation config (simulation.yaml)
- Profiles config (profiles.yaml)
- List of scenarios, parsers, postoverflows and collections that are up-to-date
- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections
- Backup of API credentials (local API and online API)`,
		Args:              cobra.ExactArgs(1),
		DisableAutoGenTag: true,
		RunE:              runConfigRestore,
	}

	cmd.Flags().BoolP("old-backup", "", false, "To use when you are upgrading crowdsec v0.X to v1.X and you need to restore backup from v0.X")

	return cmd
}

View file

@ -0,0 +1,217 @@
package main
import (
"encoding/json"
"fmt"
"github.com/antonmedv/expr"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
)
// showConfigKey evaluates the expression in key (e.g.
// "Config.API.Server.ListenURI") against the loaded configuration and prints
// the result in the format selected by csConfig.Cscli.Output.
func showConfigKey(key string) error {
	type Env struct {
		Config *csconfig.Config
	}

	program, err := expr.Compile(key, expr.Env(Env{}))
	if err != nil {
		return err
	}

	output, err := expr.Run(program, Env{Config: csConfig})
	if err != nil {
		return err
	}

	switch csConfig.Cscli.Output {
	case "human", "raw":
		// Pick the format verb matching the value's dynamic type.
		switch v := output.(type) {
		case string:
			fmt.Printf("%s\n", v)
		case int:
			fmt.Printf("%d\n", v)
		default:
			fmt.Printf("%v\n", v)
		}
	case "json":
		dump, err := json.MarshalIndent(output, "", " ")
		if err != nil {
			return fmt.Errorf("failed to marshal configuration: %w", err)
		}

		fmt.Printf("%s\n", string(dump))
	}

	return nil
}
// runConfigShow implements "cscli config show". With --key set it evaluates
// that expression against the configuration via showConfigKey; otherwise it
// dumps the whole configuration in the format selected by
// csConfig.Cscli.Output ("human", "json" or "raw" yaml).
//
// Fix: DbConfig.Flush.MaxAge and MaxItems are pointers and were dereferenced
// with only Flush itself nil-checked; guard each field before dereferencing to
// avoid a nil-pointer panic when they are unset.
func runConfigShow(cmd *cobra.Command, args []string) error {
	flags := cmd.Flags()

	key, err := flags.GetString("key")
	if err != nil {
		return err
	}

	if key != "" {
		return showConfigKey(key)
	}

	switch csConfig.Cscli.Output {
	case "human":
		fmt.Printf("Global:\n")

		if csConfig.ConfigPaths != nil {
			fmt.Printf(" - Configuration Folder : %s\n", csConfig.ConfigPaths.ConfigDir)
			fmt.Printf(" - Data Folder : %s\n", csConfig.ConfigPaths.DataDir)
			fmt.Printf(" - Hub Folder : %s\n", csConfig.ConfigPaths.HubDir)
			fmt.Printf(" - Simulation File : %s\n", csConfig.ConfigPaths.SimulationFilePath)
		}

		if csConfig.Common != nil {
			fmt.Printf(" - Log Folder : %s\n", csConfig.Common.LogDir)
			fmt.Printf(" - Log level : %s\n", csConfig.Common.LogLevel)
			fmt.Printf(" - Log Media : %s\n", csConfig.Common.LogMedia)
		}

		if csConfig.Crowdsec != nil {
			fmt.Printf("Crowdsec:\n")
			fmt.Printf(" - Acquisition File : %s\n", csConfig.Crowdsec.AcquisitionFilePath)
			fmt.Printf(" - Parsers routines : %d\n", csConfig.Crowdsec.ParserRoutinesCount)
			if csConfig.Crowdsec.AcquisitionDirPath != "" {
				fmt.Printf(" - Acquisition Folder : %s\n", csConfig.Crowdsec.AcquisitionDirPath)
			}
		}

		if csConfig.Cscli != nil {
			fmt.Printf("cscli:\n")
			fmt.Printf(" - Output : %s\n", csConfig.Cscli.Output)
			fmt.Printf(" - Hub Branch : %s\n", csConfig.Cscli.HubBranch)
			fmt.Printf(" - Hub Folder : %s\n", csConfig.Cscli.HubDir)
		}

		if csConfig.API != nil {
			if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil {
				fmt.Printf("API Client:\n")
				fmt.Printf(" - URL : %s\n", csConfig.API.Client.Credentials.URL)
				fmt.Printf(" - Login : %s\n", csConfig.API.Client.Credentials.Login)
				fmt.Printf(" - Credentials File : %s\n", csConfig.API.Client.CredentialsFilePath)
			}

			if csConfig.API.Server != nil {
				fmt.Printf("Local API Server:\n")
				fmt.Printf(" - Listen URL : %s\n", csConfig.API.Server.ListenURI)
				fmt.Printf(" - Profile File : %s\n", csConfig.API.Server.ProfilesPath)

				if csConfig.API.Server.TLS != nil {
					if csConfig.API.Server.TLS.CertFilePath != "" {
						fmt.Printf(" - Cert File : %s\n", csConfig.API.Server.TLS.CertFilePath)
					}
					if csConfig.API.Server.TLS.KeyFilePath != "" {
						fmt.Printf(" - Key File : %s\n", csConfig.API.Server.TLS.KeyFilePath)
					}
					if csConfig.API.Server.TLS.CACertPath != "" {
						fmt.Printf(" - CA Cert : %s\n", csConfig.API.Server.TLS.CACertPath)
					}
					if csConfig.API.Server.TLS.CRLPath != "" {
						fmt.Printf(" - CRL : %s\n", csConfig.API.Server.TLS.CRLPath)
					}
					if csConfig.API.Server.TLS.CacheExpiration != nil {
						fmt.Printf(" - Cache Expiration : %s\n", csConfig.API.Server.TLS.CacheExpiration)
					}
					if csConfig.API.Server.TLS.ClientVerification != "" {
						fmt.Printf(" - Client Verification : %s\n", csConfig.API.Server.TLS.ClientVerification)
					}
					if csConfig.API.Server.TLS.AllowedAgentsOU != nil {
						for _, ou := range csConfig.API.Server.TLS.AllowedAgentsOU {
							fmt.Printf(" - Allowed Agents OU : %s\n", ou)
						}
					}
					if csConfig.API.Server.TLS.AllowedBouncersOU != nil {
						for _, ou := range csConfig.API.Server.TLS.AllowedBouncersOU {
							fmt.Printf(" - Allowed Bouncers OU : %s\n", ou)
						}
					}
				}

				fmt.Printf(" - Trusted IPs: \n")
				for _, ip := range csConfig.API.Server.TrustedIPs {
					fmt.Printf(" - %s\n", ip)
				}

				if csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.Credentials != nil {
					fmt.Printf("Central API:\n")
					fmt.Printf(" - URL : %s\n", csConfig.API.Server.OnlineClient.Credentials.URL)
					fmt.Printf(" - Login : %s\n", csConfig.API.Server.OnlineClient.Credentials.Login)
					fmt.Printf(" - Credentials File : %s\n", csConfig.API.Server.OnlineClient.CredentialsFilePath)
				}
			}
		}

		if csConfig.DbConfig != nil {
			fmt.Printf(" - Database:\n")
			fmt.Printf(" - Type : %s\n", csConfig.DbConfig.Type)

			switch csConfig.DbConfig.Type {
			case "sqlite":
				fmt.Printf(" - Path : %s\n", csConfig.DbConfig.DbPath)
			default:
				fmt.Printf(" - Host : %s\n", csConfig.DbConfig.Host)
				fmt.Printf(" - Port : %d\n", csConfig.DbConfig.Port)
				fmt.Printf(" - User : %s\n", csConfig.DbConfig.User)
				fmt.Printf(" - DB Name : %s\n", csConfig.DbConfig.DbName)
			}

			if csConfig.DbConfig.Flush != nil {
				// MaxAge/MaxItems are pointers: guard against nil before
				// dereferencing (they may be unset in the config file).
				if csConfig.DbConfig.Flush.MaxAge != nil && *csConfig.DbConfig.Flush.MaxAge != "" {
					fmt.Printf(" - Flush age : %s\n", *csConfig.DbConfig.Flush.MaxAge)
				}
				if csConfig.DbConfig.Flush.MaxItems != nil && *csConfig.DbConfig.Flush.MaxItems != 0 {
					fmt.Printf(" - Flush size : %d\n", *csConfig.DbConfig.Flush.MaxItems)
				}
			}
		}
	case "json":
		data, err := json.MarshalIndent(csConfig, "", " ")
		if err != nil {
			return fmt.Errorf("failed to marshal configuration: %w", err)
		}

		fmt.Printf("%s\n", string(data))
	case "raw":
		data, err := yaml.Marshal(csConfig)
		if err != nil {
			return fmt.Errorf("failed to marshal configuration: %w", err)
		}

		fmt.Printf("%s\n", string(data))
	}

	return nil
}
// NewConfigShowCmd builds the "config show" subcommand and registers its
// --key flag; the work is done by runConfigShow.
func NewConfigShowCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:               "show",
		Short:             "Displays current config",
		Long:              `Displays the current cli configuration.`,
		Args:              cobra.ExactArgs(0),
		DisableAutoGenTag: true,
		RunE:              runConfigShow,
	}

	cmd.Flags().StringP("key", "", "", "Display only this value (Config.API.Server.ListenURI)")

	return cmd
}

View file

@ -46,7 +46,7 @@ func NewConsoleCmd() *cobra.Command {
log.Fatalf("No configuration for Central API (CAPI) in '%s'", *csConfig.FilePath)
}
if csConfig.API.Server.OnlineClient.Credentials == nil {
log.Fatal("You must configure Central API (CAPI) with `cscli capi register` before enrolling your instance")
log.Fatal("You must configure Central API (CAPI) with `cscli capi register` before accessing console features.")
}
return nil
},
@ -129,9 +129,9 @@ After running this command your will need to validate the enrollment in the weba
var enableAll, disableAll bool
cmdEnable := &cobra.Command{
Use: "enable [feature-flag]",
Short: "Enable a feature flag",
Example: "enable tainted",
Use: "enable [option]",
Short: "Enable a console option",
Example: "sudo cscli console enable tainted",
Long: `
Enable given information push to the central API. Allows to empower the console`,
ValidArgs: csconfig.CONSOLE_CONFIGS,
@ -153,13 +153,13 @@ Enable given information push to the central API. Allows to empower the console`
log.Infof(ReloadMessage())
},
}
cmdEnable.Flags().BoolVarP(&enableAll, "all", "a", false, "Enable all feature flags")
cmdEnable.Flags().BoolVarP(&enableAll, "all", "a", false, "Enable all console options")
cmdConsole.AddCommand(cmdEnable)
cmdDisable := &cobra.Command{
Use: "disable [feature-flag]",
Short: "Disable a feature flag",
Example: "disable tainted",
Use: "disable [option]",
Short: "Disable a console option",
Example: "sudo cscli console disable tainted",
Long: `
Disable given information push to the central API.`,
ValidArgs: csconfig.CONSOLE_CONFIGS,
@ -183,13 +183,13 @@ Disable given information push to the central API.`,
log.Infof(ReloadMessage())
},
}
cmdDisable.Flags().BoolVarP(&disableAll, "all", "a", false, "Enable all feature flags")
cmdDisable.Flags().BoolVarP(&disableAll, "all", "a", false, "Disable all console options")
cmdConsole.AddCommand(cmdDisable)
cmdConsoleStatus := &cobra.Command{
Use: "status [feature-flag]",
Short: "Shows status of one or all feature flags",
Example: "status tainted",
Use: "status [option]",
Short: "Shows status of one or all console options",
Example: `sudo cscli console status`,
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
switch csConfig.Cscli.Output {
@ -212,6 +212,7 @@ Disable given information push to the central API.`,
{"share_manual_decisions", fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareManualDecisions)},
{"share_custom", fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios)},
{"share_tainted", fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios)},
{"share_context", fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareContext)},
}
for _, row := range rows {
err = csvwriter.Write(row)
@ -223,8 +224,8 @@ Disable given information push to the central API.`,
}
},
}
cmdConsole.AddCommand(cmdConsoleStatus)
return cmdConsole
}
@ -270,6 +271,19 @@ func SetConsoleOpts(args []string, wanted bool) {
log.Infof("%s set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted)
csConfig.API.Server.ConsoleConfig.ShareManualDecisions = types.BoolPtr(wanted)
}
case csconfig.SEND_CONTEXT:
/*for each flag check if it's already set before setting it*/
if csConfig.API.Server.ConsoleConfig.ShareContext != nil {
if *csConfig.API.Server.ConsoleConfig.ShareContext == wanted {
log.Infof("%s already set to %t", csconfig.SEND_CONTEXT, wanted)
} else {
log.Infof("%s set to %t", csconfig.SEND_CONTEXT, wanted)
*csConfig.API.Server.ConsoleConfig.ShareContext = wanted
}
} else {
log.Infof("%s set to %t", csconfig.SEND_CONTEXT, wanted)
csConfig.API.Server.ConsoleConfig.ShareContext = types.BoolPtr(wanted)
}
default:
log.Fatalf("unknown flag %s", arg)
}

View file

@ -41,6 +41,12 @@ func cmdConsoleStatusTable(out io.Writer, csConfig csconfig.Config) {
}
t.AddRow(option, activated, "Send alerts from tainted scenarios to the console")
case csconfig.SEND_CONTEXT:
activated := string(emoji.CrossMark)
if *csConfig.API.Server.ConsoleConfig.ShareContext {
activated = string(emoji.CheckMarkButton)
}
t.AddRow(option, activated, "Send context with alerts to the console")
}
}

View file

@ -5,6 +5,7 @@ import (
"fmt"
"net/url"
"os"
"sort"
"strings"
"github.com/go-openapi/strfmt"
@ -13,15 +14,186 @@ import (
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"github.com/crowdsecurity/crowdsec/pkg/alertcontext"
"github.com/crowdsecurity/crowdsec/pkg/apiclient"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
"github.com/crowdsecurity/crowdsec/pkg/cwversion"
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
"github.com/crowdsecurity/crowdsec/pkg/models"
"github.com/crowdsecurity/crowdsec/pkg/parser"
"github.com/crowdsecurity/crowdsec/pkg/types"
)
var LAPIURLPrefix string = "v1"
var lapiUser string
// runLapiStatus checks that this agent can authenticate against the Local API
// (LAPI) using the credentials from the client configuration.
//
// It loads the hub index so the list of installed scenarios can be sent with
// the authentication request, mirroring what the agent does at startup.
// Any failure is fatal (the process exits); on success it returns nil.
func runLapiStatus(cmd *cobra.Command, args []string) error {
	var err error

	password := strfmt.Password(csConfig.API.Client.Credentials.Password)
	login := csConfig.API.Client.Credentials.Login

	apiurl, err := url.Parse(csConfig.API.Client.Credentials.URL)
	if err != nil {
		// report the raw configured URL: on failure apiurl is nil and
		// would render as "<nil>" in the message
		log.Fatalf("parsing api url ('%s'): %s", csConfig.API.Client.Credentials.URL, err)
	}
	if err := csConfig.LoadHub(); err != nil {
		log.Fatal(err)
	}
	if err := cwhub.GetHubIdx(csConfig.Hub); err != nil {
		log.Info("Run 'sudo cscli hub update' to get the hub index")
		log.Fatalf("Failed to load hub index : %s", err)
	}

	scenarios, err := cwhub.GetInstalledScenariosAsString()
	if err != nil {
		log.Fatalf("failed to get scenarios : %s", err)
	}

	Client, err = apiclient.NewDefaultClient(apiurl,
		LAPIURLPrefix,
		fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()),
		nil)
	if err != nil {
		log.Fatalf("init default client: %s", err)
	}

	t := models.WatcherAuthRequest{
		MachineID: &login,
		Password:  &password,
		Scenarios: scenarios,
	}

	log.Infof("Loaded credentials from %s", csConfig.API.Client.CredentialsFilePath)
	log.Infof("Trying to authenticate with username %s on %s", login, apiurl)

	_, err = Client.Auth.AuthenticateWatcher(context.Background(), t)
	if err != nil {
		log.Fatalf("Failed to authenticate to Local API (LAPI) : %s", err)
	}
	// log.Fatalf never returns, so no else branch is needed
	log.Infof("You can successfully interact with Local API (LAPI)")

	return nil
}
// runLapiRegister registers this machine against the Local API (LAPI) and
// writes the resulting credentials (login, generated password, URL) either to
// --file, to the configured credentials path, or to stdout.
//
// The machine still has to be validated by an administrator on the LAPI side
// before it can authenticate. Fatal errors terminate the process.
func runLapiRegister(cmd *cobra.Command, args []string) error {
	var err error

	flags := cmd.Flags()

	apiURL, err := flags.GetString("url")
	if err != nil {
		return err
	}

	outputFile, err := flags.GetString("file")
	if err != nil {
		return err
	}

	lapiUser, err := flags.GetString("machine")
	if err != nil {
		return err
	}

	// no machine name given: derive one automatically
	if lapiUser == "" {
		lapiUser, err = generateID("")
		if err != nil {
			log.Fatalf("unable to generate machine id: %s", err)
		}
	}

	password := strfmt.Password(generatePassword(passwordLength))

	if apiURL == "" {
		if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil && csConfig.API.Client.Credentials.URL != "" {
			apiURL = csConfig.API.Client.Credentials.URL
		} else {
			log.Fatalf("No Local API URL. Please provide it in your configuration or with the -u parameter")
		}
	}
	/*URL needs to end with /, but user doesn't care*/
	if !strings.HasSuffix(apiURL, "/") {
		apiURL += "/"
	}
	/*URL needs to start with http://, but user doesn't care*/
	if !strings.HasPrefix(apiURL, "http://") && !strings.HasPrefix(apiURL, "https://") {
		apiURL = "http://" + apiURL
	}

	apiurl, err := url.Parse(apiURL)
	if err != nil {
		log.Fatalf("parsing api url: %s", err)
	}

	_, err = apiclient.RegisterClient(&apiclient.Config{
		MachineID:     lapiUser,
		Password:      password,
		UserAgent:     fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()),
		URL:           apiurl,
		VersionPrefix: LAPIURLPrefix,
	}, nil)
	if err != nil {
		log.Fatalf("api client register: %s", err)
	}

	log.Printf("Successfully registered to Local API (LAPI)")

	// destination for the credentials dump: explicit flag first, then the
	// configured credentials path; the zero value "" means stdout
	var dumpFile string
	if outputFile != "" {
		dumpFile = outputFile
	} else if csConfig.API.Client.CredentialsFilePath != "" {
		dumpFile = csConfig.API.Client.CredentialsFilePath
	}

	apiCfg := csconfig.ApiCredentialsCfg{
		Login:    lapiUser,
		Password: password.String(),
		URL:      apiURL,
	}
	apiConfigDump, err := yaml.Marshal(apiCfg)
	if err != nil {
		log.Fatalf("unable to marshal api credentials: %s", err)
	}
	if dumpFile != "" {
		// NOTE(review): credentials are written world-readable (0644);
		// confirm whether a stricter mode (0600) would break existing setups
		err = os.WriteFile(dumpFile, apiConfigDump, 0644)
		if err != nil {
			log.Fatalf("write api credentials in '%s' failed: %s", dumpFile, err)
		}
		log.Printf("Local API credentials dumped to '%s'", dumpFile)
	} else {
		fmt.Printf("%s\n", string(apiConfigDump))
	}

	log.Warning(ReloadMessage())

	return nil
}
// NewLapiStatusCmd builds the `cscli lapi status` subcommand, which verifies
// authentication against the Local API with the configured credentials.
func NewLapiStatusCmd() *cobra.Command {
	return &cobra.Command{
		Use:               "status",
		Short:             "Check authentication to Local API (LAPI)",
		Args:              cobra.MinimumNArgs(0),
		DisableAutoGenTag: true,
		RunE:              runLapiStatus,
	}
}
// NewLapiRegisterCmd builds the `cscli lapi register` subcommand.
// Registration only creates the machine on the LAPI side; an administrator
// still has to validate it before it becomes usable.
func NewLapiRegisterCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "register",
		Short: "Register a machine to Local API (LAPI)",
		Long: `Register you machine to the Local API (LAPI).
Keep in mind the machine needs to be validated by an administrator on LAPI side to be effective.`,
		Args:              cobra.MinimumNArgs(0),
		DisableAutoGenTag: true,
		RunE:              runLapiRegister,
	}

	cmd.Flags().StringP("url", "u", "", "URL of the API (ie. http://127.0.0.1)")
	cmd.Flags().StringP("file", "f", "", "output file destination")
	cmd.Flags().String("machine", "", "Name of the machine to register with")

	return cmd
}
func NewLapiCmd() *cobra.Command {
var cmdLapi = &cobra.Command{
@ -37,138 +209,352 @@ func NewLapiCmd() *cobra.Command {
},
}
var cmdLapiRegister = &cobra.Command{
Use: "register",
Short: "Register a machine to Local API (LAPI)",
Long: `Register you machine to the Local API (LAPI).
Keep in mind the machine needs to be validated by an administrator on LAPI side to be effective.`,
Args: cobra.MinimumNArgs(0),
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
var err error
if lapiUser == "" {
lapiUser, err = generateID("")
if err != nil {
log.Fatalf("unable to generate machine id: %s", err)
}
}
password := strfmt.Password(generatePassword(passwordLength))
if apiURL == "" {
if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil && csConfig.API.Client.Credentials.URL != "" {
apiURL = csConfig.API.Client.Credentials.URL
} else {
log.Fatalf("No Local API URL. Please provide it in your configuration or with the -u parameter")
}
}
/*URL needs to end with /, but user doesn't care*/
if !strings.HasSuffix(apiURL, "/") {
apiURL += "/"
}
/*URL needs to start with http://, but user doesn't care*/
if !strings.HasPrefix(apiURL, "http://") && !strings.HasPrefix(apiURL, "https://") {
apiURL = "http://" + apiURL
}
apiurl, err := url.Parse(apiURL)
if err != nil {
log.Fatalf("parsing api url: %s", err)
}
_, err = apiclient.RegisterClient(&apiclient.Config{
MachineID: lapiUser,
Password: password,
UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()),
URL: apiurl,
VersionPrefix: LAPIURLPrefix,
}, nil)
cmdLapi.AddCommand(NewLapiRegisterCmd())
cmdLapi.AddCommand(NewLapiStatusCmd())
cmdLapi.AddCommand(NewLapiContextCmd())
if err != nil {
log.Fatalf("api client register: %s", err)
}
log.Printf("Successfully registered to Local API (LAPI)")
var dumpFile string
if outputFile != "" {
dumpFile = outputFile
} else if csConfig.API.Client.CredentialsFilePath != "" {
dumpFile = csConfig.API.Client.CredentialsFilePath
} else {
dumpFile = ""
}
apiCfg := csconfig.ApiCredentialsCfg{
Login: lapiUser,
Password: password.String(),
URL: apiURL,
}
apiConfigDump, err := yaml.Marshal(apiCfg)
if err != nil {
log.Fatalf("unable to marshal api credentials: %s", err)
}
if dumpFile != "" {
err = os.WriteFile(dumpFile, apiConfigDump, 0644)
if err != nil {
log.Fatalf("write api credentials in '%s' failed: %s", dumpFile, err)
}
log.Printf("Local API credentials dumped to '%s'", dumpFile)
} else {
fmt.Printf("%s\n", string(apiConfigDump))
}
log.Warning(ReloadMessage())
},
}
cmdLapiRegister.Flags().StringVarP(&apiURL, "url", "u", "", "URL of the API (ie. http://127.0.0.1)")
cmdLapiRegister.Flags().StringVarP(&outputFile, "file", "f", "", "output file destination")
cmdLapiRegister.Flags().StringVar(&lapiUser, "machine", "", "Name of the machine to register with")
cmdLapi.AddCommand(cmdLapiRegister)
var cmdLapiStatus = &cobra.Command{
Use: "status",
Short: "Check authentication to Local API (LAPI)",
Args: cobra.MinimumNArgs(0),
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
var err error
password := strfmt.Password(csConfig.API.Client.Credentials.Password)
apiurl, err := url.Parse(csConfig.API.Client.Credentials.URL)
login := csConfig.API.Client.Credentials.Login
if err != nil {
log.Fatalf("parsing api url ('%s'): %s", apiurl, err)
}
if err := csConfig.LoadHub(); err != nil {
log.Fatal(err)
}
if err := cwhub.GetHubIdx(csConfig.Hub); err != nil {
log.Info("Run 'sudo cscli hub update' to get the hub index")
log.Fatalf("Failed to load hub index : %s", err)
}
scenarios, err := cwhub.GetInstalledScenariosAsString()
if err != nil {
log.Fatalf("failed to get scenarios : %s", err)
}
Client, err = apiclient.NewDefaultClient(apiurl,
LAPIURLPrefix,
fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()),
nil)
if err != nil {
log.Fatalf("init default client: %s", err)
}
t := models.WatcherAuthRequest{
MachineID: &login,
Password: &password,
Scenarios: scenarios,
}
log.Infof("Loaded credentials from %s", csConfig.API.Client.CredentialsFilePath)
log.Infof("Trying to authenticate with username %s on %s", login, apiurl)
_, err = Client.Auth.AuthenticateWatcher(context.Background(), t)
if err != nil {
log.Fatalf("Failed to authenticate to Local API (LAPI) : %s", err)
} else {
log.Infof("You can successfully interact with Local API (LAPI)")
}
},
}
cmdLapi.AddCommand(cmdLapiStatus)
return cmdLapi
}
// NewLapiContextCmd builds the `cscli lapi context` command tree, which
// manages the extra context (expr fields) sent to the Local API with alerts:
//   - add:    associate expr values with a context key
//   - status: print the current context configuration
//   - detect: list the evt.* fields produced by installed parsers
//   - delete: remove keys or values from the context configuration
func NewLapiContextCmd() *cobra.Command {
	cmdContext := &cobra.Command{
		Use:               "context [command]",
		Short:             "Manage context to send with alerts",
		DisableAutoGenTag: true,
		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
			// a missing context file is tolerated (first use on a fresh
			// install); any other load error is fatal
			if err := csConfig.LoadCrowdsec(); err != nil {
				fileNotFoundMessage := fmt.Sprintf("failed to open context file: open %s: no such file or directory", csConfig.Crowdsec.ConsoleContextPath)
				if err.Error() != fileNotFoundMessage {
					log.Fatalf("Unable to load CrowdSec Agent: %s", err)
				}
			}
			if csConfig.DisableAgent {
				log.Fatalf("Agent is disabled and lapi context can only be used on the agent")
			}
			return nil
		},
		Run: func(cmd *cobra.Command, args []string) {
			// bare `cscli lapi context` just prints the usage
			printHelp(cmd)
		},
	}

	var keyToAdd string
	var valuesToAdd []string
	cmdContextAdd := &cobra.Command{
		Use:   "add",
		Short: "Add context to send with alerts. You must specify the output key with the expr value you want",
		Example: `cscli lapi context add --key source_ip --value evt.Meta.source_ip
cscli lapi context add --key file_source --value evt.Line.Src
`,
		DisableAutoGenTag: true,
		Run: func(cmd *cobra.Command, args []string) {
			// validate the expr expressions before persisting anything
			if err := alertcontext.ValidateContextExpr(keyToAdd, valuesToAdd); err != nil {
				log.Fatalf("invalid context configuration :%s", err)
			}
			if _, ok := csConfig.Crowdsec.ContextToSend[keyToAdd]; !ok {
				csConfig.Crowdsec.ContextToSend[keyToAdd] = make([]string, 0)
				log.Infof("key '%s' added", keyToAdd)
			}
			// append only values not already present under this key
			data := csConfig.Crowdsec.ContextToSend[keyToAdd]
			for _, val := range valuesToAdd {
				if !inSlice(val, data) {
					log.Infof("value '%s' added to key '%s'", val, keyToAdd)
					data = append(data, val)
				}
				csConfig.Crowdsec.ContextToSend[keyToAdd] = data
			}
			// persist the updated context configuration to disk
			if err := csConfig.Crowdsec.DumpContextConfigFile(); err != nil {
				log.Fatalf(err.Error())
			}
		},
	}
	cmdContextAdd.Flags().StringVarP(&keyToAdd, "key", "k", "", "The key of the different values to send")
	cmdContextAdd.Flags().StringSliceVar(&valuesToAdd, "value", []string{}, "The expr fields to associate with the key")
	cmdContextAdd.MarkFlagRequired("key")
	cmdContextAdd.MarkFlagRequired("value")
	cmdContext.AddCommand(cmdContextAdd)

	cmdContextStatus := &cobra.Command{
		Use:               "status",
		Short:             "List context to send with alerts",
		DisableAutoGenTag: true,
		Run: func(cmd *cobra.Command, args []string) {
			if len(csConfig.Crowdsec.ContextToSend) == 0 {
				fmt.Println("No context found on this agent. You can use 'cscli lapi context add' to add context to your alerts.")
				return
			}
			// dump the key -> values mapping as YAML
			dump, err := yaml.Marshal(csConfig.Crowdsec.ContextToSend)
			if err != nil {
				log.Fatalf("unable to show context status: %s", err)
			}
			fmt.Println(string(dump))
		},
	}
	cmdContext.AddCommand(cmdContextStatus)

	var detectAll bool
	cmdContextDetect := &cobra.Command{
		Use:   "detect",
		Short: "Detect available fields from the installed parsers",
		Example: `cscli lapi context detect --all
cscli lapi context detect crowdsecurity/sshd-logs
`,
		DisableAutoGenTag: true,
		Run: func(cmd *cobra.Command, args []string) {
			var err error
			if !detectAll && len(args) == 0 {
				log.Infof("Please provide parsers to detect or --all flag.")
				printHelp(cmd)
			}

			// to avoid all the log.Info from the loaders functions
			log.SetLevel(log.ErrorLevel)

			err = exprhelpers.Init(nil)
			if err != nil {
				log.Fatalf("Failed to init expr helpers : %s", err)
			}

			// Populate cwhub package tools
			if err := cwhub.GetHubIdx(csConfig.Hub); err != nil {
				log.Fatalf("Failed to load hub index : %s", err)
			}

			csParsers := parser.NewParsers()
			if csParsers, err = parser.LoadParsers(csConfig, csParsers); err != nil {
				log.Fatalf("unable to load parsers: %s", err)
			}

			// collect, per parser node, the fields it (and its subnodes) can set;
			// args shrinks as requested parsers are found so leftovers can be reported
			fieldByParsers := make(map[string][]string)
			for _, node := range csParsers.Nodes {
				if !detectAll && !inSlice(node.Name, args) {
					continue
				}
				if !detectAll {
					args = removeFromSlice(node.Name, args)
				}
				fieldByParsers[node.Name] = make([]string, 0)
				fieldByParsers[node.Name] = detectNode(node, *csParsers.Ctx)

				subNodeFields := detectSubNode(node, *csParsers.Ctx)
				for _, field := range subNodeFields {
					if !inSlice(field, fieldByParsers[node.Name]) {
						fieldByParsers[node.Name] = append(fieldByParsers[node.Name], field)
					}
				}
			}

			// fields always available from acquisition, regardless of parser
			fmt.Printf("Acquisition :\n\n")
			fmt.Printf(" - evt.Line.Module\n")
			fmt.Printf(" - evt.Line.Raw\n")
			fmt.Printf(" - evt.Line.Src\n")
			fmt.Println()

			// sort parser names for a stable, readable listing
			parsersKey := make([]string, 0)
			for k := range fieldByParsers {
				parsersKey = append(parsersKey, k)
			}
			sort.Strings(parsersKey)

			for _, k := range parsersKey {
				if len(fieldByParsers[k]) == 0 {
					continue
				}
				fmt.Printf("%s :\n\n", k)
				values := fieldByParsers[k]
				sort.Strings(values)
				for _, value := range values {
					fmt.Printf(" - %s\n", value)
				}
				fmt.Println()
			}

			// anything left in args was requested but never matched a node
			if len(args) > 0 {
				for _, parserNotFound := range args {
					log.Errorf("parser '%s' not found, can't detect fields", parserNotFound)
				}
			}
		},
	}
	cmdContextDetect.Flags().BoolVarP(&detectAll, "all", "a", false, "Detect evt field for all installed parser")
	cmdContext.AddCommand(cmdContextDetect)

	var keysToDelete []string
	var valuesToDelete []string
	cmdContextDelete := &cobra.Command{
		Use:   "delete",
		Short: "Delete context to send with alerts",
		Example: `cscli lapi context delete --key source_ip
cscli lapi context delete --value evt.Line.Src
`,
		DisableAutoGenTag: true,
		Run: func(cmd *cobra.Command, args []string) {
			if len(keysToDelete) == 0 && len(valuesToDelete) == 0 {
				log.Fatalf("please provide at least a key or a value to delete")
			}

			// delete whole keys first
			for _, key := range keysToDelete {
				if _, ok := csConfig.Crowdsec.ContextToSend[key]; ok {
					delete(csConfig.Crowdsec.ContextToSend, key)
					log.Infof("key '%s' has been removed", key)
				} else {
					log.Warningf("key '%s' doesn't exist", key)
				}
			}

			// then delete individual values wherever they appear; keys left
			// empty afterwards are removed as well
			for _, value := range valuesToDelete {
				valueFound := false
				for key, context := range csConfig.Crowdsec.ContextToSend {
					if inSlice(value, context) {
						valueFound = true
						csConfig.Crowdsec.ContextToSend[key] = removeFromSlice(value, context)
						log.Infof("value '%s' has been removed from key '%s'", value, key)
					}
					if len(csConfig.Crowdsec.ContextToSend[key]) == 0 {
						delete(csConfig.Crowdsec.ContextToSend, key)
					}
				}
				if !valueFound {
					log.Warningf("value '%s' not found", value)
				}
			}

			// persist the updated context configuration to disk
			if err := csConfig.Crowdsec.DumpContextConfigFile(); err != nil {
				log.Fatalf(err.Error())
			}
		},
	}
	cmdContextDelete.Flags().StringSliceVarP(&keysToDelete, "key", "k", []string{}, "The keys to delete")
	cmdContextDelete.Flags().StringSliceVar(&valuesToDelete, "value", []string{}, "The expr fields to delete")
	cmdContext.AddCommand(cmdContextDelete)

	return cmdContext
}
// detectStaticField returns the list of event fields (evt.Parsed.*, evt.Meta.*
// or an explicit target name, prefixed with "evt." if needed) that a set of
// grok statics writes to. Duplicates are filtered; order follows the input.
func detectStaticField(GrokStatics []types.ExtraField) []string {
	ret := make([]string, 0)

	// appendUnique adds fieldName to ret unless it is already present
	appendUnique := func(fieldName string) {
		for _, existing := range ret {
			if existing == fieldName {
				return
			}
		}
		ret = append(ret, fieldName)
	}

	for _, static := range GrokStatics {
		if static.Parsed != "" {
			appendUnique(fmt.Sprintf("evt.Parsed.%s", static.Parsed))
		}
		if static.Meta != "" {
			appendUnique(fmt.Sprintf("evt.Meta.%s", static.Meta))
		}
		if static.TargetByName != "" {
			target := static.TargetByName
			if !strings.HasPrefix(target, "evt.") {
				target = "evt." + target
			}
			appendUnique(target)
		}
	}

	return ret
}
// detectNode returns the evt.* fields a parser node can set: named capture
// groups of its runtime grok regexp, capture groups of a registered subgrok
// (RegexpName), and grok-level plus node-level statics. Duplicates are
// filtered out.
func detectNode(node parser.Node, parserCTX parser.UnixParserCtx) []string {
	var ret = make([]string, 0)

	if node.Grok.RunTimeRegexp != nil {
		for _, capturedField := range node.Grok.RunTimeRegexp.Names() {
			fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField)
			if !inSlice(fieldName, ret) {
				ret = append(ret, fieldName)
			}
		}
	}

	if node.Grok.RegexpName != "" {
		grokCompiled, err := parserCTX.Grok.Get(node.Grok.RegexpName)
		if err != nil {
			// skip this subgrok entirely: on error grokCompiled may be a
			// zero value and calling Names() on it could panic
			log.Warningf("Can't get subgrok: %s", err)
		} else {
			for _, capturedField := range grokCompiled.Names() {
				fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField)
				if !inSlice(fieldName, ret) {
					ret = append(ret, fieldName)
				}
			}
		}
	}

	if len(node.Grok.Statics) > 0 {
		staticsField := detectStaticField(node.Grok.Statics)
		for _, staticField := range staticsField {
			if !inSlice(staticField, ret) {
				ret = append(ret, staticField)
			}
		}
	}

	if len(node.Statics) > 0 {
		staticsField := detectStaticField(node.Statics)
		for _, staticField := range staticsField {
			if !inSlice(staticField, ret) {
				ret = append(ret, staticField)
			}
		}
	}

	return ret
}
// detectSubNode returns the evt.* fields set by a node's leaf (sub) nodes:
// named capture groups of each subnode's runtime grok regexp, capture groups
// of a registered subgrok (RegexpName), and grok-level plus subnode-level
// statics. Duplicates are filtered out.
func detectSubNode(node parser.Node, parserCTX parser.UnixParserCtx) []string {
	var ret = make([]string, 0)

	for _, subnode := range node.LeavesNodes {
		if subnode.Grok.RunTimeRegexp != nil {
			for _, capturedField := range subnode.Grok.RunTimeRegexp.Names() {
				fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField)
				if !inSlice(fieldName, ret) {
					ret = append(ret, fieldName)
				}
			}
		}
		if subnode.Grok.RegexpName != "" {
			grokCompiled, err := parserCTX.Grok.Get(subnode.Grok.RegexpName)
			if err != nil {
				// skip this subgrok entirely: on error grokCompiled may be a
				// zero value and calling Names() on it could panic
				log.Warningf("Can't get subgrok: %s", err)
			} else {
				for _, capturedField := range grokCompiled.Names() {
					fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField)
					if !inSlice(fieldName, ret) {
						ret = append(ret, fieldName)
					}
				}
			}
		}
		if len(subnode.Grok.Statics) > 0 {
			staticsField := detectStaticField(subnode.Grok.Statics)
			for _, staticField := range staticsField {
				if !inSlice(staticField, ret) {
					ret = append(ret, staticField)
				}
			}
		}
		if len(subnode.Statics) > 0 {
			staticsField := detectStaticField(subnode.Statics)
			for _, staticField := range staticsField {
				if !inSlice(staticField, ret) {
					ret = append(ret, staticField)
				}
			}
		}
	}

	return ret
}

View file

@ -29,22 +29,15 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/types"
)
var machineID string
var machinePassword string
var interactive bool
var apiURL string
var outputFile string
var forceAdd bool
var autoAdd bool
var (
passwordLength = 64
upper = "ABCDEFGHIJKLMNOPQRSTUVWXY"
lower = "abcdefghijklmnopqrstuvwxyz"
digits = "0123456789"
)
func generatePassword(length int) string {
upper := "ABCDEFGHIJKLMNOPQRSTUVWXY"
lower := "abcdefghijklmnopqrstuvwxyz"
digits := "0123456789"
charset := upper + lower + digits
charsetLength := len(charset)
@ -149,32 +142,8 @@ func getAgents(out io.Writer, dbClient *database.Client) error {
return nil
}
func NewMachinesCmd() *cobra.Command {
/* ---- DECISIONS COMMAND */
var cmdMachines = &cobra.Command{
Use: "machines [action]",
Short: "Manage local API machines [requires local API]",
Long: `To list/add/delete/validate machines.
Note: This command requires database direct access, so is intended to be run on the local API machine.
`,
Example: `cscli machines [action]`,
DisableAutoGenTag: true,
Aliases: []string{"machine"},
PersistentPreRun: func(cmd *cobra.Command, args []string) {
if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI {
if err != nil {
log.Errorf("local api : %s", err)
}
log.Fatal("Local API is disabled, please run this command on the local API machine")
}
if err := csConfig.LoadDBConfig(); err != nil {
log.Errorf("This command requires direct database access (must be run on the local API machine)")
log.Fatal(err)
}
},
}
var cmdMachinesList = &cobra.Command{
func NewMachinesListCmd() *cobra.Command {
cmdMachinesList := &cobra.Command{
Use: "list",
Short: "List machines",
Long: `List `,
@ -195,9 +164,12 @@ Note: This command requires database direct access, so is intended to be run on
}
},
}
cmdMachines.AddCommand(cmdMachinesList)
var cmdMachinesAdd = &cobra.Command{
return cmdMachinesList
}
func NewMachinesAddCmd() *cobra.Command {
cmdMachinesAdd := &cobra.Command{
Use: "add",
Short: "add machine to the database.",
DisableAutoGenTag: true,
@ -214,93 +186,135 @@ cscli machines add MyTestMachine --password MyPassword
log.Fatalf("unable to create new database client: %s", err)
}
},
Run: func(cmd *cobra.Command, args []string) {
var dumpFile string
var err error
// create machineID if not specified by user
if len(args) == 0 {
if !autoAdd {
printHelp(cmd)
return
}
machineID, err = generateID("")
if err != nil {
log.Fatalf("unable to generate machine id : %s", err)
}
} else {
machineID = args[0]
}
/*check if file already exists*/
if outputFile != "" {
dumpFile = outputFile
} else if csConfig.API.Client != nil && csConfig.API.Client.CredentialsFilePath != "" {
dumpFile = csConfig.API.Client.CredentialsFilePath
}
// create a password if it's not specified by user
if machinePassword == "" && !interactive {
if !autoAdd {
printHelp(cmd)
return
}
machinePassword = generatePassword(passwordLength)
} else if machinePassword == "" && interactive {
qs := &survey.Password{
Message: "Please provide a password for the machine",
}
survey.AskOne(qs, &machinePassword)
}
password := strfmt.Password(machinePassword)
_, err = dbClient.CreateMachine(&machineID, &password, "", true, forceAdd, types.PasswordAuthType)
if err != nil {
log.Fatalf("unable to create machine: %s", err)
}
log.Infof("Machine '%s' successfully added to the local API", machineID)
if apiURL == "" {
if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil && csConfig.API.Client.Credentials.URL != "" {
apiURL = csConfig.API.Client.Credentials.URL
} else if csConfig.API.Server != nil && csConfig.API.Server.ListenURI != "" {
apiURL = "http://" + csConfig.API.Server.ListenURI
} else {
log.Fatalf("unable to dump an api URL. Please provide it in your configuration or with the -u parameter")
}
}
apiCfg := csconfig.ApiCredentialsCfg{
Login: machineID,
Password: password.String(),
URL: apiURL,
}
apiConfigDump, err := yaml.Marshal(apiCfg)
if err != nil {
log.Fatalf("unable to marshal api credentials: %s", err)
}
if dumpFile != "" && dumpFile != "-" {
err = os.WriteFile(dumpFile, apiConfigDump, 0644)
if err != nil {
log.Fatalf("write api credentials in '%s' failed: %s", dumpFile, err)
}
log.Printf("API credentials dumped to '%s'", dumpFile)
} else {
fmt.Printf("%s\n", string(apiConfigDump))
}
},
RunE: runMachinesAdd,
}
cmdMachinesAdd.Flags().StringVarP(&machinePassword, "password", "p", "", "machine password to login to the API")
cmdMachinesAdd.Flags().StringVarP(&outputFile, "file", "f", "",
"output file destination (defaults to "+csconfig.DefaultConfigPath("local_api_credentials.yaml"))
cmdMachinesAdd.Flags().StringVarP(&apiURL, "url", "u", "", "URL of the local API")
cmdMachinesAdd.Flags().BoolVarP(&interactive, "interactive", "i", false, "interfactive mode to enter the password")
cmdMachinesAdd.Flags().BoolVarP(&autoAdd, "auto", "a", false, "automatically generate password (and username if not provided)")
cmdMachinesAdd.Flags().BoolVar(&forceAdd, "force", false, "will force add the machine if it already exist")
cmdMachines.AddCommand(cmdMachinesAdd)
var cmdMachinesDelete = &cobra.Command{
Use: "delete --machine MyTestMachine",
flags := cmdMachinesAdd.Flags()
flags.StringP("password", "p", "", "machine password to login to the API")
flags.StringP("file", "f", "", "output file destination (defaults to "+csconfig.DefaultConfigPath("local_api_credentials.yaml"))
flags.StringP("url", "u", "", "URL of the local API")
flags.BoolP("interactive", "i", false, "interfactive mode to enter the password")
flags.BoolP("auto", "a", false, "automatically generate password (and username if not provided)")
flags.Bool("force", false, "will force add the machine if it already exist")
return cmdMachinesAdd
}
// runMachinesAdd creates a machine in the local database and dumps the
// resulting API credentials (login, password, URL) to a file or stdout.
//
// The machine name comes from args[0] or is generated with --auto; the
// password comes from --password, an interactive prompt (--interactive), or
// is generated with --auto. Fatal errors terminate the process.
func runMachinesAdd(cmd *cobra.Command, args []string) error {
	var dumpFile string
	var err error

	flags := cmd.Flags()

	machinePassword, err := flags.GetString("password")
	if err != nil {
		return err
	}

	outputFile, err := flags.GetString("file")
	if err != nil {
		return err
	}

	apiURL, err := flags.GetString("url")
	if err != nil {
		return err
	}

	interactive, err := flags.GetBool("interactive")
	if err != nil {
		return err
	}

	autoAdd, err := flags.GetBool("auto")
	if err != nil {
		return err
	}

	forceAdd, err := flags.GetBool("force")
	if err != nil {
		return err
	}

	var machineID string

	// create machineID if not specified by user
	if len(args) == 0 {
		if !autoAdd {
			printHelp(cmd)
			return nil
		}
		machineID, err = generateID("")
		if err != nil {
			log.Fatalf("unable to generate machine id : %s", err)
		}
	} else {
		machineID = args[0]
	}

	/*check if file already exists*/
	if outputFile != "" {
		dumpFile = outputFile
	} else if csConfig.API.Client != nil && csConfig.API.Client.CredentialsFilePath != "" {
		dumpFile = csConfig.API.Client.CredentialsFilePath
	}

	// create a password if it's not specified by user
	if machinePassword == "" && !interactive {
		if !autoAdd {
			printHelp(cmd)
			return nil
		}
		machinePassword = generatePassword(passwordLength)
	} else if machinePassword == "" && interactive {
		qs := &survey.Password{
			Message: "Please provide a password for the machine",
		}
		// the prompt error was previously ignored, which could silently
		// register the machine with an empty password
		if err := survey.AskOne(qs, &machinePassword); err != nil {
			log.Fatalf("unable to ask for the machine password: %s", err)
		}
	}

	password := strfmt.Password(machinePassword)
	_, err = dbClient.CreateMachine(&machineID, &password, "", true, forceAdd, types.PasswordAuthType)
	if err != nil {
		log.Fatalf("unable to create machine: %s", err)
	}
	log.Infof("Machine '%s' successfully added to the local API", machineID)

	// resolve the API URL to dump: flag, then client credentials, then the
	// server listen address
	if apiURL == "" {
		if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil && csConfig.API.Client.Credentials.URL != "" {
			apiURL = csConfig.API.Client.Credentials.URL
		} else if csConfig.API.Server != nil && csConfig.API.Server.ListenURI != "" {
			apiURL = "http://" + csConfig.API.Server.ListenURI
		} else {
			log.Fatalf("unable to dump an api URL. Please provide it in your configuration or with the -u parameter")
		}
	}

	apiCfg := csconfig.ApiCredentialsCfg{
		Login:    machineID,
		Password: password.String(),
		URL:      apiURL,
	}
	apiConfigDump, err := yaml.Marshal(apiCfg)
	if err != nil {
		log.Fatalf("unable to marshal api credentials: %s", err)
	}
	// "-" (or no destination at all) means stdout
	if dumpFile != "" && dumpFile != "-" {
		err = os.WriteFile(dumpFile, apiConfigDump, 0644)
		if err != nil {
			log.Fatalf("write api credentials in '%s' failed: %s", dumpFile, err)
		}
		log.Printf("API credentials dumped to '%s'", dumpFile)
	} else {
		fmt.Printf("%s\n", string(apiConfigDump))
	}

	return nil
}
func NewMachinesDeleteCmd() *cobra.Command {
cmdMachinesDelete := &cobra.Command{
Use: "delete [machine_name]...",
Short: "delete machines",
Example: `cscli machines delete "machine_name"`,
Example: `cscli machines delete "machine1" "machine2"`,
Args: cobra.MinimumNArgs(1),
Aliases: []string{"remove"},
DisableAutoGenTag: true,
@ -330,22 +344,27 @@ cscli machines add MyTestMachine --password MyPassword
}
return ret, cobra.ShellCompDirectiveNoFileComp
},
Run: func(cmd *cobra.Command, args []string) {
machineID = args[0]
for _, machineID := range args {
err := dbClient.DeleteWatcher(machineID)
if err != nil {
log.Errorf("unable to delete machine '%s': %s", machineID, err)
return
}
log.Infof("machine '%s' deleted successfully", machineID)
}
},
RunE: runMachinesDelete,
}
cmdMachinesDelete.Flags().StringVarP(&machineID, "machine", "m", "", "machine to delete")
cmdMachines.AddCommand(cmdMachinesDelete)
var cmdMachinesValidate = &cobra.Command{
return cmdMachinesDelete
}
// runMachinesDelete removes each machine named in args from the database.
// It stops at the first failure and returns that error, so the command exits
// with a non-zero status instead of silently reporting success (the original
// logged the error but returned nil from RunE).
func runMachinesDelete(cmd *cobra.Command, args []string) error {
	for _, machineID := range args {
		if err := dbClient.DeleteWatcher(machineID); err != nil {
			log.Errorf("unable to delete machine '%s': %s", machineID, err)
			return err
		}
		log.Infof("machine '%s' deleted successfully", machineID)
	}
	return nil
}
func NewMachinesValidateCmd() *cobra.Command {
cmdMachinesValidate := &cobra.Command{
Use: "validate",
Short: "validate a machine to access the local API",
Long: `validate a machine to access the local API.`,
@ -360,14 +379,41 @@ cscli machines add MyTestMachine --password MyPassword
}
},
Run: func(cmd *cobra.Command, args []string) {
machineID = args[0]
machineID := args[0]
if err := dbClient.ValidateMachine(machineID); err != nil {
log.Fatalf("unable to validate machine '%s': %s", machineID, err)
}
log.Infof("machine '%s' validated successfully", machineID)
},
}
cmdMachines.AddCommand(cmdMachinesValidate)
return cmdMachinesValidate
}
// NewMachinesCmd builds the `cscli machines` command tree
// (list/add/delete/validate). These commands need direct database access and
// are meant to run on the Local API machine; the persistent pre-run aborts
// when the local API is disabled or fails to load.
func NewMachinesCmd() *cobra.Command {
	cmdMachines := &cobra.Command{
		Use:   "machines [action]",
		Short: "Manage local API machines [requires local API]",
		Long: `To list/add/delete/validate machines.
Note: This command requires database direct access, so is intended to be run on the local API machine.
`,
		Example:           `cscli machines [action]`,
		DisableAutoGenTag: true,
		Aliases:           []string{"machine"},
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI {
				if err != nil {
					log.Errorf("local api : %s", err)
				}
				log.Fatal("Local API is disabled, please run this command on the local API machine")
			}
		},
	}

	for _, sub := range []*cobra.Command{
		NewMachinesListCmd(),
		NewMachinesAddCmd(),
		NewMachinesDeleteCmd(),
		NewMachinesValidateCmd(),
	} {
		cmdMachines.AddCommand(sub)
	}

	return cmdMachines
}

View file

@ -18,6 +18,7 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
"github.com/crowdsecurity/crowdsec/pkg/cwversion"
"github.com/crowdsecurity/crowdsec/pkg/database"
"github.com/crowdsecurity/crowdsec/pkg/fflag"
)
var bincoverTesting = ""
@ -35,7 +36,6 @@ var downloadOnly bool
var forceAction bool
var purge bool
var all bool
var restoreOldBackup bool
var prometheusURL string
@ -52,11 +52,9 @@ func initConfig() {
} else if err_lvl {
log.SetLevel(log.ErrorLevel)
}
logFormatter := &log.TextFormatter{TimestampFormat: "02-01-2006 15:04:05", FullTimestamp: true}
log.SetFormatter(logFormatter)
if !inSlice(os.Args[1], NoNeedConfig) {
csConfig, err = csconfig.NewConfig(ConfigFilePath, false, false)
csConfig, err = csconfig.NewConfig(ConfigFilePath, false, false, true)
if err != nil {
log.Fatal(err)
}
@ -68,6 +66,11 @@ func initConfig() {
csConfig = csconfig.NewDefaultConfig()
}
featurePath := filepath.Join(csConfig.ConfigPaths.ConfigDir, "feature.yaml")
if err = fflag.Crowdsec.SetFromYamlFile(featurePath, log.StandardLogger()); err != nil {
log.Fatalf("File %s: %s", featurePath, err)
}
if csConfig.Cscli == nil {
log.Fatalf("missing 'cscli' configuration in '%s', exiting", ConfigFilePath)
}
@ -130,6 +133,19 @@ var (
)
func main() {
// set the formatter asap and worry about level later
logFormatter := &log.TextFormatter{TimestampFormat: "02-01-2006 15:04:05", FullTimestamp: true}
log.SetFormatter(logFormatter)
if err := fflag.RegisterAllFeatures(); err != nil {
log.Fatalf("failed to register features: %s", err)
}
// some features can require configuration or command-line options,
// so we need to parse them asap. we'll load from feature.yaml later.
if err := fflag.Crowdsec.SetFromEnv(log.StandardLogger()); err != nil {
log.Fatalf("failed to set features from environment: %s", err)
}
var rootCmd = &cobra.Command{
Use: "cscli",

View file

@ -57,6 +57,10 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error
lapi_bouncer_stats := map[string]map[string]map[string]int{}
decisions_stats := map[string]map[string]map[string]int{}
alerts_stats := map[string]int{}
stash_stats := map[string]struct {
Type string
Count int
}{}
for idx, fam := range result {
if !strings.HasPrefix(fam.Name, "cs_") {
@ -93,6 +97,8 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error
origin := metric.Labels["origin"]
action := metric.Labels["action"]
mtype := metric.Labels["type"]
fval, err := strconv.ParseFloat(value, 32)
if err != nil {
log.Errorf("Unexpected int value %s : %s", value, err)
@ -208,6 +214,11 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error
alerts_stats[scenario] = make(map[string]int)
}*/
alerts_stats[reason] += ival
case "cs_cache_size":
stash_stats[name] = struct {
Type string
Count int
}{Type: mtype, Count: ival}
default:
continue
}
@ -225,8 +236,9 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error
lapiDecisionStatsTable(out, lapi_decisions_stats)
decisionStatsTable(out, decisions_stats)
alertStatsTable(out, alerts_stats)
stashStatsTable(out, stash_stats)
} else if formatType == "json" {
for _, val := range []interface{}{acquis_stats, parsers_stats, buckets_stats, lapi_stats, lapi_bouncer_stats, lapi_machine_stats, lapi_decisions_stats, decisions_stats, alerts_stats} {
for _, val := range []interface{}{acquis_stats, parsers_stats, buckets_stats, lapi_stats, lapi_bouncer_stats, lapi_machine_stats, lapi_decisions_stats, decisions_stats, alerts_stats, stash_stats} {
x, err := json.MarshalIndent(val, "", " ")
if err != nil {
return fmt.Errorf("failed to unmarshal metrics : %v", err)
@ -236,7 +248,7 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error
return nil
} else if formatType == "raw" {
for _, val := range []interface{}{acquis_stats, parsers_stats, buckets_stats, lapi_stats, lapi_bouncer_stats, lapi_machine_stats, lapi_decisions_stats, decisions_stats, alerts_stats} {
for _, val := range []interface{}{acquis_stats, parsers_stats, buckets_stats, lapi_stats, lapi_bouncer_stats, lapi_machine_stats, lapi_decisions_stats, decisions_stats, alerts_stats, stash_stats} {
x, err := yaml.Marshal(val)
if err != nil {
return fmt.Errorf("failed to unmarshal metrics : %v", err)

View file

@ -129,6 +129,41 @@ func parserStatsTable(out io.Writer, stats map[string]map[string]int) {
}
}
// stashStatsTable renders the parser stash (cache) metrics as a three-column
// table ("Name", "Type", "Items"), with rows sorted by stash name.
// Nothing is written when there are no entries.
func stashStatsTable(out io.Writer, stats map[string]struct {
	Type  string
	Count int
}) {
	t := newTable(out)
	t.SetRowLines(false)
	t.SetHeaders("Name", "Type", "Items")
	t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft)

	// unfortunately, we can't reuse metricsToTable as the structure is too different :/
	names := make([]string, 0, len(stats))
	for name := range stats {
		names = append(names, name)
	}
	sort.Strings(names)

	for _, name := range names {
		entry := stats[name]
		t.AddRow(name, entry.Type, fmt.Sprintf("%d", entry.Count))
	}

	// every sorted key produced exactly one row, so len(names) is the row count
	if len(names) > 0 {
		renderTableTitle(out, "\nParser Stash Metrics:")
		t.Render()
	}
}
func lapiStatsTable(out io.Writer, stats map[string]map[string]int) {
t := newTable(out)
t.SetRowLines(false)

View file

@ -10,8 +10,150 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
)
// NewPostOverflowsInstallCmd builds "cscli postoverflows install", which
// fetches and installs one or more postoverflows from the hub. Unknown names
// get a nearest-match suggestion; with --ignore, install errors are logged
// instead of being fatal.
func NewPostOverflowsInstallCmd() *cobra.Command {
	var ignoreError bool

	run := func(cmd *cobra.Command, args []string) {
		for _, name := range args {
			if item := cwhub.GetItem(cwhub.PARSERS_OVFLW, name); item == nil {
				// Unknown item: suggest the closest hub entry and move on.
				nearestItem, score := GetDistance(cwhub.PARSERS_OVFLW, name)
				Suggest(cwhub.PARSERS_OVFLW, name, nearestItem.Name, score, ignoreError)
				continue
			}
			err := cwhub.InstallItem(csConfig, name, cwhub.PARSERS_OVFLW, forceAction, downloadOnly)
			if err == nil {
				continue
			}
			if ignoreError {
				log.Errorf("Error while installing '%s': %s", name, err)
			} else {
				log.Fatalf("Error while installing '%s': %s", name, err)
			}
		}
	}

	cmdPostOverflowsInstall := &cobra.Command{
		Use:               "install [config]",
		Short:             "Install given postoverflow(s)",
		Long:              `Fetch and install given postoverflow(s) from hub`,
		Example:           `cscli postoverflows install crowdsec/xxx crowdsec/xyz`,
		Args:              cobra.MinimumNArgs(1),
		DisableAutoGenTag: true,
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return compAllItems(cwhub.PARSERS_OVFLW, args, toComplete)
		},
		Run: run,
	}

	flags := cmdPostOverflowsInstall.PersistentFlags()
	flags.BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable")
	flags.BoolVar(&forceAction, "force", false, "Force install : Overwrite tainted and outdated files")
	flags.BoolVar(&ignoreError, "ignore", false, "Ignore errors when installing multiple postoverflows")

	return cmdPostOverflowsInstall
}
// NewPostOverflowsRemoveCmd builds "cscli postoverflows remove" (alias:
// "delete"). With --all, every postoverflow is removed; otherwise at least one
// name must be given. --purge also deletes the source file.
func NewPostOverflowsRemoveCmd() *cobra.Command {
	cmdPostOverflowsRemove := &cobra.Command{
		Use:               "remove [config]",
		Short:             "Remove given postoverflow(s)",
		Long:              `remove given postoverflow(s)`,
		Example:           `cscli postoverflows remove crowdsec/xxx crowdsec/xyz`,
		DisableAutoGenTag: true,
		Aliases:           []string{"delete"},
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return compInstalledItems(cwhub.PARSERS_OVFLW, args, toComplete)
		},
		Run: func(cmd *cobra.Command, args []string) {
			// --all removes everything in one call; names are ignored.
			if all {
				cwhub.RemoveMany(csConfig, cwhub.PARSERS_OVFLW, "", all, purge, forceAction)
				return
			}

			if len(args) == 0 {
				log.Fatalf("Specify at least one postoverflow to remove or '--all' flag.")
			}

			for _, name := range args {
				cwhub.RemoveMany(csConfig, cwhub.PARSERS_OVFLW, name, all, purge, forceAction)
			}
		},
	}

	flags := cmdPostOverflowsRemove.PersistentFlags()
	flags.BoolVar(&purge, "purge", false, "Delete source file too")
	flags.BoolVar(&forceAction, "force", false, "Force remove : Remove tainted and outdated files")
	flags.BoolVar(&all, "all", false, "Delete all the postoverflows")

	return cmdPostOverflowsRemove
}
// NewPostOverflowsUpgradeCmd builds "cscli postoverflows upgrade", which
// re-downloads postoverflows from the hub. With --all every installed
// postoverflow is upgraded; otherwise at least one name is required.
func NewPostOverflowsUpgradeCmd() *cobra.Command {
	cmdPostOverflowsUpgrade := &cobra.Command{
		Use:               "upgrade [config]",
		Short:             "Upgrade given postoverflow(s)",
		Long:              `Fetch and Upgrade given postoverflow(s) from hub`,
		Example:           `cscli postoverflows upgrade crowdsec/xxx crowdsec/xyz`,
		DisableAutoGenTag: true,
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return compInstalledItems(cwhub.PARSERS_OVFLW, args, toComplete)
		},
		Run: func(cmd *cobra.Command, args []string) {
			// Early return on --all keeps the per-name path unindented.
			if all {
				cwhub.UpgradeConfig(csConfig, cwhub.PARSERS_OVFLW, "", forceAction)
				return
			}

			if len(args) == 0 {
				log.Fatalf("no target postoverflow to upgrade")
			}

			for _, name := range args {
				cwhub.UpgradeConfig(csConfig, cwhub.PARSERS_OVFLW, name, forceAction)
			}
		},
	}

	flags := cmdPostOverflowsUpgrade.PersistentFlags()
	flags.BoolVarP(&all, "all", "a", false, "Upgrade all the postoverflows")
	flags.BoolVar(&forceAction, "force", false, "Force upgrade : Overwrite tainted and outdated files")

	return cmdPostOverflowsUpgrade
}
// NewPostOverflowsInspectCmd builds "cscli postoverflows inspect", which
// prints the details of a single installed postoverflow. Exactly one
// argument is consumed (InspectItem only receives args[0]).
func NewPostOverflowsInspectCmd() *cobra.Command {
	return &cobra.Command{
		Use:               "inspect [config]",
		Short:             "Inspect given postoverflow",
		Long:              `Inspect given postoverflow`,
		Example:           `cscli postoverflows inspect crowdsec/xxx crowdsec/xyz`,
		DisableAutoGenTag: true,
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return compInstalledItems(cwhub.PARSERS_OVFLW, args, toComplete)
		},
		Args: cobra.MinimumNArgs(1),
		Run: func(cmd *cobra.Command, args []string) {
			InspectItem(args[0], cwhub.PARSERS_OVFLW)
		},
	}
}
// NewPostOverflowsListCmd builds "cscli postoverflows list", showing all
// postoverflows or only the named ones; --all includes disabled items.
func NewPostOverflowsListCmd() *cobra.Command {
	cmdPostOverflowsList := &cobra.Command{
		Use:   "list [config]",
		Short: "List all postoverflows or given one",
		Long:  `List all postoverflows or given one`,
		Example: `cscli postoverflows list
cscli postoverflows list crowdsecurity/xxx`,
		DisableAutoGenTag: true,
		Run: func(cmd *cobra.Command, args []string) {
			ListItems(color.Output, []string{cwhub.PARSERS_OVFLW}, args, false, true, all)
		},
	}

	cmdPostOverflowsList.PersistentFlags().BoolVarP(&all, "all", "a", false, "List disabled items as well")

	return cmdPostOverflowsList
}
func NewPostOverflowsCmd() *cobra.Command {
var cmdPostOverflows = &cobra.Command{
cmdPostOverflows := &cobra.Command{
Use: "postoverflows [action] [config]",
Short: "Install/Remove/Upgrade/Inspect postoverflow(s) from hub",
Example: `cscli postoverflows install crowdsecurity/cdn-whitelist
@ -48,125 +190,11 @@ func NewPostOverflowsCmd() *cobra.Command {
},
}
var ignoreError bool
var cmdPostOverflowsInstall = &cobra.Command{
Use: "install [config]",
Short: "Install given postoverflow(s)",
Long: `Fetch and install given postoverflow(s) from hub`,
Example: `cscli postoverflows install crowdsec/xxx crowdsec/xyz`,
Args: cobra.MinimumNArgs(1),
DisableAutoGenTag: true,
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return compAllItems(cwhub.PARSERS_OVFLW, args, toComplete)
},
Run: func(cmd *cobra.Command, args []string) {
for _, name := range args {
t := cwhub.GetItem(cwhub.PARSERS_OVFLW, name)
if t == nil {
nearestItem, score := GetDistance(cwhub.PARSERS_OVFLW, name)
Suggest(cwhub.PARSERS_OVFLW, name, nearestItem.Name, score, ignoreError)
continue
}
if err := cwhub.InstallItem(csConfig, name, cwhub.PARSERS_OVFLW, forceAction, downloadOnly); err != nil {
if ignoreError {
log.Errorf("Error while installing '%s': %s", name, err)
} else {
log.Fatalf("Error while installing '%s': %s", name, err)
}
}
}
},
}
cmdPostOverflowsInstall.PersistentFlags().BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable")
cmdPostOverflowsInstall.PersistentFlags().BoolVar(&forceAction, "force", false, "Force install : Overwrite tainted and outdated files")
cmdPostOverflowsInstall.PersistentFlags().BoolVar(&ignoreError, "ignore", false, "Ignore errors when installing multiple postoverflows")
cmdPostOverflows.AddCommand(cmdPostOverflowsInstall)
var cmdPostOverflowsRemove = &cobra.Command{
Use: "remove [config]",
Short: "Remove given postoverflow(s)",
Long: `remove given postoverflow(s)`,
Example: `cscli postoverflows remove crowdsec/xxx crowdsec/xyz`,
DisableAutoGenTag: true,
Aliases: []string{"delete"},
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return compInstalledItems(cwhub.PARSERS_OVFLW, args, toComplete)
},
Run: func(cmd *cobra.Command, args []string) {
if all {
cwhub.RemoveMany(csConfig, cwhub.PARSERS_OVFLW, "", all, purge, forceAction)
return
}
if len(args) == 0 {
log.Fatalf("Specify at least one postoverflow to remove or '--all' flag.")
}
for _, name := range args {
cwhub.RemoveMany(csConfig, cwhub.PARSERS_OVFLW, name, all, purge, forceAction)
}
},
}
cmdPostOverflowsRemove.PersistentFlags().BoolVar(&purge, "purge", false, "Delete source file too")
cmdPostOverflowsRemove.PersistentFlags().BoolVar(&forceAction, "force", false, "Force remove : Remove tainted and outdated files")
cmdPostOverflowsRemove.PersistentFlags().BoolVar(&all, "all", false, "Delete all the postoverflows")
cmdPostOverflows.AddCommand(cmdPostOverflowsRemove)
var cmdPostOverflowsUpgrade = &cobra.Command{
Use: "upgrade [config]",
Short: "Upgrade given postoverflow(s)",
Long: `Fetch and Upgrade given postoverflow(s) from hub`,
Example: `cscli postoverflows upgrade crowdsec/xxx crowdsec/xyz`,
DisableAutoGenTag: true,
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return compInstalledItems(cwhub.PARSERS_OVFLW, args, toComplete)
},
Run: func(cmd *cobra.Command, args []string) {
if all {
cwhub.UpgradeConfig(csConfig, cwhub.PARSERS_OVFLW, "", forceAction)
} else {
if len(args) == 0 {
log.Fatalf("no target postoverflow to upgrade")
}
for _, name := range args {
cwhub.UpgradeConfig(csConfig, cwhub.PARSERS_OVFLW, name, forceAction)
}
}
},
}
cmdPostOverflowsUpgrade.PersistentFlags().BoolVarP(&all, "all", "a", false, "Upgrade all the postoverflows")
cmdPostOverflowsUpgrade.PersistentFlags().BoolVar(&forceAction, "force", false, "Force upgrade : Overwrite tainted and outdated files")
cmdPostOverflows.AddCommand(cmdPostOverflowsUpgrade)
var cmdPostOverflowsInspect = &cobra.Command{
Use: "inspect [config]",
Short: "Inspect given postoverflow",
Long: `Inspect given postoverflow`,
Example: `cscli postoverflows inspect crowdsec/xxx crowdsec/xyz`,
DisableAutoGenTag: true,
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return compInstalledItems(cwhub.PARSERS_OVFLW, args, toComplete)
},
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
InspectItem(args[0], cwhub.PARSERS_OVFLW)
},
}
cmdPostOverflows.AddCommand(cmdPostOverflowsInspect)
var cmdPostOverflowsList = &cobra.Command{
Use: "list [config]",
Short: "List all postoverflows or given one",
Long: `List all postoverflows or given one`,
Example: `cscli postoverflows list
cscli postoverflows list crowdsecurity/xxx`,
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
ListItems(color.Output, []string{cwhub.PARSERS_OVFLW}, args, false, true, all)
},
}
cmdPostOverflowsList.PersistentFlags().BoolVarP(&all, "all", "a", false, "List disabled items as well")
cmdPostOverflows.AddCommand(cmdPostOverflowsList)
cmdPostOverflows.AddCommand(NewPostOverflowsInstallCmd())
cmdPostOverflows.AddCommand(NewPostOverflowsRemoveCmd())
cmdPostOverflows.AddCommand(NewPostOverflowsUpgradeCmd())
cmdPostOverflows.AddCommand(NewPostOverflowsInspectCmd())
cmdPostOverflows.AddCommand(NewPostOverflowsListCmd())
return cmdPostOverflows
}

View file

@ -22,6 +22,7 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
"github.com/crowdsecurity/crowdsec/pkg/cwversion"
"github.com/crowdsecurity/crowdsec/pkg/database"
"github.com/crowdsecurity/crowdsec/pkg/fflag"
"github.com/crowdsecurity/crowdsec/pkg/models"
"github.com/crowdsecurity/crowdsec/pkg/types"
)
@ -30,6 +31,7 @@ const (
SUPPORT_METRICS_HUMAN_PATH = "metrics/metrics.human"
SUPPORT_METRICS_PROMETHEUS_PATH = "metrics/metrics.prometheus"
SUPPORT_VERSION_PATH = "version.txt"
SUPPORT_FEATURES_PATH = "features.txt"
SUPPORT_OS_INFO_PATH = "osinfo.txt"
SUPPORT_PARSERS_PATH = "hub/parsers.txt"
SUPPORT_SCENARIOS_PATH = "hub/scenarios.txt"
@ -89,6 +91,18 @@ func collectVersion() []byte {
return []byte(cwversion.ShowStr())
}
// collectFeatures returns the enabled feature flags for the support bundle,
// one flag name per line.
func collectFeatures() []byte {
	log.Info("Collecting feature flags")

	var buf bytes.Buffer
	for _, feature := range fflag.Crowdsec.GetEnabledFeatures() {
		buf.WriteString(feature)
		buf.WriteByte('\n')
	}

	return buf.Bytes()
}
func collectOSInfo() ([]byte, error) {
log.Info("Collecting OS info")
info, err := osinfo.GetOSInfo()
@ -264,6 +278,7 @@ cscli support dump -f /tmp/crowdsec-support.zip
var skipHub, skipDB, skipCAPI, skipLAPI, skipAgent bool
infos := map[string][]byte{
SUPPORT_VERSION_PATH: collectVersion(),
SUPPORT_FEATURES_PATH: collectFeatures(),
}
if outFile == "" {
@ -271,7 +286,6 @@ cscli support dump -f /tmp/crowdsec-support.zip
}
dbClient, err = database.NewClient(csConfig.DbConfig)
if err != nil {
log.Warnf("Could not connect to database: %s", err)
skipDB = true
@ -291,7 +305,6 @@ cscli support dump -f /tmp/crowdsec-support.zip
}
err = initHub()
if err != nil {
log.Warn("Could not init hub, running on LAPI ? Hub related information will not be collected")
skipHub = true
@ -309,7 +322,7 @@ cscli support dump -f /tmp/crowdsec-support.zip
skipLAPI = true
}
if csConfig.API.Server == nil || csConfig.API.Server.OnlineClient.Credentials == nil {
if csConfig.API.Server == nil || csConfig.API.Server.OnlineClient == nil || csConfig.API.Server.OnlineClient.Credentials == nil {
log.Warn("no CAPI credentials found, skipping CAPI connectivity check")
skipCAPI = true
}
@ -322,7 +335,6 @@ cscli support dump -f /tmp/crowdsec-support.zip
}
infos[SUPPORT_OS_INFO_PATH], err = collectOSInfo()
if err != nil {
log.Warnf("could not collect OS information: %s", err)
infos[SUPPORT_OS_INFO_PATH] = []byte(err.Error())
@ -389,14 +401,17 @@ cscli support dump -f /tmp/crowdsec-support.zip
}
fw.Write([]byte(types.StripAnsiString(string(data))))
}
err = zipWriter.Close()
if err != nil {
log.Fatalf("could not finalize zip file: %s", err)
}
err = os.WriteFile(outFile, w.Bytes(), 0600)
if err != nil {
log.Fatalf("could not write zip file to %s: %s", outFile, err)
}
log.Infof("Written zip file to %s", outFile)
},
}

View file

@ -739,12 +739,34 @@ func getDBClient() (*database.Client, error) {
if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI {
return nil, err
}
if err := csConfig.LoadDBConfig(); err != nil {
return nil, err
}
ret, err := database.NewClient(csConfig.DbConfig)
if err != nil {
return nil, err
}
return ret, nil
}
// removeFromSlice deletes the first occurrence of val from slice and returns
// the result. The removal swaps the matched element with the last one before
// truncating, so element order is NOT preserved. The input's backing array is
// mutated; when val is absent the slice is returned unchanged.
func removeFromSlice(val string, slice []string) []string {
	for idx, elem := range slice {
		if elem == val {
			last := len(slice) - 1
			// swap-with-last, zero the vacated slot, then shrink by one
			slice[idx] = slice[last]
			slice[last] = ""
			return slice[:last]
		}
	}

	// value not found: nothing to remove
	return slice
}

View file

@ -28,7 +28,7 @@ func initCrowdsec(cConfig *csconfig.Config) (*parser.Parsers, error) {
}
// Start loading configs
csParsers := newParsers()
csParsers := parser.NewParsers()
if csParsers, err = parser.LoadParsers(cConfig, csParsers); err != nil {
return &parser.Parsers{}, fmt.Errorf("Failed to load parsers: %s", err)
}

View file

@ -5,8 +5,8 @@ import (
"fmt"
_ "net/http/pprof"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"time"
@ -20,6 +20,7 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/csplugin"
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
"github.com/crowdsecurity/crowdsec/pkg/cwversion"
"github.com/crowdsecurity/crowdsec/pkg/fflag"
"github.com/crowdsecurity/crowdsec/pkg/leakybucket"
"github.com/crowdsecurity/crowdsec/pkg/parser"
"github.com/crowdsecurity/crowdsec/pkg/types"
@ -70,45 +71,6 @@ type Flags struct {
type labelsMap map[string]string
// Return new parsers
// nodes and povfwnodes are already initialized in parser.LoadStages
// newParsers builds a parser.Parsers populated with the stage files of every
// installed hub parser and postoverflow, each list sorted by filename.
// nodes and povfwnodes are already initialized in parser.LoadStages.
func newParsers() *parser.Parsers {
	parsers := &parser.Parsers{
		Ctx:             &parser.UnixParserCtx{},
		Povfwctx:        &parser.UnixParserCtx{},
		StageFiles:      make([]parser.Stagefile, 0),
		PovfwStageFiles: make([]parser.Stagefile, 0),
	}

	for _, itemType := range []string{cwhub.PARSERS, cwhub.PARSERS_OVFLW} {
		for _, hubParserItem := range cwhub.GetItemMap(itemType) {
			if !hubParserItem.Installed {
				continue
			}
			stagefile := parser.Stagefile{
				Filename: hubParserItem.LocalPath,
				Stage:    hubParserItem.Stage,
			}
			switch itemType {
			case cwhub.PARSERS:
				parsers.StageFiles = append(parsers.StageFiles, stagefile)
			case cwhub.PARSERS_OVFLW:
				parsers.PovfwStageFiles = append(parsers.PovfwStageFiles, stagefile)
			}
		}
	}

	// keep stage ordering deterministic regardless of map iteration order
	byFilename := func(files []parser.Stagefile) func(i, j int) bool {
		return func(i, j int) bool { return files[i].Filename < files[j].Filename }
	}
	if parsers.StageFiles != nil {
		sort.Slice(parsers.StageFiles, byFilename(parsers.StageFiles))
	}
	if parsers.PovfwStageFiles != nil {
		sort.Slice(parsers.PovfwStageFiles, byFilename(parsers.PovfwStageFiles))
	}

	return parsers
}
func LoadBuckets(cConfig *csconfig.Config) error {
var (
err error
@ -223,7 +185,7 @@ func newLogLevel(curLevelPtr *log.Level, f *Flags) *log.Level {
default:
}
if ret == *curLevelPtr {
if curLevelPtr != nil && ret == *curLevelPtr {
// avoid returning a new ptr to the same value
return curLevelPtr
}
@ -232,6 +194,10 @@ func newLogLevel(curLevelPtr *log.Level, f *Flags) *log.Level {
// LoadConfig returns a configuration parsed from configuration file
func LoadConfig(cConfig *csconfig.Config) error {
if (cConfig.Common == nil || *cConfig.Common == csconfig.CommonCfg{}) {
return fmt.Errorf("unable to load configuration: common section is empty")
}
cConfig.Common.LogLevel = newLogLevel(cConfig.Common.LogLevel, flags)
if dumpFolder != "" {
@ -295,9 +261,39 @@ func LoadConfig(cConfig *csconfig.Config) error {
return err
}
err := LoadFeatureFlags(cConfig, log.StandardLogger())
if err != nil {
return err
}
return nil
}
// LoadFeatureFlags parses {ConfigDir}/feature.yaml to enable/disable features.
//
// Since CROWDSEC_FEATURE_ envvars are parsed before config.yaml,
// when the logger is not yet initialized, we also log here a recap
// of what has been enabled.
//
// Returns an error (wrapping the underlying cause) when feature.yaml
// cannot be parsed.
func LoadFeatureFlags(cConfig *csconfig.Config, logger *log.Logger) error {
	featurePath := filepath.Join(cConfig.ConfigPaths.ConfigDir, "feature.yaml")

	if err := fflag.Crowdsec.SetFromYamlFile(featurePath, logger); err != nil {
		// %w (not %s) preserves the cause so callers can use errors.Is/errors.As
		return fmt.Errorf("file %s: %w", featurePath, err)
	}

	enabledFeatures := fflag.Crowdsec.GetEnabledFeatures()

	msg := "<none>"
	if len(enabledFeatures) > 0 {
		msg = strings.Join(enabledFeatures, ", ")
	}

	logger.Infof("Enabled features: %s", msg)

	return nil
}
// exitWithCode must be called right before the program termination,
// to allow measuring functional test coverage in case of abnormal exit.
//
@ -322,6 +318,16 @@ func exitWithCode(exitCode int, err error) {
var crowdsecT0 time.Time
func main() {
if err := fflag.RegisterAllFeatures(); err != nil {
log.Fatalf("failed to register features: %s", err)
}
// some features can require configuration or command-line options,
	// so we need to parse them asap. we'll load from feature.yaml later.
if err := fflag.Crowdsec.SetFromEnv(log.StandardLogger()); err != nil {
log.Fatalf("failed set features from environment: %s", err)
}
crowdsecT0 = time.Now()
defer types.CatchPanic("crowdsec/main")

View file

@ -5,6 +5,7 @@ import (
"time"
v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1"
"github.com/crowdsecurity/crowdsec/pkg/cache"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
"github.com/crowdsecurity/crowdsec/pkg/cwversion"
"github.com/crowdsecurity/crowdsec/pkg/database"
@ -100,6 +101,10 @@ var globalPourHistogram = prometheus.NewHistogramVec(
func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.HandlerFunc {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//update cache metrics (stash)
cache.UpdateCacheMetrics()
//decision metrics are only relevant for LAPI
if dbClient == nil {
next.ServeHTTP(w, r)
return
@ -160,7 +165,8 @@ func registerPrometheus(config *csconfig.PrometheusCfg) {
globalCsInfo, globalParsingHistogram, globalPourHistogram,
leaky.BucketsUnderflow, leaky.BucketsCanceled, leaky.BucketsInstantiation, leaky.BucketsOverflow,
v1.LapiRouteHits,
leaky.BucketsCurrentCount)
leaky.BucketsCurrentCount,
cache.CacheMetrics)
} else {
log.Infof("Loading prometheus collectors")
prometheus.MustRegister(globalParserHits, globalParserHitsOk, globalParserHitsKo,
@ -168,7 +174,8 @@ func registerPrometheus(config *csconfig.PrometheusCfg) {
globalCsInfo, globalParsingHistogram, globalPourHistogram,
v1.LapiRouteHits, v1.LapiMachineHits, v1.LapiBouncerHits, v1.LapiNilDecisions, v1.LapiNonNilDecisions, v1.LapiResponseTime,
leaky.BucketsPour, leaky.BucketsUnderflow, leaky.BucketsCanceled, leaky.BucketsInstantiation, leaky.BucketsOverflow, leaky.BucketsCurrentCount,
globalActiveDecisions, globalAlerts)
globalActiveDecisions, globalAlerts,
cache.CacheMetrics)
}
}

View file

@ -30,7 +30,7 @@ func StartRunSvc() error {
},
})
cConfig, err = csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI)
cConfig, err = csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI, false)
if err != nil {
return err
}

View file

@ -58,7 +58,7 @@ func WindowsRun() error {
err error
)
cConfig, err = csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI)
cConfig, err = csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI, false)
if err != nil {
return err
}

View file

@ -54,7 +54,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) {
crowdsecTomb = tomb.Tomb{}
pluginTomb = tomb.Tomb{}
cConfig, err := csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI)
cConfig, err := csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI, false)
if err != nil {
return nil, err
}

View file

@ -97,7 +97,7 @@ func runService(name string) error {
log.Warnf("Failed to open event log: %s", err)
}
cConfig, err := csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI)
cConfig, err := csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI, false)
if err != nil {
return err
}

View file

@ -16,6 +16,7 @@ config_paths:
notification_dir: /etc/crowdsec/notifications/
plugin_dir: /usr/local/lib/crowdsec/plugins/
crowdsec_service:
#console_context_path: /etc/crowdsec/console/context.yaml
acquisition_path: /etc/crowdsec/acquis.yaml
acquisition_dir: /etc/crowdsec/acquis.d
parser_routines: 1

View file

@ -13,6 +13,7 @@ config_paths:
plugin_dir: C:\ProgramData\CrowdSec\plugins\
notification_dir: C:\ProgramData\CrowdSec\config\notifications\
crowdsec_service:
#console_context_path: C:\ProgramData\CrowdSec\console\context.yaml
acquisition_path: C:\ProgramData\CrowdSec\config\acquis.yaml
parser_routines: 1
cscli:

View file

@ -1,3 +1,4 @@
share_manual_decisions: false
share_custom: true
share_tainted: true
share_context: false

0
config/context.yaml Normal file
View file

View file

@ -9,6 +9,8 @@ ExecStartPre=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml -t
ExecStart=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml
#ExecStartPost=/bin/sleep 0.1
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
RestartSec=60
[Install]
WantedBy=multi-user.target

3
debian/rules vendored
View file

@ -28,7 +28,7 @@ override_dh_auto_install:
mkdir -p debian/crowdsec/usr/share/crowdsec
mkdir -p debian/crowdsec/etc/crowdsec/hub/
mkdir -p debian/crowdsec/usr/share/crowdsec/config
mkdir -p debian/crowdsec/etc/crowdsec/console/
mkdir -p debian/crowdsec/usr/lib/crowdsec/plugins/
mkdir -p debian/crowdsec/etc/crowdsec/notifications/
@ -44,6 +44,7 @@ override_dh_auto_install:
install -m 600 config/config.yaml debian/crowdsec/etc/crowdsec/config.yaml
cp config/simulation.yaml debian/crowdsec/etc/crowdsec/simulation.yaml
cp config/profiles.yaml debian/crowdsec/etc/crowdsec/profiles.yaml
cp config/context.yaml debian/crowdsec/etc/crowdsec/console/context.yaml
cp config/console.yaml debian/crowdsec/etc/crowdsec/console.yaml
cp -a config/patterns debian/crowdsec/etc/crowdsec

View file

@ -179,7 +179,7 @@ To use environment variables, they should be in the format `BOUNCER_KEY_<name>=<
To use Docker secrets, the secret should be named `bouncer_key_<name>` with a content of `<key>`. e.g. `bouncer_key_nginx` with content `mysecretkey12345`.
A bouncer key can be any string but we recommend an alphanumeric value for consistency with the crowdsec-generated keys and to avoid problems with escaping special characters.
A bouncer key can be any string but we recommend an alphanumeric value for consistency with the keys generated by crowdsec and to avoid problems with escaping special characters.
With TLS authentication:
@ -198,22 +198,33 @@ Using binds rather than named volumes ([complete explanation here](https://docs.
# Reference
## Environment Variables
Note for persistent configurations (i.e. bind mount or volumes): when a
variable is set, its value may be written to the appropriate file (usually
config.yaml) each time the container is run.
| Variable | Default | Description |
| ----------------------- | ------------------------- | ----------- |
| `CONFIG_FILE` | `/etc/crowdsec/config.yaml` | Configuration file location |
| `DSN` | | Process a single source in time-machine: `-e DSN="file:///var/log/toto.log"` or `-e DSN="cloudwatch:///your/group/path:stream_name?profile=dev&backlog=16h"` or `-e DSN="journalctl://filters=_SYSTEMD_UNIT=ssh.service"` |
| `TYPE` | | [`Labels.type`](https://docs.crowdsec.net/Crowdsec/v1/references/acquisition/) for file in time-machine: `-e TYPE="<type>"` |
| `TEST_MODE` | false | Don't run the service, only test the configuration: `-e TEST_MODE=true` |
| `TZ` | | Set the [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) to ensure the logs have a local timestamp. |
| `LOCAL_API_URL` | `http://0.0.0.0:8080` | The LAPI URL, you need to change this when `DISABLE_LOCAL_API` is true: `-e LOCAL_API_URL="http://lapi-address:8080"` |
| `DISABLE_AGENT` | false | Disable the agent, run a LAPI-only container |
| `DISABLE_LOCAL_API` | false | Disable LAPI, run an agent-only container |
| `DISABLE_ONLINE_API` | false | Disable online API registration for signal sharing |
| `CUSTOM_HOSTNAME` | localhost | Custom hostname for LAPI registration (with agent and LAPI on the same container) |
| `TEST_MODE` | false | Don't run the service, only test the configuration: `-e TEST_MODE=true` |
| `TZ` | | Set the [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) to ensure the logs have a local timestamp. |
| `LOCAL_API_URL` | `http://0.0.0.0:8080` | The LAPI URL, you need to change this when `DISABLE_LOCAL_API` is true: `-e LOCAL_API_URL="http://lapi-address:8080"` |
| `PLUGIN_DIR` | `/usr/local/lib/crowdsec/plugins/` | Directory for plugins: `-e PLUGIN_DIR="<path>"` |
| `BOUNCER_KEY_<name>` | | Register a bouncer with the name `<name>` and a key equal to the value of the environment variable. |
| `METRICS_PORT` | 6060 | Port to expose Prometheus metrics |
| | | |
| __LAPI__ | | (useless with DISABLE_LOCAL_API) |
| `USE_WAL` | false | Enable Write-Ahead Logging with SQLite |
| `CUSTOM_HOSTNAME` | localhost | Name for the local agent (running in the container with LAPI) |
| | | |
| __Agent__ | | (these don't work with DISABLE_AGENT) |
| `TYPE` | | [`Labels.type`](https://docs.crowdsec.net/Crowdsec/v1/references/acquisition/) for file in time-machine: `-e TYPE="<type>"` |
| `DSN` | | Process a single source in time-machine: `-e DSN="file:///var/log/toto.log"` or `-e DSN="cloudwatch:///your/group/path:stream_name?profile=dev&backlog=16h"` or `-e DSN="journalctl://filters=_SYSTEMD_UNIT=ssh.service"` |
| | | |
| __Bouncers__ | | |
| `BOUNCER_KEY_<name>` | | Register a bouncer with the name `<name>` and a key equal to the value of the environment variable. |
| | | |
| __Console__ | | |
| `ENROLL_KEY` | | Enroll key retrieved from [the console](https://app.crowdsec.net/) to enroll the instance. |
@ -224,18 +235,23 @@ Using binds rather than named volumes ([complete explanation here](https://docs.
| `AGENT_USERNAME` | | Agent username (to register if is LAPI or to use if it's an agent): `-e AGENT_USERNAME="machine_id"` |
| `AGENT_PASSWORD` | | Agent password (to register if is LAPI or to use if it's an agent): `-e AGENT_PASSWORD="machine_password"` |
| | | |
| __TLS Auth/encryption__ | | |
| `USE_TLS` | false | Enable TLS on the LAPI |
| `CERT_FILE` | /etc/ssl/cert.pem | TLS Certificate path |
| `KEY_FILE` | /etc/ssl/key.pem | TLS Key path |
| `CACERT_FILE` | | CA certificate bundle |
| __TLS Encryption__ | | |
| `USE_TLS` | false | Enable TLS encryption (either as a LAPI or agent) |
| `CACERT_FILE` | | CA certificate bundle (for self-signed certificates) |
| `INSECURE_SKIP_VERIFY` | | Skip LAPI certificate validation |
| `LAPI_CERT_FILE` | | LAPI TLS Certificate path |
| `LAPI_KEY_FILE` | | LAPI TLS Key path |
| | | |
| __TLS Authentication__ | | (these require USE_TLS=true) |
| `CLIENT_CERT_FILE` | | Client TLS Certificate path (enable TLS authentication) |
| `CLIENT_KEY_FILE` | | Client TLS Key path |
| `AGENTS_ALLOWED_OU` | agent-ou | OU values allowed for agents, separated by comma |
| `BOUNCERS_ALLOWED_OU` | bouncer-ou | OU values allowed for bouncers, separated by comma |
| | | |
| __Hub management__ | | |
| `COLLECTIONS` | | Collections to install, separated by space: `-e COLLECTIONS="crowdsecurity/linux crowdsecurity/apache2"` |
| `SCENARIOS` | | Scenarios to install, separated by space |
| `PARSERS` | | Parsers to install, separated by space |
| `SCENARIOS` | | Scenarios to install, separated by space |
| `POSTOVERFLOWS` | | Postoverflows to install, separated by space |
| `DISABLE_COLLECTIONS` | | Collections to remove, separated by space: `-e DISABLE_COLLECTIONS="crowdsecurity/linux crowdsecurity/nginx"` |
| `DISABLE_PARSERS` | | Parsers to remove, separated by space |
@ -246,6 +262,10 @@ Using binds rather than named volumes ([complete explanation here](https://docs.
| `LEVEL_INFO` | false | Force INFO level for the container log |
| `LEVEL_DEBUG` | false | Force DEBUG level for the container log |
| `LEVEL_TRACE` | false | Force TRACE level (VERY verbose) for the container log |
| | | |
| __Developer options__ | | |
| `CI_TESTING` | false | Used during functional tests |
| `DEBUG` | false | Trace the entrypoint |
## Volumes

View file

@ -16,8 +16,8 @@ crowdsec_service:
acquisition_path: /etc/crowdsec/acquis.yaml
parser_routines: 1
plugin_config:
user: nobody
group: nobody
user: nobody
group: nobody
cscli:
output: human
db_config:
@ -40,10 +40,8 @@ api:
- 127.0.0.1
- ::1
online_client: # Central API credentials (to push signals and receive bad IPs)
#credentials_path: /etc/crowdsec/online_api_credentials.yaml
#credentials_path: /etc/crowdsec/online_api_credentials.yaml
tls:
cert_file: /etc/ssl/cert.pem
key_file: /etc/ssl/key.pem
agents_allowed_ou:
- agent-ou
bouncers_allowed_ou:

View file

@ -3,14 +3,9 @@
# shellcheck disable=SC2292 # allow [ test ] syntax
# shellcheck disable=SC2310 # allow "if function..." syntax with -e
#set -x
#export PS4='+(${BASH_SOURCE}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
set -e
shopt -s inherit_errexit
#- HELPER FUNCTIONS ----------------#
# match true, TRUE, True, tRuE, etc.
istrue() {
case "$(echo "$1" | tr '[:upper:]' '[:lower:]')" in
@ -27,6 +22,22 @@ isfalse() {
fi
}
if istrue "$DEBUG"; then
set -x
export PS4='+(${BASH_SOURCE}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
fi
if istrue "$CI_TESTING"; then
echo "githubciXXXXXXXXXXXXXXXXXXXXXXXX" >/etc/machine-id
fi
#- DEFAULTS -----------------------#
export CONFIG_FILE="${CONFIG_FILE:=/etc/crowdsec/config.yaml}"
export CUSTOM_HOSTNAME="${CUSTOM_HOSTNAME:=localhost}"
#- HELPER FUNCTIONS ----------------#
# csv2yaml <string>
# generate a yaml list from a comma-separated string of values
csv2yaml() {
@ -58,8 +69,20 @@ conf_set() {
else
YAML_FILE="$CONFIG_FILE"
fi
YAML_CONTENT=$(cat "$YAML_FILE" 2>/dev/null || true)
echo "$YAML_CONTENT" | yq e "$1" | install -m 0600 /dev/stdin "$YAML_FILE"
if [ ! -f "$YAML_FILE" ]; then
install -m 0600 /dev/null "$YAML_FILE"
fi
yq e "$1" -i "$YAML_FILE"
}
# conf_set_if "$VAR" <yq_expression> [file_path]
# Apply conf_set with the remaining arguments, but only when the first
# argument (typically the value of an environment variable) is non-empty.
# This lets callers update the configuration conditionally in one line.
conf_set_if() {
    if [ -n "$1" ]; then
        shift
        conf_set "$@"
    fi
}
# register_bouncer <bouncer_name> <bouncer_key>
@ -90,6 +113,16 @@ cscli_if_clean() {
#-----------------------------------#
# Deprecation shim: CERT_FILE/KEY_FILE were replaced by LAPI_CERT_FILE and
# LAPI_KEY_FILE. Warn (in yellow, on stderr) and map the old variables to the
# new ones unless the new ones are already set.
if [ -n "$CERT_FILE" ] || [ -n "$KEY_FILE" ] ; then
    printf '%b' '\033[0;33m'
    echo "Warning: the variables CERT_FILE and KEY_FILE have been deprecated." >&2
    echo "Please use LAPI_CERT_FILE and LAPI_KEY_FILE instead." >&2
    echo "The old variables will be removed in a future release." >&2
    printf '%b' '\033[0m'
    LAPI_CERT_FILE=${LAPI_CERT_FILE:-$CERT_FILE}
    LAPI_KEY_FILE=${LAPI_KEY_FILE:-$KEY_FILE}
fi
# Check and prestage databases
for geodb in GeoLite2-ASN.mmdb GeoLite2-City.mmdb; do
# We keep the pre-populated geoip databases in /staging instead of /var,
@ -122,56 +155,65 @@ elif [ -n "$USE_WAL" ] && isfalse "$USE_WAL"; then
conf_set '.db_config.use_wal = false'
fi
# regenerate local agent credentials (ignore if agent is disabled)
if isfalse "$DISABLE_AGENT"; then
if isfalse "$DISABLE_LOCAL_API"; then
echo "Regenerate local agent credentials"
cscli machines delete "$CUSTOM_HOSTNAME" 2>/dev/null || true
cscli machines add "$CUSTOM_HOSTNAME" --auto --url "$LOCAL_API_URL"
fi
lapi_credentials_path=$(conf_get '.api.client.credentials_path')
# we only use the envvars that are actually defined
# in case of persistent configuration
conf_set '
with(select(strenv(LOCAL_API_URL)!=""); .url = strenv(LOCAL_API_URL)) |
with(select(strenv(AGENT_USERNAME)!=""); .login = strenv(AGENT_USERNAME)) |
with(select(strenv(AGENT_PASSWORD)!=""); .password = strenv(AGENT_PASSWORD))
' "$lapi_credentials_path"
if istrue "$USE_TLS"; then
conf_set '
with(select(strenv(CACERT_FILE)!=""); .ca_cert_path = strenv(CACERT_FILE)) |
with(select(strenv(KEY_FILE)!=""); .key_path = strenv(KEY_FILE)) |
with(select(strenv(CERT_FILE)!=""); .cert_path = strenv(CERT_FILE))
' "$lapi_credentials_path"
else
conf_set '
del(.ca_cert_path) |
del(.key_path) |
del(.cert_path)
' "$lapi_credentials_path"
fi
fi
# regenerate local agent credentials (even if agent is disabled, cscli needs a
# connection to the API)
cscli machines delete "$CUSTOM_HOSTNAME" 2>/dev/null || true
if isfalse "$DISABLE_LOCAL_API"; then
echo "Check if lapi needs to automatically register an agent"
if isfalse "$USE_TLS" || [ "$CLIENT_CERT_FILE" = "" ]; then
echo "Regenerate local agent credentials"
cscli machines add "$CUSTOM_HOSTNAME" --auto
fi
# pre-registration is not needed with TLS
if isfalse "$USE_TLS" && [ "$AGENT_USERNAME" != "" ] && [ "$AGENT_PASSWORD" != "" ] ; then
echo "Check if lapi needs to register an additional agent"
# pre-registration is not needed with TLS authentication, but we can have TLS transport with user/pw
if [ "$AGENT_USERNAME" != "" ] && [ "$AGENT_PASSWORD" != "" ] ; then
# re-register because pw may have been changed
cscli machines add "$AGENT_USERNAME" --password "$AGENT_PASSWORD" --url "$LOCAL_API_URL" --force
cscli machines add "$AGENT_USERNAME" --password "$AGENT_PASSWORD" -f /dev/null --force
echo "Agent registered to lapi"
fi
fi
# ----------------
lapi_credentials_path=$(conf_get '.api.client.credentials_path')
conf_set_if "$LOCAL_API_URL" '.url = strenv(LOCAL_API_URL)' "$lapi_credentials_path"
if istrue "$DISABLE_LOCAL_API"; then
# we only use the envvars that are actually defined
# in case of persistent configuration
conf_set_if "$AGENT_USERNAME" '.login = strenv(AGENT_USERNAME)' "$lapi_credentials_path"
conf_set_if "$AGENT_PASSWORD" '.password = strenv(AGENT_PASSWORD)' "$lapi_credentials_path"
fi
conf_set_if "$INSECURE_SKIP_VERIFY" '.api.client.insecure_skip_verify = env(INSECURE_SKIP_VERIFY)'
# agent-only containers still require USE_TLS
if istrue "$USE_TLS"; then
# shellcheck disable=SC2153
conf_set_if "$CACERT_FILE" '.ca_cert_path = strenv(CACERT_FILE)' "$lapi_credentials_path"
conf_set_if "$CLIENT_KEY_FILE" '.key_path = strenv(CLIENT_KEY_FILE)' "$lapi_credentials_path"
conf_set_if "$CLIENT_CERT_FILE" '.cert_path = strenv(CLIENT_CERT_FILE)' "$lapi_credentials_path"
else
conf_set '
del(.ca_cert_path) |
del(.key_path) |
del(.cert_path)
' "$lapi_credentials_path"
fi
if istrue "$DISABLE_ONLINE_API"; then
conf_set 'del(.api.server.online_client)'
fi
# registration to online API for signal push
if isfalse "$DISABLE_ONLINE_API" && [ "$CONFIG_FILE" == "/etc/crowdsec/config.yaml" ] ; then
if isfalse "$DISABLE_ONLINE_API" ; then
CONFIG_DIR=$(conf_get '.config_paths.config_dir')
config_exists=$(conf_get '.api.server.online_client | has("credentials_path")')
if isfalse "$config_exists"; then
conf_set '.api.server.online_client = {"credentials_path": "/etc/crowdsec/online_api_credentials.yaml"}'
cscli capi register > /etc/crowdsec/online_api_credentials.yaml
export CONFIG_DIR
conf_set '.api.server.online_client = {"credentials_path": strenv(CONFIG_DIR) + "/online_api_credentials.yaml"}'
cscli capi register > "$CONFIG_DIR/online_api_credentials.yaml"
echo "Registration to online API done"
fi
fi
@ -200,22 +242,20 @@ if [ "$GID" != "" ]; then
fi
fi
# XXX only with LAPI
if istrue "$USE_TLS"; then
agents_allowed_yaml=$(csv2yaml "$AGENTS_ALLOWED_OU") \
bouncers_allowed_yaml=$(csv2yaml "$BOUNCERS_ALLOWED_OU") \
conf_set '
with(select(strenv(CACERT_FILE)!=""); .api.server.tls.ca_cert_path = strenv(CACERT_FILE)) |
with(select(strenv(CERT_FILE)!=""); .api.server.tls.cert_file = strenv(CERT_FILE)) |
with(select(strenv(KEY_FILE)!=""); .api.server.tls.key_file = strenv(KEY_FILE)) |
with(select(strenv(BOUNCERS_ALLOWED_OU)!=""); .api.server.tls.bouncers_allowed_ou = env(bouncers_allowed_yaml)) |
with(select(strenv(AGENTS_ALLOWED_OU)!=""); .api.server.tls.agents_allowed_ou = env(agents_allowed_yaml)) |
... comments=""
'
conf_set_if "$CACERT_FILE" '.api.server.tls.ca_cert_path = strenv(CACERT_FILE)'
conf_set_if "$LAPI_CERT_FILE" '.api.server.tls.cert_file = strenv(LAPI_CERT_FILE)'
conf_set_if "$LAPI_KEY_FILE" '.api.server.tls.key_file = strenv(LAPI_KEY_FILE)'
conf_set_if "$BOUNCERS_ALLOWED_OU" '.api.server.tls.bouncers_allowed_ou = env(bouncers_allowed_yaml)'
conf_set_if "$AGENTS_ALLOWED_OU" '.api.server.tls.agents_allowed_ou = env(agents_allowed_yaml)'
else
conf_set 'del(.api.server.tls)'
fi
conf_set 'with(select(strenv(PLUGIN_DIR)!=""); .config_paths.plugin_dir = strenv(PLUGIN_DIR))'
conf_set_if "$PLUGIN_DIR" '.config_paths.plugin_dir = strenv(PLUGIN_DIR)'
## Install collections, parsers, scenarios & postoverflows
cscli hub update
@ -322,7 +362,7 @@ if istrue "$LEVEL_INFO"; then
ARGS="$ARGS -info"
fi
conf_set 'with(select(strenv(METRICS_PORT)!=""); .prometheus.listen_port=env(METRICS_PORT))'
conf_set_if "$METRICS_PORT" '.prometheus.listen_port=env(METRICS_PORT)'
# shellcheck disable=SC2086
exec crowdsec $ARGS

3
go.mod
View file

@ -69,6 +69,7 @@ require (
github.com/aquasecurity/table v1.8.0
github.com/beevik/etree v1.1.0
github.com/blackfireio/osinfo v1.0.3
github.com/goccy/go-yaml v1.9.7
github.com/google/winops v0.0.0-20211216095627-f0e86eb1453b
github.com/ivanpirog/coloredcobra v1.0.1
github.com/mattn/go-isatty v0.0.14
@ -90,6 +91,7 @@ require (
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bluele/gcache v0.0.2 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/containerd/containerd v1.6.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
@ -169,6 +171,7 @@ require (
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect

9
go.sum
View file

@ -121,6 +121,8 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm
github.com/blackfireio/osinfo v1.0.3 h1:Yk2t2GTPjBcESv6nDSWZKO87bGMQgO+Hi9OoXPpxX8c=
github.com/blackfireio/osinfo v1.0.3/go.mod h1:Pd987poVNmd5Wsx6PRPw4+w7kLlf9iJxoRKPtPAjOrA=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/c-robinson/iplib v1.0.3 h1:NG0UF0GoEsrC1/vyfX1Lx2Ss7CySWl3KqqXh3q4DdPU=
@ -209,6 +211,7 @@ github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
@ -372,6 +375,8 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe
github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
github.com/goccy/go-yaml v1.9.7 h1:D/Vx+JITklB1ugSkncB4BNR67M3X6AKs9+rqVeo3ddw=
github.com/goccy/go-yaml v1.9.7/go.mod h1:JubOolP3gh0HpiBc4BLRD4YmjEjHAmIIB2aaXKkTfoE=
github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
@ -651,6 +656,7 @@ github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcncea
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
@ -664,7 +670,6 @@ github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI=
@ -1173,6 +1178,7 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -1264,6 +1270,7 @@ golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=

View file

@ -176,7 +176,7 @@ wowo: ajsajasjas
yaml.Unmarshal([]byte(tc.String), &common)
ds, err := DataSourceConfigure(common)
cstest.RequireErrorContains(t, err, tc.ExpectedError)
if tc.ExpectedError == "" {
if tc.ExpectedError != "" {
return
}

View file

@ -54,7 +54,7 @@ func TestTimestamp(t *testing.T) {
currentYear bool
}{
{"May 20 09:33:54", "0000-05-20T09:33:54Z", "", false},
{"May 20 09:33:54", "2022-05-20T09:33:54Z", "", true},
{"May 20 09:33:54", "2023-05-20T09:33:54Z", "", true},
{"May 20 09:33:54 2022", "2022-05-20T09:33:54Z", "", false},
{"May 1 09:33:54 2022", "2022-05-01T09:33:54Z", "", false},
{"May 01 09:33:54 2021", "2021-05-01T09:33:54Z", "", true},
@ -257,7 +257,7 @@ func TestParse(t *testing.T) {
},
{
"<12>May 20 09:33:54 UDMPRO,a2edd0c6ae48,udm-1.10.0.3686 kernel: foo", expected{
Timestamp: time.Date(2022, time.May, 20, 9, 33, 54, 0, time.UTC),
Timestamp: time.Date(2023, time.May, 20, 9, 33, 54, 0, time.UTC),
Hostname: "UDMPRO,a2edd0c6ae48,udm-1.10.0.3686",
Tag: "kernel",
PID: "",

View file

@ -0,0 +1,157 @@
package alertcontext
import (
"encoding/json"
"fmt"
"strconv"
"github.com/antonmedv/expr"
"github.com/antonmedv/expr/vm"
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
"github.com/crowdsecurity/crowdsec/pkg/models"
"github.com/crowdsecurity/crowdsec/pkg/types"
log "github.com/sirupsen/logrus"
)
const (
	// maxContextValueLen caps the serialized length of any single context
	// value attached to an alert (enforced by truncate()).
	maxContextValueLen = 4000
)

var (
	// alertContext is the package-level configuration written by
	// NewAlertContext and read by EventToContext.
	alertContext = Context{}
)

// Context describes which event fields are collected and attached to
// outgoing alerts, and how long their serialized values may be.
type Context struct {
	ContextToSend         map[string][]string      // configured key -> raw expression strings
	ContextValueLen       int                      // max serialized length per context value
	ContextToSendCompiled map[string][]*vm.Program // compiled form of ContextToSend
	Log                   *log.Logger              // dedicated logger for context handling
}
// ValidateContextExpr compiles every expression configured for a context
// key and returns an error describing the first one that fails to compile.
// The key parameter is not used during compilation; it only identifies the
// configuration entry for the caller.
func ValidateContextExpr(key string, expressions []string) error {
	for _, raw := range expressions {
		env := expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))
		if _, err := expr.Compile(raw, env); err != nil {
			return fmt.Errorf("compilation of '%s' failed: %v", raw, err)
		}
	}
	return nil
}
// NewAlertContext initializes the package-level alert context: it creates a
// dedicated logger, normalizes valueLength (0 selects the default; values
// above maxContextValueLen are clamped to it), and pre-compiles every
// configured expression into alertContext.ContextToSendCompiled.
//
// Returns an error if the logger cannot be configured or if any expression
// fails to compile (in which case the partially-filled compiled map for the
// failing key is left in place, matching previous behavior).
func NewAlertContext(contextToSend map[string][]string, valueLength int) error {
	clog := log.New()
	if err := types.ConfigureLogger(clog); err != nil {
		return fmt.Errorf("couldn't create logger for alert context: %s", err)
	}

	switch {
	case valueLength == 0:
		clog.Debugf("No console context value length provided, using default: %d", maxContextValueLen)
		valueLength = maxContextValueLen
	case valueLength > maxContextValueLen:
		clog.Debugf("Provided console context value length (%d) is higher than the maximum, using default: %d", valueLength, maxContextValueLen)
		valueLength = maxContextValueLen
	}

	alertContext = Context{
		ContextToSend:         contextToSend,
		ContextValueLen:       valueLength,
		Log:                   clog,
		ContextToSendCompiled: make(map[string][]*vm.Program),
	}

	for key, rawExprs := range contextToSend {
		alertContext.ContextToSendCompiled[key] = make([]*vm.Program, 0)
		for _, raw := range rawExprs {
			program, err := expr.Compile(raw, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
			if err != nil {
				return fmt.Errorf("compilation of '%s' context value failed: %v", raw, err)
			}
			alertContext.ContextToSendCompiled[key] = append(alertContext.ContextToSendCompiled[key], program)
		}
	}

	return nil
}
// truncate serializes values as a JSON array and, if the result exceeds
// contextValueLen, shrinks it: whole entries are dropped from the end first;
// once a single entry remains it is repeatedly cut in half with a trailing
// "..." marker until the serialized form fits.
//
// Returns the JSON string and an error only if marshaling fails.
// NOTE(review): truncation slices by byte index, so a multi-byte UTF-8
// character may be split at the cut point — TODO confirm inputs are ASCII-safe.
func truncate(values []string, contextValueLen int) (string, error) {
	valueByte, err := json.Marshal(values)
	if err != nil {
		return "", fmt.Errorf("unable to dump metas: %s", err)
	}
	ret := string(valueByte)
	for len(ret) > contextValueLen {
		if len(values) == 1 {
			// Only one value left and it is still too big: halve it.
			valueToTruncate := values[0]
			half := len(valueToTruncate) / 2
			lastValueTruncated := valueToTruncate[:half] + "..."
			// Guard: with a very small contextValueLen the "..." suffix can
			// stop the value from shrinking (or even make it grow), which
			// previously looped forever. Return the best effort instead.
			if len(lastValueTruncated) >= len(valueToTruncate) {
				return ret, nil
			}
			values[0] = lastValueTruncated
		} else {
			// Several values remain: drop the last one.
			values = values[:len(values)-1]
		}
		valueByte, err = json.Marshal(values)
		if err != nil {
			return "", fmt.Errorf("unable to dump metas: %s", err)
		}
		ret = string(valueByte)
	}
	return ret, nil
}
// EventToContext evaluates every compiled context expression (from the
// package-level alertContext) against each event and builds the Meta
// entries to attach to an alert.
//
// For each configured key, expression results (strings or ints only) are
// deduplicated, then serialized and truncated with truncate() to honor
// alertContext.ContextValueLen. Keys that collected no values are skipped.
// Evaluation problems do not abort processing: they are accumulated and
// returned alongside the metas.
func EventToContext(events []types.Event) (models.Meta, []error) {
	// NOTE(review): this local shadows the stdlib "errors" package name
	// inside the function; harmless here since that package is not used.
	var errors []error

	metas := make([]*models.MetaItems0, 0)
	tmpContext := make(map[string][]string)
	for _, evt := range events {
		for key, values := range alertContext.ContextToSendCompiled {
			if _, ok := tmpContext[key]; !ok {
				tmpContext[key] = make([]string, 0)
			}
			for _, value := range values {
				var val string
				output, err := expr.Run(value, exprhelpers.GetExprEnv(map[string]interface{}{"evt": evt}))
				if err != nil {
					errors = append(errors, fmt.Errorf("failed to get value for %s : %v", key, err))
					continue
				}
				// Only string and int results are supported; anything else
				// is recorded as an error and skipped.
				switch out := output.(type) {
				case string:
					val = out
				case int:
					val = strconv.Itoa(out)
				default:
					errors = append(errors, fmt.Errorf("unexpected return type for %s : %T", key, output))
					continue
				}
				// Deduplicate non-empty values per key.
				if val != "" && !types.InSlice(val, tmpContext[key]) {
					tmpContext[key] = append(tmpContext[key], val)
				}
			}
		}
	}
	for key, values := range tmpContext {
		if len(values) == 0 {
			continue
		}
		// Truncation failures are logged, but the (possibly partial) value
		// returned by truncate is still attached.
		valueStr, err := truncate(values, alertContext.ContextValueLen)
		if err != nil {
			log.Warningf(err.Error())
		}
		meta := models.MetaItems0{
			Key:   key,
			Value: valueStr,
		}
		metas = append(metas, &meta)
	}
	ret := models.Meta(metas)
	return ret, errors
}

View file

@ -0,0 +1,201 @@
package alertcontext
import (
"fmt"
"testing"
"github.com/crowdsecurity/crowdsec/pkg/models"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/stretchr/testify/assert"
)
func TestNewAlertContext(t *testing.T) {
tests := []struct {
name string
contextToSend map[string][]string
valueLength int
expectedErr error
}{
{
name: "basic config test",
contextToSend: map[string][]string{
"test": []string{"evt.Parsed.source_ip"},
},
valueLength: 100,
expectedErr: nil,
},
}
for _, test := range tests {
fmt.Printf("Running test '%s'\n", test.name)
err := NewAlertContext(test.contextToSend, test.valueLength)
assert.ErrorIs(t, err, test.expectedErr)
}
}
// TestEventToContext exercises the full pipeline: configure the package
// context with NewAlertContext, then verify that EventToContext collects,
// deduplicates and truncates values as expected (order-insensitive).
func TestEventToContext(t *testing.T) {
	tests := []struct {
		name           string
		contextToSend  map[string][]string
		valueLength    int
		events         []types.Event
		expectedResult models.Meta
	}{
		{
			// Keys whose expressions match nothing must not appear in the result.
			name: "basic test",
			contextToSend: map[string][]string{
				"source_ip":         []string{"evt.Parsed.source_ip"},
				"nonexistent_field": []string{"evt.Parsed.nonexist"},
			},
			valueLength: 100,
			events: []types.Event{
				{
					Parsed: map[string]string{
						"source_ip":      "1.2.3.4",
						"source_machine": "mymachine",
					},
				},
			},
			expectedResult: []*models.MetaItems0{
				{
					Key:   "source_ip",
					Value: "[\"1.2.3.4\"]",
				},
			},
		},
		{
			// Identical values across events are deduplicated; distinct ones accumulate.
			name: "test many events",
			contextToSend: map[string][]string{
				"source_ip":      []string{"evt.Parsed.source_ip"},
				"source_machine": []string{"evt.Parsed.source_machine"},
				"cve":            []string{"evt.Parsed.cve"},
			},
			valueLength: 100,
			events: []types.Event{
				{
					Parsed: map[string]string{
						"source_ip":      "1.2.3.4",
						"source_machine": "mymachine",
						"cve":            "CVE-2022-1234",
					},
				},
				{
					Parsed: map[string]string{
						"source_ip":      "1.2.3.4",
						"source_machine": "mymachine",
						"cve":            "CVE-2022-1235",
					},
				},
				{
					Parsed: map[string]string{
						"source_ip":      "1.2.3.4",
						"source_machine": "mymachine",
						"cve":            "CVE-2022-125",
					},
				},
			},
			expectedResult: []*models.MetaItems0{
				{
					Key:   "source_ip",
					Value: "[\"1.2.3.4\"]",
				},
				{
					Key:   "source_machine",
					Value: "[\"mymachine\"]",
				},
				{
					Key:   "cve",
					Value: "[\"CVE-2022-1234\",\"CVE-2022-1235\",\"CVE-2022-125\"]",
				},
			},
		},
		{
			// When the serialized list exceeds valueLength, whole entries are
			// dropped from the end until it fits.
			name: "test many events with result above max length (need truncate, keep only 2 on 3 elements)",
			contextToSend: map[string][]string{
				"source_ip":      []string{"evt.Parsed.source_ip"},
				"source_machine": []string{"evt.Parsed.source_machine"},
				"uri":            []string{"evt.Parsed.uri"},
			},
			valueLength: 100,
			events: []types.Event{
				{
					Parsed: map[string]string{
						"source_ip":      "1.2.3.4",
						"source_machine": "mymachine",
						"uri":            "/test/test/test/../../../../../../../../",
					},
				},
				{
					Parsed: map[string]string{
						"source_ip":      "1.2.3.4",
						"source_machine": "mymachine",
						"uri":            "/admin/admin/admin/../../../../../../../../",
					},
				},
				{
					Parsed: map[string]string{
						"source_ip":      "1.2.3.4",
						"source_machine": "mymachine",
						"uri":            "/login/login/login/../../../../../../../../../../../",
					},
				},
			},
			expectedResult: []*models.MetaItems0{
				{
					Key:   "source_ip",
					Value: "[\"1.2.3.4\"]",
				},
				{
					Key:   "source_machine",
					Value: "[\"mymachine\"]",
				},
				{
					Key:   "uri",
					Value: "[\"/test/test/test/../../../../../../../../\",\"/admin/admin/admin/../../../../../../../../\"]",
				},
			},
		},
		{
			// A single oversized value is halved and marked with "...".
			name: "test one events with result above max length (need truncate on one element)",
			contextToSend: map[string][]string{
				"source_ip":      []string{"evt.Parsed.source_ip"},
				"source_machine": []string{"evt.Parsed.source_machine"},
				"uri":            []string{"evt.Parsed.uri"},
			},
			valueLength: 100,
			events: []types.Event{
				{
					Parsed: map[string]string{
						"source_ip":      "1.2.3.4",
						"source_machine": "mymachine",
						"uri":            "/test/test/test/../../../../.should_truncate_just_after_this/../../../..../../../../../../../../../../../../../../../end",
					},
				},
			},
			expectedResult: []*models.MetaItems0{
				{
					Key:   "source_machine",
					Value: "[\"mymachine\"]",
				},
				{
					Key:   "uri",
					Value: "[\"/test/test/test/../../../../.should_truncate_just_after_this...\"]",
				},
				{
					Key:   "source_ip",
					Value: "[\"1.2.3.4\"]",
				},
			},
		},
	}

	for _, test := range tests {
		fmt.Printf("Running test '%s'\n", test.name)
		err := NewAlertContext(test.contextToSend, test.valueLength)
		assert.ErrorIs(t, err, nil)
		metas, _ := EventToContext(test.events)
		assert.ElementsMatch(t, test.expectedResult, metas)
	}
}

View file

@ -3,6 +3,8 @@ package apiclient
import (
"bytes"
"encoding/json"
"math/rand"
"sync"
"time"
//"errors"
@ -12,6 +14,7 @@ import (
"net/http/httputil"
"net/url"
"github.com/crowdsecurity/crowdsec/pkg/fflag"
"github.com/crowdsecurity/crowdsec/pkg/models"
"github.com/go-openapi/strfmt"
"github.com/pkg/errors"
@ -75,10 +78,57 @@ func (t *APIKeyTransport) transport() http.RoundTripper {
return http.DefaultTransport
}
// retryRoundTripper wraps another http.RoundTripper and retries requests
// whose response status is in retryStatusCodes; transport-level errors are
// retried as well (see RoundTrip).
type retryRoundTripper struct {
	next             http.RoundTripper // underlying transport that actually sends requests
	maxAttempts      int               // total attempts, including the first one
	retryStatusCodes []int             // HTTP status codes that trigger a retry
	withBackOff      bool              // add cumulative randomized delay between attempts
	onBeforeRequest  func(attempt int) // optional hook invoked before each attempt (0-based)
}
// ShouldRetry reports whether statusCode is one of the configured
// retryable HTTP status codes.
func (r retryRoundTripper) ShouldRetry(statusCode int) bool {
	for _, retryable := range r.retryStatusCodes {
		if retryable == statusCode {
			return true
		}
	}
	return false
}
// RoundTrip implements http.RoundTripper with bounded retries.
//
// Up to maxAttempts attempts are made. An attempt is repeated when the
// transport returned an error or when the status code is retryable per
// ShouldRetry. With withBackOff (and unless disabled by the
// DisableHttpRetryBackoff feature flag), each retry adds a randomized
// 10-30s increment to a *cumulative* delay. Waiting respects the request
// context and aborts early if it is canceled. The request is cloned for
// every attempt, as required by the http.RoundTripper contract.
// Returns the last response/error when all attempts are exhausted.
func (r retryRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	var resp *http.Response
	var err error
	backoff := 0
	for i := 0; i < r.maxAttempts; i++ {
		if i > 0 {
			if r.withBackOff && !fflag.DisableHttpRetryBackoff.IsEnabled() {
				// cumulative: delays grow with each retry
				backoff += 10 + rand.Intn(20)
			}
			log.Infof("retrying in %d seconds (attempt %d of %d)", backoff, i+1, r.maxAttempts)
			select {
			case <-req.Context().Done():
				// request canceled while waiting to retry
				return resp, req.Context().Err()
			case <-time.After(time.Duration(backoff) * time.Second):
			}
		}
		if r.onBeforeRequest != nil {
			r.onBeforeRequest(i)
		}
		clonedReq := cloneRequest(req)
		resp, err = r.next.RoundTrip(clonedReq)
		if err == nil {
			if !r.ShouldRetry(resp.StatusCode) {
				return resp, nil
			}
		}
	}
	return resp, err
}
type JWTTransport struct {
MachineID *string
Password *strfmt.Password
token string
Token string
Expiration time.Time
Scenarios []string
URL *url.URL
@ -86,8 +136,9 @@ type JWTTransport struct {
UserAgent string
// Transport is the underlying HTTP transport to use when making requests.
// It will default to http.DefaultTransport if nil.
Transport http.RoundTripper
UpdateScenario func() ([]string, error)
Transport http.RoundTripper
UpdateScenario func() ([]string, error)
refreshTokenMutex sync.Mutex
}
func (t *JWTTransport) refreshJwtToken() error {
@ -123,7 +174,14 @@ func (t *JWTTransport) refreshJwtToken() error {
return errors.Wrap(err, "could not create request")
}
req.Header.Add("Content-Type", "application/json")
client := &http.Client{}
client := &http.Client{
Transport: &retryRoundTripper{
next: http.DefaultTransport,
maxAttempts: 5,
withBackOff: true,
retryStatusCodes: []int{http.StatusTooManyRequests, http.StatusServiceUnavailable, http.StatusGatewayTimeout, http.StatusInternalServerError},
},
}
if t.UserAgent != "" {
req.Header.Add("User-Agent", t.UserAgent)
}
@ -149,6 +207,7 @@ func (t *JWTTransport) refreshJwtToken() error {
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
log.Debugf("received response status %q when fetching %v", resp.Status, req.URL)
err = CheckResponse(resp)
if err != nil {
return err
@ -161,45 +220,51 @@ func (t *JWTTransport) refreshJwtToken() error {
if err := t.Expiration.UnmarshalText([]byte(response.Expire)); err != nil {
return errors.Wrap(err, "unable to parse jwt expiration")
}
t.token = response.Token
t.Token = response.Token
log.Debugf("token %s will expire on %s", t.token, t.Expiration.String())
log.Debugf("token %s will expire on %s", t.Token, t.Expiration.String())
return nil
}
// RoundTrip implements the RoundTripper interface.
func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) {
if t.token == "" || t.Expiration.Add(-time.Minute).Before(time.Now().UTC()) {
// in a few occasions several goroutines will execute refreshJwtToken concurrently which is useless and will cause overload on CAPI
// we use a mutex to avoid this
t.refreshTokenMutex.Lock()
if t.Token == "" || t.Expiration.Add(-time.Minute).Before(time.Now().UTC()) {
if err := t.refreshJwtToken(); err != nil {
t.refreshTokenMutex.Unlock()
return nil, err
}
}
t.refreshTokenMutex.Unlock()
// We must make a copy of the Request so
// that we don't modify the Request we were given. This is required by the
// specification of http.RoundTripper.
req = cloneRequest(req)
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", t.token))
log.Debugf("req-jwt: %s %s", req.Method, req.URL.String())
if log.GetLevel() >= log.TraceLevel {
dump, _ := httputil.DumpRequest(req, true)
log.Tracef("req-jwt: %s", string(dump))
}
if t.UserAgent != "" {
req.Header.Add("User-Agent", t.UserAgent)
}
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", t.Token))
if log.GetLevel() >= log.TraceLevel {
//requestToDump := cloneRequest(req)
dump, _ := httputil.DumpRequest(req, true)
log.Tracef("req-jwt: %s", string(dump))
}
// Make the HTTP request.
resp, err := t.transport().RoundTrip(req)
if log.GetLevel() >= log.TraceLevel {
dump, _ := httputil.DumpResponse(resp, true)
log.Tracef("resp-jwt: %s (err:%v)", string(dump), err)
}
if err != nil || resp.StatusCode == http.StatusForbidden || resp.StatusCode == http.StatusUnauthorized {
if err != nil {
/*we had an error (network error for example, or 401 because token is refused), reset the token ?*/
t.token = ""
t.Token = ""
return resp, errors.Wrapf(err, "performing jwt auth")
}
log.Debugf("resp-jwt: %d", resp.StatusCode)
return resp, nil
}
@ -207,11 +272,39 @@ func (t *JWTTransport) Client() *http.Client {
return &http.Client{Transport: t}
}
// ResetToken clears the cached JWT under the refresh mutex, forcing the next
// request through this transport to re-authenticate.
func (t *JWTTransport) ResetToken() {
	log.Debug("resetting jwt token")
	t.refreshTokenMutex.Lock()
	defer t.refreshTokenMutex.Unlock()
	t.Token = ""
}
// transport returns the configured round tripper (or http.DefaultTransport)
// wrapped in two retry layers: an inner one that backs off when the
// infrastructure is overloaded, and an outer one that retries auth failures
// once after resetting the token.
// NOTE(review): the merge diff had left an unreachable `return t.Transport`
// before the assignment and a trailing `return http.DefaultTransport` after
// the main return; both artifacts are removed here.
func (t *JWTTransport) transport() http.RoundTripper {
	var transport http.RoundTripper
	if t.Transport != nil {
		transport = t.Transport
	} else {
		transport = http.DefaultTransport
	}
	// a round tripper that retries once when the status is unauthorized and 5 times when infrastructure is overloaded
	return &retryRoundTripper{
		next: &retryRoundTripper{
			next:             transport,
			maxAttempts:      5,
			withBackOff:      true,
			retryStatusCodes: []int{http.StatusTooManyRequests, http.StatusServiceUnavailable, http.StatusGatewayTimeout},
		},
		maxAttempts:      2,
		withBackOff:      false,
		retryStatusCodes: []int{http.StatusUnauthorized, http.StatusForbidden},
		onBeforeRequest: func(attempt int) {
			// reset the token only in the second attempt as this is when we know we had a 401 or 403
			// the second attempt is supposed to refresh the token
			if attempt > 0 {
				t.ResetToken()
			}
		},
	}
}
// cloneRequest returns a clone of the provided *http.Request. The clone is a
@ -225,5 +318,12 @@ func cloneRequest(r *http.Request) *http.Request {
for k, s := range r.Header {
r2.Header[k] = append([]string(nil), s...)
}
if r.Body != nil {
var b bytes.Buffer
b.ReadFrom(r.Body)
r.Body = io.NopCloser(&b)
r2.Body = io.NopCloser(bytes.NewReader(b.Bytes()))
}
return r2
}

View file

@ -234,5 +234,5 @@ func TestWatcherEnroll(t *testing.T) {
}
_, err = client.Auth.EnrollWatcher(context.Background(), "badkey", "", []string{}, false)
assert.Contains(t, err.Error(), "the attachment key provided is not valid")
assert.Contains(t, err.Error(), "the attachment key provided is not valid", "got %s", err.Error())
}

View file

@ -53,8 +53,8 @@ func NewClient(config *Config) (*ApiClient, error) {
UpdateScenario: config.UpdateScenario,
}
tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify}
tlsconfig.RootCAs = CaCertPool
if Cert != nil {
tlsconfig.RootCAs = CaCertPool
tlsconfig.Certificates = []tls.Certificate{*Cert}
}
http.DefaultTransport.(*http.Transport).TLSClientConfig = &tlsconfig
@ -75,8 +75,8 @@ func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *htt
client = &http.Client{}
if ht, ok := http.DefaultTransport.(*http.Transport); ok {
tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify}
tlsconfig.RootCAs = CaCertPool
if Cert != nil {
tlsconfig.RootCAs = CaCertPool
tlsconfig.Certificates = []tls.Certificate{*Cert}
}
ht.TLSClientConfig = &tlsconfig

View file

@ -85,8 +85,8 @@ func (a *apic) FetchScenariosListFromDB() ([]string, error) {
return scenarios, nil
}
func alertToSignal(alert *models.Alert, scenarioTrust string) *models.AddSignalsRequestItem {
return &models.AddSignalsRequestItem{
func alertToSignal(alert *models.Alert, scenarioTrust string, shareContext bool) *models.AddSignalsRequestItem {
signal := &models.AddSignalsRequestItem{
Message: alert.Message,
Scenario: alert.Scenario,
ScenarioHash: alert.ScenarioHash,
@ -96,8 +96,19 @@ func alertToSignal(alert *models.Alert, scenarioTrust string) *models.AddSignals
StopAt: alert.StopAt,
CreatedAt: alert.CreatedAt,
MachineID: alert.MachineID,
ScenarioTrust: &scenarioTrust,
ScenarioTrust: scenarioTrust,
}
if shareContext {
signal.Context = make([]*models.AddSignalsRequestItemContextItems0, 0)
for _, meta := range alert.Meta {
contextItem := models.AddSignalsRequestItemContextItems0{
Key: meta.Key,
Value: meta.Value,
}
signal.Context = append(signal.Context, &contextItem)
}
}
return signal
}
func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, consoleConfig *csconfig.ConsoleConfig) (*apic, error) {
@ -176,7 +187,7 @@ func (a *apic) Push() error {
var signals []*models.AddSignalsRequestItem
for _, alert := range alerts {
if ok := shouldShareAlert(alert, a.consoleConfig); ok {
signals = append(signals, alertToSignal(alert, getScenarioTrustOfAlert(alert)))
signals = append(signals, alertToSignal(alert, getScenarioTrustOfAlert(alert), *a.consoleConfig.ShareContext))
}
}
a.mu.Lock()

View file

@ -58,6 +58,7 @@ func getAPIC(t *testing.T) *apic {
ShareManualDecisions: types.BoolPtr(false),
ShareTaintedScenarios: types.BoolPtr(false),
ShareCustomScenarios: types.BoolPtr(false),
ShareContext: types.BoolPtr(false),
},
}
}

119
pkg/cache/cache.go vendored Normal file
View file

@ -0,0 +1,119 @@
package cache
import (
"time"
"github.com/bluele/gcache"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
)
var Caches []gcache.Cache
var CacheNames []string
var CacheConfig []CacheCfg
/*prometheus*/
var CacheMetrics = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "cs_cache_size",
Help: "Entries per cache.",
},
[]string{"name", "type"},
)
// UpdateCacheMetrics is called directly by the prom handler.
// It resets the cs_cache_size gauge and re-publishes one sample per
// registered cache, labelled with the cache name and eviction strategy,
// using the cache's current entry count (Len(false): without evicting
// expired entries first).
func UpdateCacheMetrics() {
	CacheMetrics.Reset()
	for i, name := range CacheNames {
		CacheMetrics.With(prometheus.Labels{"name": name, "type": CacheConfig[i].Strategy}).Set(float64(Caches[i].Len(false)))
	}
}
// CacheCfg describes a named in-memory cache registered via CacheInit.
type CacheCfg struct {
	Name     string        // unique cache identifier, used by SetKey/GetKey lookups
	Size     int           // maximum number of entries
	TTL      time.Duration // default expiration used when SetKey gets a nil expiration
	Strategy string        // eviction strategy: "LRU", "LFU" or "ARC" (anything else falls back to LRU)
	LogLevel *log.Level    // optional; defaults to InfoLevel in CacheInit
	Logger   *log.Entry    // per-cache logger, populated by CacheInit
}
// CacheInit registers a new named cache described by cfg: it builds a gcache
// instance with the requested size and eviction strategy (defaulting to LRU),
// wires a dedicated logger, and appends the cache to the package-level
// registries used by SetKey/GetKey and the prometheus metrics.
func CacheInit(cfg CacheCfg) error {
	for _, name := range CacheNames {
		if name == cfg.Name {
			log.Infof("Cache %s already exists", cfg.Name)
			// bail out instead of registering a duplicate: SetKey/GetKey
			// resolve caches by first matching name, so a second registration
			// would only waste memory and double-count in the metrics
			return nil
		}
	}

	//get a default logger
	if cfg.LogLevel == nil {
		cfg.LogLevel = new(log.Level)
		*cfg.LogLevel = log.InfoLevel
	}

	clog := logrus.New()
	if err := types.ConfigureLogger(clog); err != nil {
		log.Fatalf("While creating cache logger : %s", err)
	}

	clog.SetLevel(*cfg.LogLevel)
	cfg.Logger = clog.WithFields(log.Fields{
		"cache": cfg.Name,
	})

	tmpCache := gcache.New(cfg.Size)

	switch cfg.Strategy {
	case "LRU":
		tmpCache = tmpCache.LRU()
	case "LFU":
		tmpCache = tmpCache.LFU()
	case "ARC":
		tmpCache = tmpCache.ARC()
	default:
		// unknown strategy: normalize the config so metrics report what is
		// actually in use
		cfg.Strategy = "LRU"
		tmpCache = tmpCache.LRU()
	}

	cache := tmpCache.Build()

	Caches = append(Caches, cache)
	CacheNames = append(CacheNames, cfg.Name)
	CacheConfig = append(CacheConfig, cfg)

	return nil
}
// SetKey stores value under key in the cache named cacheName. A nil
// expiration falls back to the cache's configured TTL. Set failures are
// logged (best effort) rather than returned; like GetKey, an unknown cache
// name is logged as a warning. Always returns nil.
func SetKey(cacheName string, key string, value string, expiration *time.Duration) error {
	for i, name := range CacheNames {
		if name != cacheName {
			continue
		}

		if expiration == nil {
			expiration = &CacheConfig[i].TTL
		}

		CacheConfig[i].Logger.Debugf("Setting key %s to %s with expiration %v", key, value, *expiration)

		if err := Caches[i].SetWithExpire(key, value, *expiration); err != nil {
			CacheConfig[i].Logger.Warningf("While setting key %s in cache %s: %s", key, cacheName, err)
		}

		// cache names are unique (enforced at init), first match wins
		return nil
	}

	// consistent with GetKey: surface lookups against unregistered caches
	log.Warningf("Cache %s not found", cacheName)

	return nil
}
// GetKey looks up key in the cache named cacheName. It returns the stored
// string, or "" (with a nil error) when the key is absent or the cache name
// is unknown; unexpected gcache errors are logged and returned.
func GetKey(cacheName string, key string) (string, error) {
	for i, name := range CacheNames {
		if name != cacheName {
			continue
		}

		value, err := Caches[i].Get(key)
		if err == nil {
			return value.(string), nil
		}

		//do not warn or log if key not found
		if err == gcache.KeyNotFoundError {
			return "", nil
		}

		CacheConfig[i].Logger.Warningf("While getting key %s in cache %s: %s", key, cacheName, err)

		return "", err
	}

	log.Warningf("Cache %s not found", cacheName)

	return "", nil
}

30
pkg/cache/cache_test.go vendored Normal file
View file

@ -0,0 +1,30 @@
package cache
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestCreateSetGet covers the cache lifecycle: init, set/get, overwrite,
// and TTL-based expiration.
func TestCreateSetGet(t *testing.T) {
	err := CacheInit(CacheCfg{Name: "test", Size: 100, TTL: 1 * time.Second})
	assert.Empty(t, err)
	//set & get
	err = SetKey("test", "testkey0", "testvalue1", nil)
	assert.Empty(t, err)

	ret, err := GetKey("test", "testkey0")
	assert.Equal(t, "testvalue1", ret)
	assert.Empty(t, err)
	//re-set: fetch again so we actually verify the overwrite
	//(the previous version re-asserted the stale `ret` from before the set)
	err = SetKey("test", "testkey0", "testvalue2", nil)
	assert.Empty(t, err)

	ret, err = GetKey("test", "testkey0")
	assert.Equal(t, "testvalue2", ret)
	assert.Empty(t, err)
	//expire
	time.Sleep(1500 * time.Millisecond)
	ret, err = GetKey("test", "testkey0")
	assert.Equal(t, "", ret)
	assert.Empty(t, err)
}

View file

@ -82,7 +82,7 @@ func (l *LocalApiClientCfg) Load() error {
}
}
if l.Credentials.Login != "" && (l.Credentials.CACertPath != "" || l.Credentials.CertPath != "" || l.Credentials.KeyPath != "") {
if l.Credentials.Login != "" && (l.Credentials.CertPath != "" || l.Credentials.KeyPath != "") {
return fmt.Errorf("user/password authentication and TLS authentication are mutually exclusive")
}
@ -92,12 +92,7 @@ func (l *LocalApiClientCfg) Load() error {
apiclient.InsecureSkipVerify = *l.InsecureSkipVerify
}
if l.Credentials.CACertPath != "" && l.Credentials.CertPath != "" && l.Credentials.KeyPath != "" {
cert, err := tls.LoadX509KeyPair(l.Credentials.CertPath, l.Credentials.KeyPath)
if err != nil {
return errors.Wrapf(err, "failed to load api client certificate")
}
if l.Credentials.CACertPath != "" {
caCert, err := os.ReadFile(l.Credentials.CACertPath)
if err != nil {
return errors.Wrapf(err, "failed to load cacert")
@ -105,10 +100,18 @@ func (l *LocalApiClientCfg) Load() error {
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
apiclient.Cert = &cert
apiclient.CaCertPool = caCertPool
}
if l.Credentials.CertPath != "" && l.Credentials.KeyPath != "" {
cert, err := tls.LoadX509KeyPair(l.Credentials.CertPath, l.Credentials.KeyPath)
if err != nil {
return errors.Wrapf(err, "failed to load api client certificate")
}
apiclient.Cert = &cert
}
return nil
}

View file

@ -213,6 +213,7 @@ func TestLoadAPIServer(t *testing.T) {
ShareManualDecisions: types.BoolPtr(false),
ShareTaintedScenarios: types.BoolPtr(true),
ShareCustomScenarios: types.BoolPtr(true),
ShareContext: types.BoolPtr(false),
},
LogDir: LogDirFullPath,
LogMedia: "stdout",

View file

@ -46,8 +46,9 @@ func (c *Config) Dump() error {
return nil
}
func NewConfig(configFile string, disableAgent bool, disableAPI bool) (*Config, error) {
func NewConfig(configFile string, disableAgent bool, disableAPI bool, quiet bool) (*Config, error) {
patcher := yamlpatch.NewPatcher(configFile, ".local")
patcher.SetQuiet(quiet)
fcontent, err := patcher.MergedPatchContent()
if err != nil {
return nil, err

View file

@ -10,13 +10,13 @@ import (
)
// TestNormalLoad exercises NewConfig with a valid config file, a missing
// file, and a file whose keys don't match the csconfig.Config schema.
// NOTE(review): the merge diff had left the old 3-argument NewConfig calls
// interleaved with the new 4-argument ones (duplicate declarations); only
// the post-merge calls are kept.
func TestNormalLoad(t *testing.T) {
	_, err := NewConfig("./tests/config.yaml", false, false, false)
	require.NoError(t, err)

	_, err = NewConfig("./tests/xxx.yaml", false, false, false)
	assert.EqualError(t, err, "while reading yaml file: open ./tests/xxx.yaml: "+cstest.FileNotFoundMessage)

	_, err = NewConfig("./tests/simulation.yaml", false, false, false)
	assert.EqualError(t, err, "./tests/simulation.yaml: yaml: unmarshal errors:\n line 1: field simulation not found in type csconfig.Config")
}

View file

@ -14,9 +14,10 @@ const (
SEND_CUSTOM_SCENARIOS = "custom"
SEND_TAINTED_SCENARIOS = "tainted"
SEND_MANUAL_SCENARIOS = "manual"
SEND_CONTEXT = "context"
)
var CONSOLE_CONFIGS = []string{SEND_CUSTOM_SCENARIOS, SEND_MANUAL_SCENARIOS, SEND_TAINTED_SCENARIOS}
var CONSOLE_CONFIGS = []string{SEND_CUSTOM_SCENARIOS, SEND_MANUAL_SCENARIOS, SEND_TAINTED_SCENARIOS, SEND_CONTEXT}
var DefaultConsoleConfigFilePath = DefaultConfigPath("console.yaml")
@ -24,6 +25,7 @@ type ConsoleConfig struct {
ShareManualDecisions *bool `yaml:"share_manual_decisions"`
ShareTaintedScenarios *bool `yaml:"share_tainted"`
ShareCustomScenarios *bool `yaml:"share_custom"`
ShareContext *bool `yaml:"share_context"`
}
func (c *LocalApiServerCfg) LoadConsoleConfig() error {
@ -33,6 +35,7 @@ func (c *LocalApiServerCfg) LoadConsoleConfig() error {
c.ConsoleConfig.ShareCustomScenarios = types.BoolPtr(true)
c.ConsoleConfig.ShareTaintedScenarios = types.BoolPtr(true)
c.ConsoleConfig.ShareManualDecisions = types.BoolPtr(false)
c.ConsoleConfig.ShareContext = types.BoolPtr(false)
return nil
}
@ -57,6 +60,12 @@ func (c *LocalApiServerCfg) LoadConsoleConfig() error {
log.Debugf("no share_manual scenarios found, setting to false")
c.ConsoleConfig.ShareManualDecisions = types.BoolPtr(false)
}
if c.ConsoleConfig.ShareContext == nil {
log.Debugf("no 'context' found, setting to false")
c.ConsoleConfig.ShareContext = types.BoolPtr(false)
}
log.Debugf("Console configuration '%s' loaded successfully", c.ConsoleConfigPath)
return nil

View file

@ -7,31 +7,34 @@ import (
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
"github.com/crowdsecurity/crowdsec/pkg/types"
)
// CrowdsecServiceCfg contains the location of parsers/scenarios/... and acquisition files
type CrowdsecServiceCfg struct {
Enable *bool `yaml:"enable"`
AcquisitionFilePath string `yaml:"acquisition_path,omitempty"`
AcquisitionDirPath string `yaml:"acquisition_dir,omitempty"`
Enable *bool `yaml:"enable"`
AcquisitionFilePath string `yaml:"acquisition_path,omitempty"`
AcquisitionDirPath string `yaml:"acquisition_dir,omitempty"`
ConsoleContextPath string `yaml:"console_context_path"`
ConsoleContextValueLength int `yaml:"console_context_value_length"`
AcquisitionFiles []string `yaml:"-"`
ParserRoutinesCount int `yaml:"parser_routines"`
BucketsRoutinesCount int `yaml:"buckets_routines"`
OutputRoutinesCount int `yaml:"output_routines"`
SimulationConfig *SimulationConfig `yaml:"-"`
LintOnly bool `yaml:"-"` // if set to true, exit after loading configs
BucketStateFile string `yaml:"state_input_file,omitempty"` // if we need to unserialize buckets at start
BucketStateDumpDir string `yaml:"state_output_dir,omitempty"` // if we need to unserialize buckets on shutdown
BucketsGCEnabled bool `yaml:"-"` // we need to garbage collect buckets when in forensic mode
AcquisitionFiles []string `yaml:"-"`
ParserRoutinesCount int `yaml:"parser_routines"`
BucketsRoutinesCount int `yaml:"buckets_routines"`
OutputRoutinesCount int `yaml:"output_routines"`
SimulationConfig *SimulationConfig `yaml:"-"`
LintOnly bool `yaml:"-"` // if set to true, exit after loading configs
BucketStateFile string `yaml:"state_input_file,omitempty"` // if we need to unserialize buckets at start
BucketStateDumpDir string `yaml:"state_output_dir,omitempty"` // if we need to unserialize buckets on shutdown
BucketsGCEnabled bool `yaml:"-"` // we need to garbage collect buckets when in forensic mode
HubDir string `yaml:"-"`
DataDir string `yaml:"-"`
ConfigDir string `yaml:"-"`
HubIndexFile string `yaml:"-"`
SimulationFilePath string `yaml:"-"`
HubDir string `yaml:"-"`
DataDir string `yaml:"-"`
ConfigDir string `yaml:"-"`
HubIndexFile string `yaml:"-"`
SimulationFilePath string `yaml:"-"`
ContextToSend map[string][]string `yaml:"-"`
}
func (c *Config) LoadCrowdsec() error {
@ -152,5 +155,50 @@ func (c *Config) LoadCrowdsec() error {
return errors.Wrap(err, "while loading hub")
}
c.Crowdsec.ContextToSend = make(map[string][]string, 0)
fallback := false
if c.Crowdsec.ConsoleContextPath == "" {
// fallback to default config file
c.Crowdsec.ConsoleContextPath = filepath.Join(c.Crowdsec.ConfigDir, "console", "context.yaml")
fallback = true
}
f, err := filepath.Abs(c.Crowdsec.ConsoleContextPath)
if err != nil {
return fmt.Errorf("fail to get absolute path of %s: %s", c.Crowdsec.ConsoleContextPath, err)
}
c.Crowdsec.ConsoleContextPath = f
yamlFile, err := os.ReadFile(c.Crowdsec.ConsoleContextPath)
if err != nil {
if fallback {
log.Debugf("Default context config file doesn't exist, will not use it")
} else {
return fmt.Errorf("failed to open context file: %s", err)
}
} else {
err = yaml.Unmarshal(yamlFile, c.Crowdsec.ContextToSend)
if err != nil {
return fmt.Errorf("unmarshaling labels console config file '%s': %s", c.Crowdsec.ConsoleContextPath, err)
}
}
return nil
}
// DumpContextConfigFile serializes ContextToSend to YAML and writes it to
// ConsoleContextPath (mode 0600), logging on success.
func (c *CrowdsecServiceCfg) DumpContextConfigFile() error {
	out, err := yaml.Marshal(c.ContextToSend)
	if err != nil {
		return errors.Wrapf(err, "while marshaling ConsoleConfig (for %s)", c.ConsoleContextPath)
	}

	if err = os.WriteFile(c.ConsoleContextPath, out, 0600); err != nil {
		return errors.Wrapf(err, "while dumping console config to %s", c.ConsoleContextPath)
	}

	log.Infof("%s file saved", c.ConsoleContextPath)

	return nil
}

View file

@ -33,6 +33,9 @@ func TestLoadCrowdsec(t *testing.T) {
hubIndexFileFullPath, err := filepath.Abs("./hub/.index.json")
require.NoError(t, err)
contextFileFullPath, err := filepath.Abs("./tests/context.yaml")
require.NoError(t, err)
tests := []struct {
name string
input *Config
@ -53,23 +56,30 @@ func TestLoadCrowdsec(t *testing.T) {
},
},
Crowdsec: &CrowdsecServiceCfg{
AcquisitionFilePath: "./tests/acquis.yaml",
SimulationFilePath: "./tests/simulation.yaml",
AcquisitionFilePath: "./tests/acquis.yaml",
SimulationFilePath: "./tests/simulation.yaml",
ConsoleContextPath: "./tests/context.yaml",
ConsoleContextValueLength: 2500,
},
},
expectedResult: &CrowdsecServiceCfg{
Enable: types.BoolPtr(true),
AcquisitionDirPath: "",
AcquisitionFilePath: acquisFullPath,
ConfigDir: configDirFullPath,
DataDir: dataFullPath,
HubDir: hubFullPath,
HubIndexFile: hubIndexFileFullPath,
BucketsRoutinesCount: 1,
ParserRoutinesCount: 1,
OutputRoutinesCount: 1,
AcquisitionFiles: []string{acquisFullPath},
SimulationFilePath: "./tests/simulation.yaml",
Enable: types.BoolPtr(true),
AcquisitionDirPath: "",
ConsoleContextPath: contextFileFullPath,
AcquisitionFilePath: acquisFullPath,
ConfigDir: configDirFullPath,
DataDir: dataFullPath,
HubDir: hubFullPath,
HubIndexFile: hubIndexFileFullPath,
BucketsRoutinesCount: 1,
ParserRoutinesCount: 1,
OutputRoutinesCount: 1,
ConsoleContextValueLength: 2500,
AcquisitionFiles: []string{acquisFullPath},
SimulationFilePath: "./tests/simulation.yaml",
ContextToSend: map[string][]string{
"source_ip": {"evt.Parsed.source_ip"},
},
SimulationConfig: &SimulationConfig{
Simulation: &falseBoolPtr,
},
@ -92,21 +102,27 @@ func TestLoadCrowdsec(t *testing.T) {
AcquisitionFilePath: "./tests/acquis.yaml",
AcquisitionDirPath: "./tests/acquis/",
SimulationFilePath: "./tests/simulation.yaml",
ConsoleContextPath: "./tests/context.yaml",
},
},
expectedResult: &CrowdsecServiceCfg{
Enable: types.BoolPtr(true),
AcquisitionDirPath: acquisDirFullPath,
AcquisitionFilePath: acquisFullPath,
ConfigDir: configDirFullPath,
HubIndexFile: hubIndexFileFullPath,
DataDir: dataFullPath,
HubDir: hubFullPath,
BucketsRoutinesCount: 1,
ParserRoutinesCount: 1,
OutputRoutinesCount: 1,
AcquisitionFiles: []string{acquisFullPath, acquisInDirFullPath},
SimulationFilePath: "./tests/simulation.yaml",
Enable: types.BoolPtr(true),
AcquisitionDirPath: acquisDirFullPath,
AcquisitionFilePath: acquisFullPath,
ConsoleContextPath: contextFileFullPath,
ConfigDir: configDirFullPath,
HubIndexFile: hubIndexFileFullPath,
DataDir: dataFullPath,
HubDir: hubFullPath,
BucketsRoutinesCount: 1,
ParserRoutinesCount: 1,
OutputRoutinesCount: 1,
ConsoleContextValueLength: 0,
AcquisitionFiles: []string{acquisFullPath, acquisInDirFullPath},
ContextToSend: map[string][]string{
"source_ip": {"evt.Parsed.source_ip"},
},
SimulationFilePath: "./tests/simulation.yaml",
SimulationConfig: &SimulationConfig{
Simulation: &falseBoolPtr,
},
@ -125,21 +141,29 @@ func TestLoadCrowdsec(t *testing.T) {
CredentialsFilePath: "./tests/lapi-secrets.yaml",
},
},
Crowdsec: &CrowdsecServiceCfg{},
Crowdsec: &CrowdsecServiceCfg{
ConsoleContextPath: contextFileFullPath,
ConsoleContextValueLength: 10,
},
},
expectedResult: &CrowdsecServiceCfg{
Enable: types.BoolPtr(true),
AcquisitionDirPath: "",
AcquisitionFilePath: "",
ConfigDir: configDirFullPath,
HubIndexFile: hubIndexFileFullPath,
DataDir: dataFullPath,
HubDir: hubFullPath,
BucketsRoutinesCount: 1,
ParserRoutinesCount: 1,
OutputRoutinesCount: 1,
AcquisitionFiles: []string{},
SimulationFilePath: "",
Enable: types.BoolPtr(true),
AcquisitionDirPath: "",
AcquisitionFilePath: "",
ConfigDir: configDirFullPath,
HubIndexFile: hubIndexFileFullPath,
DataDir: dataFullPath,
HubDir: hubFullPath,
ConsoleContextPath: contextFileFullPath,
BucketsRoutinesCount: 1,
ParserRoutinesCount: 1,
OutputRoutinesCount: 1,
ConsoleContextValueLength: 10,
AcquisitionFiles: []string{},
SimulationFilePath: "",
ContextToSend: map[string][]string{
"source_ip": {"evt.Parsed.source_ip"},
},
SimulationConfig: &SimulationConfig{
Simulation: &falseBoolPtr,
},
@ -159,6 +183,7 @@ func TestLoadCrowdsec(t *testing.T) {
},
},
Crowdsec: &CrowdsecServiceCfg{
ConsoleContextPath: "",
AcquisitionFilePath: "./tests/acquis_not_exist.yaml",
},
},

View file

@ -0,0 +1,2 @@
source_ip:
- evt.Parsed.source_ip

View file

@ -7,6 +7,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
logtest "github.com/sirupsen/logrus/hooks/test"
)
func AssertErrorContains(t *testing.T, err error, expectedErr string) {
@ -20,6 +22,21 @@ func AssertErrorContains(t *testing.T, err error, expectedErr string) {
assert.NoError(t, err)
}
func AssertErrorMessage(t *testing.T, err error, expectedErr string) {
t.Helper()
if expectedErr != "" {
errmsg := ""
if err != nil {
errmsg = err.Error()
}
assert.Equal(t, expectedErr, errmsg)
return
}
require.NoError(t, err)
}
func RequireErrorContains(t *testing.T, err error, expectedErr string) {
t.Helper()
@ -31,6 +48,39 @@ func RequireErrorContains(t *testing.T, err error, expectedErr string) {
require.NoError(t, err)
}
// RequireErrorMessage is the fail-fast variant of AssertErrorMessage: the
// test stops at the first mismatch. An empty expectedErr requires err to be
// nil.
func RequireErrorMessage(t *testing.T, err error, expectedErr string) {
	t.Helper()

	if expectedErr == "" {
		require.NoError(t, err)
		return
	}

	errmsg := ""
	if err != nil {
		errmsg = err.Error()
	}

	require.Equal(t, expectedErr, errmsg)
}
// RequireLogContains fails the test unless some captured log entry contains
// the expected substring; on failure it first dumps every entry so the
// mismatch is debuggable.
func RequireLogContains(t *testing.T, hook *logtest.Hook, expected string) {
	t.Helper()

	entries := hook.AllEntries()

	// look for a log entry that matches the expected message
	for _, entry := range entries {
		if strings.Contains(entry.Message, expected) {
			return
		}
	}

	// show all hook entries, in case the test fails we'll need them
	for _, entry := range entries {
		t.Logf("log entry: %s", entry.Message)
	}

	require.Fail(t, "no log entry found with message", expected)
}
// Interpolate fills a string template with the given values, can be map or struct.
// example: Interpolate("{{.Name}}", map[string]string{"Name": "JohnDoe"})
func Interpolate(s string, data interface{}) (string, error) {

View file

@ -14,6 +14,7 @@ import (
"github.com/c-robinson/iplib"
"github.com/crowdsecurity/crowdsec/pkg/cache"
"github.com/crowdsecurity/crowdsec/pkg/database"
"github.com/davecgh/go-spew/spew"
log "github.com/sirupsen/logrus"
@ -68,6 +69,9 @@ func GetExprEnv(ctx map[string]interface{}) map[string]interface{} {
"GetDecisionsCount": GetDecisionsCount,
"GetDecisionsSinceCount": GetDecisionsSinceCount,
"Sprintf": fmt.Sprintf,
"ParseUnix": ParseUnix,
"GetFromStash": cache.GetKey,
"SetInStash": cache.SetKey,
}
for k, v := range ctx {
ExprLib[k] = v
@ -283,10 +287,28 @@ func GetDecisionsSinceCount(value string, since string) int {
}
func LookupHost(value string) []string {
addresses , err := net.LookupHost(value)
addresses, err := net.LookupHost(value)
if err != nil {
log.Errorf("Failed to lookup host '%s' : %s", value, err)
return []string{}
return []string{}
}
return addresses
}
// ParseUnixTime converts a unix-timestamp string (seconds, optionally with a
// fractional part which is discarded) into a time.Time. Non-numeric, zero or
// negative values yield an error.
func ParseUnixTime(value string) (time.Time, error) {
	// drop any fractional seconds so ParseInt doesn't choke on the dot
	seconds := strings.Split(value, ".")[0]

	parsed, err := strconv.ParseInt(seconds, 10, 64)
	if err != nil || parsed <= 0 {
		return time.Time{}, fmt.Errorf("unable to parse %s as unix timestamp", value)
	}

	return time.Unix(parsed, 0), nil
}
// ParseUnix renders a unix-timestamp string as RFC3339; on parse failure it
// logs the error and returns "".
func ParseUnix(value string) string {
	parsed, err := ParseUnixTime(value)
	if err != nil {
		log.Error(err)
		return ""
	}

	return parsed.Format(time.RFC3339)
}

View file

@ -12,6 +12,7 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/database"
"github.com/crowdsecurity/crowdsec/pkg/models"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/crowdsecurity/crowdsec/pkg/cstest"
log "github.com/sirupsen/logrus"
"testing"
@ -971,3 +972,47 @@ func TestGetDecisionsSinceCount(t *testing.T) {
log.Printf("test '%s' : OK", test.name)
}
}
// TestParseUnixTime is a table-driven test covering valid timestamps (with
// and without a fractional part), non-numeric input, and negative values.
// Valid results are compared with WithinDuration because the fractional part
// is deliberately truncated by ParseUnixTime.
func TestParseUnixTime(t *testing.T) {
	tests := []struct {
		name        string
		value       string
		expected    time.Time
		expectedErr string
	}{
		{
			name:     "ParseUnix() test: valid value with milli",
			value:    "1672239773.3590894",
			expected: time.Date(2022, 12, 28, 15, 02, 53, 0, time.UTC),
		},
		{
			name:     "ParseUnix() test: valid value without milli",
			value:    "1672239773",
			expected: time.Date(2022, 12, 28, 15, 02, 53, 0, time.UTC),
		},
		{
			name:        "ParseUnix() test: invalid input",
			value:       "AbcDefG!#",
			expected:    time.Time{},
			expectedErr: "unable to parse AbcDefG!# as unix timestamp",
		},
		{
			name:        "ParseUnix() test: negative value",
			value:       "-1000",
			expected:    time.Time{},
			expectedErr: "unable to parse -1000 as unix timestamp",
		},
	}

	for _, tc := range tests {
		tc := tc // capture range variable for the subtest closure
		t.Run(tc.name, func(t *testing.T) {
			output, err := ParseUnixTime(tc.value)
			cstest.RequireErrorContains(t, err, tc.expectedErr)
			if tc.expectedErr != "" {
				return
			}

			require.WithinDuration(t, tc.expected, output, time.Second)
		})
	}
}

19
pkg/fflag/crowdsec.go Normal file
View file

@ -0,0 +1,19 @@
package fflag
var Crowdsec = FeatureRegister{EnvPrefix: "CROWDSEC_FEATURE_"}
var CscliSetup = &Feature{Name: "cscli_setup"}
var DisableHttpRetryBackoff = &Feature{Name: "disable_http_retry_backoff", Description: "Disable http retry backoff"}
func RegisterAllFeatures() error {
err := Crowdsec.RegisterFeature(CscliSetup)
if err != nil {
return err
}
err = Crowdsec.RegisterFeature(DisableHttpRetryBackoff)
if err != nil {
return err
}
return nil
}

264
pkg/fflag/features.go Normal file
View file

@ -0,0 +1,264 @@
// Package fflag provides a simple feature flag system.
//
// Feature names are lowercase and can only contain letters, numbers, undercores
// and dots.
//
// good: "foo", "foo_bar", "foo.bar"
// bad: "Foo", "foo-bar"
//
// A feature flag can be enabled by the user with an environment variable
// or by adding it to {ConfigDir}/feature.yaml
//
// I.e. CROWDSEC_FEATURE_FOO_BAR=true
// or in feature.yaml:
// ---
// - foo_bar
//
// If the variable is set to false, the feature can still be enabled
// in feature.yaml. Features cannot be disabled in the file.
//
// A feature flag can be deprecated or retired. A deprecated feature flag is
// still accepted but a warning is logged. A retired feature flag is ignored
// and an error is logged.
//
// A specific deprecation message is used to inform the user of the behavior
// that has been decided when the flag is/was finally retired.
package fflag
import (
"errors"
"fmt"
"io"
"os"
"regexp"
"sort"
"strings"
"github.com/goccy/go-yaml"
"github.com/sirupsen/logrus"
)
var (
ErrFeatureNameEmpty = errors.New("name is empty")
ErrFeatureNameCase = errors.New("name is not lowercase")
ErrFeatureNameInvalid = errors.New("invalid name (allowed a-z, 0-9, _, .)")
ErrFeatureUnknown = errors.New("unknown feature")
ErrFeatureDeprecated = errors.New("the flag is deprecated")
ErrFeatureRetired = errors.New("the flag is retired")
)
const (
ActiveState = iota // the feature can be enabled, and its description is logged (Info)
DeprecatedState // the feature can be enabled, and a deprecation message is logged (Warning)
RetiredState // the feature is ignored and a deprecation message is logged (Error)
)
type Feature struct {
Name string
State int // active, deprecated, retired
// Description should be a short sentence, explaining the feature.
Description string
// DeprecationMessage is used to inform the user of the behavior that has
// been decided when the flag is/was finally retired.
DeprecationMsg string
enabled bool
}
// IsEnabled reports whether the feature has been turned on (via Set, i.e.
// from the environment or the feature.yaml file).
func (f *Feature) IsEnabled() bool {
	return f.enabled
}
// Set enables or disables a feature flag.
// It should not be called directly by the user, but by SetFromEnv or SetFromYaml.
// Retired flags are left untouched and get ErrFeatureRetired; deprecated
// flags are still applied but get ErrFeatureDeprecated so callers can warn.
func (f *Feature) Set(value bool) error {
	switch f.State {
	case RetiredState:
		// retired feature flags are ignored
		return ErrFeatureRetired
	case DeprecatedState:
		// deprecated feature flags are still accepted, but a warning is
		// triggered. We return an error but set the feature anyway.
		f.enabled = value
		return ErrFeatureDeprecated
	}

	f.enabled = value

	return nil
}
// A register allows to enable features from the environment or a file
type FeatureRegister struct {
EnvPrefix string
features map[string]*Feature
}
var featureNameRexp = regexp.MustCompile(`^[a-z0-9_\.]+$`)
// validateFeatureName enforces the naming rules for feature flags:
// non-empty, lowercase, and only a-z, 0-9, underscore, dot.
func validateFeatureName(featureName string) error {
	switch {
	case featureName == "":
		return ErrFeatureNameEmpty
	case featureName != strings.ToLower(featureName):
		return ErrFeatureNameCase
	case !featureNameRexp.MatchString(featureName):
		return ErrFeatureNameInvalid
	}

	return nil
}
// RegisterFeature adds feat to the register after validating its name,
// lazily allocating the backing map. Registering the same name twice
// silently replaces the earlier entry.
func (fr *FeatureRegister) RegisterFeature(feat *Feature) error {
	err := validateFeatureName(feat.Name)
	if err != nil {
		return fmt.Errorf("feature flag '%s': %w", feat.Name, err)
	}

	if fr.features == nil {
		fr.features = map[string]*Feature{}
	}

	fr.features[feat.Name] = feat

	return nil
}
// GetFeature returns the registered feature with the given name, or a nil
// feature and ErrFeatureUnknown when it was never registered.
func (fr *FeatureRegister) GetFeature(featureName string) (*Feature, error) {
	if feat, ok := fr.features[featureName]; ok {
		return feat, nil
	}

	return nil, ErrFeatureUnknown
}
// SetFromEnv scans the process environment for variables starting with the
// register's EnvPrefix (e.g. CROWDSEC_FEATURE_FOO_BAR=true) and applies them
// to the matching feature flags. Invalid values, unknown features and
// retired flags are logged and skipped; deprecated flags are applied with a
// warning. Returns an error only if Feature.Set fails for another reason.
func (fr *FeatureRegister) SetFromEnv(logger *logrus.Logger) error {
	for _, e := range os.Environ() {
		// ignore non-feature variables
		if !strings.HasPrefix(e, fr.EnvPrefix) {
			continue
		}

		// extract feature name and value
		pair := strings.SplitN(e, "=", 2)
		varName := pair[0]
		// env var names are uppercased; flag names are lowercase by rule
		featureName := strings.ToLower(varName[len(fr.EnvPrefix):])
		value := pair[1]

		var enable bool

		// only the exact strings "true"/"false" are accepted
		switch value {
		case "true":
			enable = true
		case "false":
			enable = false
		default:
			logger.Errorf("Ignored envvar %s=%s: invalid value (must be 'true' or 'false')", varName, value)
			continue
		}

		feat, err := fr.GetFeature(featureName)
		if err != nil {
			logger.Errorf("Ignored envvar '%s': %s.", varName, err)
			continue
		}

		err = feat.Set(enable)

		switch {
		case errors.Is(err, ErrFeatureRetired):
			// retired: value was NOT applied
			logger.Errorf("Ignored envvar '%s': %s. %s", varName, err, feat.DeprecationMsg)
			continue
		case errors.Is(err, ErrFeatureDeprecated):
			// deprecated: value WAS applied, warn the user
			logger.Warningf("Envvar '%s': %s. %s", varName, err, feat.DeprecationMsg)
		case err != nil:
			return err
		}

		logger.Infof("Feature flag: %s=%t (from envvar). %s", featureName, enable, feat.Description)
	}

	return nil
}
// SetFromYaml reads a YAML list of feature-flag names from r and enables each
// one. Per the package contract, flags can only be ENABLED from the file,
// never disabled. Unknown and retired flags are logged and skipped;
// deprecated flags are enabled with a warning.
func (fr *FeatureRegister) SetFromYaml(r io.Reader, logger *logrus.Logger) error {
	var cfg []string

	bys, err := io.ReadAll(r)
	if err != nil {
		return err
	}

	// parse config file
	if err := yaml.Unmarshal(bys, &cfg); err != nil {
		// an empty file yields io.EOF from this parser — treat it as
		// "no flags configured" rather than an error
		if !errors.Is(err, io.EOF) {
			return fmt.Errorf("failed to parse feature flags: %w", err)
		}

		logger.Debug("No feature flags in config file")
	}

	// set features
	for _, k := range cfg {
		feat, err := fr.GetFeature(k)
		if err != nil {
			logger.Errorf("Ignored feature flag '%s': %s", k, err)
			continue
		}

		err = feat.Set(true)

		switch {
		case errors.Is(err, ErrFeatureRetired):
			// retired: flag was NOT enabled
			logger.Errorf("Ignored feature flag '%s': %s. %s", k, err, feat.DeprecationMsg)
			continue
		case errors.Is(err, ErrFeatureDeprecated):
			// deprecated: flag WAS enabled, warn the user
			logger.Warningf("Feature '%s': %s. %s", k, err, feat.DeprecationMsg)
		case err != nil:
			return err
		}

		logger.Infof("Feature flag: %s=true (from config file). %s", k, feat.Description)
	}

	return nil
}
// SetFromYamlFile opens the feature-flag file at path and delegates to
// SetFromYaml. A missing file is not an error: it simply means no flags are
// configured.
func (fr *FeatureRegister) SetFromYamlFile(path string, logger *logrus.Logger) error {
	f, err := os.Open(path)
	if os.IsNotExist(err) {
		logger.Debugf("Feature flags config file '%s' does not exist", path)

		return nil
	}

	if err != nil {
		return fmt.Errorf("failed to open feature flags file: %w", err)
	}

	defer f.Close()

	logger.Debugf("Reading feature flags from %s", path)

	return fr.SetFromYaml(f, logger)
}
// GetEnabledFeatures returns the sorted names of all features that have been
// enabled by the user.
func (fr *FeatureRegister) GetEnabledFeatures() []string {
	enabled := []string{}

	for name, feat := range fr.features {
		if !feat.IsEnabled() {
			continue
		}

		enabled = append(enabled, name)
	}

	// map iteration order is random; sort for a stable result
	sort.Strings(enabled)

	return enabled
}

397
pkg/fflag/features_test.go Normal file
View file

@ -0,0 +1,397 @@
package fflag_test
import (
"os"
"strings"
"testing"
"github.com/sirupsen/logrus"
logtest "github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/require"
"github.com/crowdsecurity/crowdsec/pkg/cstest"
"github.com/crowdsecurity/crowdsec/pkg/fflag"
)
// TestRegisterFeature checks the validation rules applied to feature names
// at registration time (lowercase, non-empty, restricted character set).
func TestRegisterFeature(t *testing.T) {
	tests := []struct {
		name        string
		feature     fflag.Feature
		expectedErr string
	}{
		{
			name: "a plain feature",
			feature: fflag.Feature{
				Name: "plain",
			},
		},
		{
			name: "capitalized feature name",
			feature: fflag.Feature{
				Name: "Plain",
			},
			expectedErr: "feature flag 'Plain': name is not lowercase",
		},
		{
			name: "empty feature name",
			feature: fflag.Feature{
				Name: "",
			},
			expectedErr: "feature flag '': name is empty",
		},
		{
			name: "invalid feature name",
			feature: fflag.Feature{
				Name: "meh!",
			},
			expectedErr: "feature flag 'meh!': invalid name (allowed a-z, 0-9, _, .)",
		},
	}
	for _, tc := range tests {
		tc := tc
		// use the case's name for the subtest; it was previously ignored
		// (t.Run("")), which made failures anonymous in test output
		t.Run(tc.name, func(t *testing.T) {
			fr := fflag.FeatureRegister{EnvPrefix: "FFLAG_TEST_"}
			err := fr.RegisterFeature(&tc.feature)
			cstest.RequireErrorContains(t, err, tc.expectedErr)
		})
	}
}
// setUp returns a FeatureRegister pre-populated with one feature of each
// kind used by the tests: plain, described, deprecated and retired.
func setUp(t *testing.T) fflag.FeatureRegister {
	t.Helper()

	fr := fflag.FeatureRegister{EnvPrefix: "FFLAG_TEST_"}

	features := []fflag.Feature{
		{Name: "experimental1"},
		{
			Name:        "some_feature",
			Description: "A feature that does something, with a description",
		},
		{
			Name:           "new_standard",
			State:          fflag.DeprecatedState,
			Description:    "This implements the new standard T34.256w",
			DeprecationMsg: "In 2.0 we'll do T34.256w by default",
		},
		{
			Name:           "was_adopted",
			State:          fflag.RetiredState,
			Description:    "This implements a new tricket",
			DeprecationMsg: "The trinket was implemented in 1.5",
		},
	}

	for idx := range features {
		require.NoError(t, fr.RegisterFeature(&features[idx]))
	}

	return fr
}
// TestGetFeature verifies lookup of a registered feature and the error
// returned for an unknown one.
func TestGetFeature(t *testing.T) {
	tests := []struct {
		name        string
		feature     string
		expectedErr string
	}{
		{
			name:    "just a feature",
			feature: "experimental1",
		}, {
			name:        "feature that does not exist",
			feature:     "will_never_exist",
			expectedErr: "unknown feature",
		},
	}
	fr := setUp(t)
	for _, tc := range tests {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			_, err := fr.GetFeature(tc.feature)
			cstest.RequireErrorMessage(t, err, tc.expectedErr)
			// the trailing `if tc.expectedErr != "" { return }` was dead
			// code (last statement of the closure) and has been removed
		})
	}
}
// TestIsEnabled checks that IsEnabled reflects the value last passed to Set.
func TestIsEnabled(t *testing.T) {
	tests := []struct {
		name     string
		feature  string
		enable   bool
		expected bool
	}{
		{
			name:     "feature that was not enabled",
			feature:  "experimental1",
			expected: false,
		}, {
			name:     "feature that was enabled",
			feature:  "experimental1",
			enable:   true,
			expected: true,
		},
	}

	fr := setUp(t)

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			feat, err := fr.GetFeature(tt.feature)
			require.NoError(t, err)

			require.NoError(t, feat.Set(tt.enable))
			require.Equal(t, tt.expected, feat.IsEnabled())
		})
	}
}
// TestFeatureSet exercises Feature.Set for plain, deprecated, retired and
// unknown features, checking both the returned error and the resulting
// enabled state reported by IsEnabled.
func TestFeatureSet(t *testing.T) {
	tests := []struct {
		name           string // test description
		feature        string // feature name
		value          bool   // value for SetFeature
		expected       bool   // expected value from IsEnabled
		expectedSetErr string // error expected from SetFeature
		expectedGetErr string // error expected from GetFeature
	}{
		{
			name:     "enable a feature to try something new",
			feature:  "experimental1",
			value:    true,
			expected: true,
		}, {
			// not useful in practice, unlikely to happen
			name:     "disable the feature that was enabled",
			feature:  "experimental1",
			value:    false,
			expected: false,
		}, {
			name:           "enable a feature that will be retired in v2",
			feature:        "new_standard",
			value:          true,
			expected:       true,
			expectedSetErr: "the flag is deprecated",
		}, {
			name:           "enable a feature that was retired in v1.5",
			feature:        "was_adopted",
			value:          true,
			expected:       false,
			expectedSetErr: "the flag is retired",
		}, {
			name:           "enable a feature that does not exist",
			feature:        "will_never_exist",
			value:          true,
			expectedSetErr: "unknown feature",
			expectedGetErr: "unknown feature",
		},
	}
	// the tests are not independent because we don't instantiate a feature
	// map for each one, but it simplified the code
	fr := setUp(t)
	for _, tc := range tests {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			feat, err := fr.GetFeature(tc.feature)
			cstest.RequireErrorMessage(t, err, tc.expectedGetErr)
			if tc.expectedGetErr != "" {
				// lookup failed as expected; nothing more to check
				return
			}
			err = feat.Set(tc.value)
			cstest.RequireErrorMessage(t, err, tc.expectedSetErr)
			require.Equal(t, tc.expected, feat.IsEnabled())
		})
	}
}
// TestSetFromEnv checks that feature flags are picked up from environment
// variables carrying the register's prefix, and that invalid values,
// unknown flags, deprecated flags and retired flags are logged as expected.
//
// NOTE(review): the cases share a single FeatureRegister (fr is created
// once, outside the loop), so they are order-dependent.
func TestSetFromEnv(t *testing.T) {
	tests := []struct {
		name   string
		envvar string
		value  string
		// expected bool
		expectedLog []string
		expectedErr string
	}{
		{
			name:   "variable that does not start with FFLAG_TEST_",
			envvar: "PATH",
			value:  "/bin:/usr/bin/:/usr/local/bin",
			// silently ignored
		}, {
			name:        "enable a feature flag",
			envvar:      "FFLAG_TEST_EXPERIMENTAL1",
			value:       "true",
			expectedLog: []string{"Feature flag: experimental1=true (from envvar)"},
		}, {
			name:        "invalid value (not true or false)",
			envvar:      "FFLAG_TEST_EXPERIMENTAL1",
			value:       "maybe",
			expectedLog: []string{"Ignored envvar FFLAG_TEST_EXPERIMENTAL1=maybe: invalid value (must be 'true' or 'false')"},
		}, {
			name:        "feature flag that is unknown",
			envvar:      "FFLAG_TEST_WILL_NEVER_EXIST",
			value:       "true",
			expectedLog: []string{"Ignored envvar 'FFLAG_TEST_WILL_NEVER_EXIST': unknown feature"},
		}, {
			name:   "enable a feature flag with a description",
			envvar: "FFLAG_TEST_SOME_FEATURE",
			value:  "true",
			expectedLog: []string{
				"Feature flag: some_feature=true (from envvar). A feature that does something, with a description",
			},
		}, {
			name:   "enable a deprecated feature",
			envvar: "FFLAG_TEST_NEW_STANDARD",
			value:  "true",
			expectedLog: []string{
				"Envvar 'FFLAG_TEST_NEW_STANDARD': the flag is deprecated. In 2.0 we'll do T34.256w by default",
				"Feature flag: new_standard=true (from envvar). This implements the new standard T34.256w",
			},
		}, {
			name:   "enable a feature that was retired in v1.5",
			envvar: "FFLAG_TEST_WAS_ADOPTED",
			value:  "true",
			expectedLog: []string{
				"Ignored envvar 'FFLAG_TEST_WAS_ADOPTED': the flag is retired. " +
					"The trinket was implemented in 1.5",
			},
		}, {
			// this could happen in theory, but only if environment variables
			// are parsed after configuration files, which is not a good idea
			// because they are more useful asap
			name:   "disable a feature flag already set",
			envvar: "FFLAG_TEST_EXPERIMENTAL1",
			value:  "false",
		},
	}
	fr := setUp(t)
	for _, tc := range tests {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			logger, hook := logtest.NewNullLogger()
			logger.SetLevel(logrus.InfoLevel)
			// t.Setenv restores the variable automatically after the subtest
			t.Setenv(tc.envvar, tc.value)
			err := fr.SetFromEnv(logger)
			cstest.RequireErrorMessage(t, err, tc.expectedErr)
			for _, expectedMessage := range tc.expectedLog {
				cstest.RequireLogContains(t, hook, expectedMessage)
			}
		})
	}
}
// TestSetFromYaml checks parsing of the YAML feature flag list: empty and
// malformed documents, wrong top-level type, unknown flags, and the logging
// behavior for deprecated and retired flags.
//
// NOTE(review): the expectedErr strings embed exact parser positions and
// caret diagrams from the yaml library; bumping that dependency may require
// updating them.
func TestSetFromYaml(t *testing.T) {
	tests := []struct {
		name        string
		yml         string
		expectedLog []string
		expectedErr string
	}{
		{
			name: "empty file",
			yml:  "",
			// no error
		}, {
			name:        "invalid yaml",
			yml:         "bad! content, bad!",
			expectedErr: "failed to parse feature flags: [1:1] string was used where sequence is expected\n > 1 | bad! content, bad!\n ^",
		}, {
			name:        "invalid feature flag name",
			yml:         "- not_a_feature",
			expectedLog: []string{"Ignored feature flag 'not_a_feature': unknown feature"},
		}, {
			name:        "invalid value (must be a list)",
			yml:         "experimental1: true",
			expectedErr: "failed to parse feature flags: [1:14] value was used where sequence is expected\n > 1 | experimental1: true\n ^",
		}, {
			name:        "enable a feature flag",
			yml:         "- experimental1",
			expectedLog: []string{"Feature flag: experimental1=true (from config file)"},
		}, {
			name: "enable a deprecated feature",
			yml:  "- new_standard",
			expectedLog: []string{
				"Feature 'new_standard': the flag is deprecated. In 2.0 we'll do T34.256w by default",
				"Feature flag: new_standard=true (from config file). This implements the new standard T34.256w",
			},
		}, {
			name: "enable a retired feature",
			yml:  "- was_adopted",
			expectedLog: []string{
				"Ignored feature flag 'was_adopted': the flag is retired. The trinket was implemented in 1.5",
			},
		},
	}
	fr := setUp(t)
	for _, tc := range tests {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			logger, hook := logtest.NewNullLogger()
			logger.SetLevel(logrus.InfoLevel)
			err := fr.SetFromYaml(strings.NewReader(tc.yml), logger)
			cstest.RequireErrorMessage(t, err, tc.expectedErr)
			for _, expectedMessage := range tc.expectedLog {
				cstest.RequireLogContains(t, hook, expectedMessage)
			}
		})
	}
}
// TestSetFromYamlFile ensures feature flags are read and applied from a
// configuration file on disk.
func TestSetFromYamlFile(t *testing.T) {
	// t.TempDir() is removed automatically when the test ends, so there is
	// no need for a manual defer os.Remove() cleanup
	tmpfile, err := os.CreateTemp(t.TempDir(), "test")
	require.NoError(t, err)

	// write the config file
	_, err = tmpfile.Write([]byte("- experimental1"))
	require.NoError(t, err)
	require.NoError(t, tmpfile.Close())

	fr := setUp(t)
	logger, hook := logtest.NewNullLogger()
	logger.SetLevel(logrus.InfoLevel)

	err = fr.SetFromYamlFile(tmpfile.Name(), logger)
	require.NoError(t, err)

	cstest.RequireLogContains(t, hook, "Feature flag: experimental1=true (from config file)")
}
// TestGetEnabledFeatures checks that the enabled features are returned
// sorted by name, including deprecated (but not retired) ones.
func TestGetEnabledFeatures(t *testing.T) {
	fr := setUp(t)

	feat1, err := fr.GetFeature("new_standard")
	require.NoError(t, err)
	// "new_standard" is deprecated: Set still enables it but reports the
	// deprecation (previously this return value was silently ignored)
	require.ErrorIs(t, feat1.Set(true), fflag.ErrFeatureDeprecated)

	feat2, err := fr.GetFeature("experimental1")
	require.NoError(t, err)
	require.NoError(t, feat2.Set(true))

	expected := []string{
		"experimental1",
		"new_standard",
	}

	require.Equal(t, expected, fr.GetEnabledFeatures())
}

View file

@ -78,6 +78,7 @@ func (p *ParserAssert) LoadTest(filename string) error {
}
func (p *ParserAssert) AssertFile(testFile string) error {
file, err := os.Open(p.File)
if err != nil {
@ -268,6 +269,32 @@ func LoadParserDump(filepath string) (*ParserResults, error) {
if err := yaml.Unmarshal(results, &pdump); err != nil {
return nil, err
}
/* we know that some variables should always be set,
let's check if they're present in last parser output of last stage */
stages := make([]string, 0, len(pdump))
for k := range pdump {
stages = append(stages, k)
}
sort.Strings(stages)
/*the very last one is set to 'success' which is just a bool indicating if the line was successfully parsed*/
lastStage := stages[len(stages)-2]
parsers := make([]string, 0, len(pdump[lastStage]))
for k := range pdump[lastStage] {
parsers = append(parsers, k)
}
sort.Strings(parsers)
lastParser := parsers[len(parsers)-1]
for idx, result := range pdump[lastStage][lastParser] {
if result.Evt.StrTime == "" {
log.Warningf("Line %d/%d is missing evt.StrTime. It is most likely a mistake as it will prevent your logs to be processed in time-machine/forensic mode.", idx, len(pdump[lastStage][lastParser]))
} else {
log.Debugf("Line %d/%d has evt.StrTime set to '%s'", idx, len(pdump[lastStage][lastParser]), result.Evt.StrTime)
}
}
return &pdump, nil
}

View file

@ -61,16 +61,17 @@ type Leaky struct {
Duration time.Duration
Pour func(*Leaky, types.Event) `json:"-"`
//Profiling when set to true enables profiling of bucket
Profiling bool
timedOverflow bool
logger *log.Entry
scopeType types.ScopeType
hash string
scenarioVersion string
tomb *tomb.Tomb
wgPour *sync.WaitGroup
wgDumpState *sync.WaitGroup
mutex *sync.Mutex //used only for TIMEMACHINE mode to allow garbage collection without races
Profiling bool
timedOverflow bool
conditionalOverflow bool
logger *log.Entry
scopeType types.ScopeType
hash string
scenarioVersion string
tomb *tomb.Tomb
wgPour *sync.WaitGroup
wgDumpState *sync.WaitGroup
mutex *sync.Mutex //used only for TIMEMACHINE mode to allow garbage collection without races
}
var BucketsPour = prometheus.NewCounterVec(
@ -188,6 +189,10 @@ func FromFactory(bucketFactory BucketFactory) *Leaky {
l.timedOverflow = true
}
if l.BucketConfig.Type == "conditional" {
l.conditionalOverflow = true
l.Duration = l.BucketConfig.leakspeed
}
return l
}
@ -247,6 +252,14 @@ func LeakRoutine(leaky *Leaky) error {
BucketsPour.With(prometheus.Labels{"name": leaky.Name, "source": msg.Line.Src, "type": msg.Line.Module}).Inc()
leaky.Pour(leaky, *msg) // glue for now
for _, processor := range processors {
msg = processor.AfterBucketPour(leaky.BucketConfig)(*msg, leaky)
if msg == nil {
goto End
}
}
//Clear cache on behalf of pour
// if durationTicker isn't initialized, then we're pouring our first event
@ -337,7 +350,8 @@ func Pour(leaky *Leaky, msg types.Event) {
leaky.First_ts = time.Now().UTC()
}
leaky.Last_ts = time.Now().UTC()
if leaky.Limiter.Allow() {
if leaky.Limiter.Allow() || leaky.conditionalOverflow {
leaky.Queue.Add(msg)
} else {
leaky.Ovflw_ts = time.Now().UTC()

View file

@ -64,8 +64,8 @@ func TestBucket(t *testing.T) {
}
}
//during tests, we're likely to have only one scenario, and thus only one holder.
//we want to avoid the death of the tomb because all existing buckets have been destroyed.
// during tests, we're likely to have only one scenario, and thus only one holder.
// we want to avoid the death of the tomb because all existing buckets have been destroyed.
func watchTomb(tomb *tomb.Tomb) {
for {
if tomb.Alive() == false {

View file

@ -0,0 +1,61 @@
package leakybucket
import (
"fmt"
"time"
"github.com/antonmedv/expr"
"github.com/antonmedv/expr/vm"
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
"github.com/crowdsecurity/crowdsec/pkg/types"
)
// ConditionalOverflow is a bucket processor that forces an overflow as soon
// as a user-supplied expression evaluates to true after an event is poured.
// It embeds DumbProcessor for no-op implementations of the other hooks.
type ConditionalOverflow struct {
	ConditionalFilter string // raw "condition" expression from the scenario
	ConditionalFilterRuntime *vm.Program // compiled form of ConditionalFilter
	DumbProcessor
}
// NewConditionalOverflow compiles the bucket factory's "condition"
// expression and returns a processor that evaluates it after every pour.
// It returns an error if the expression fails to compile.
func NewConditionalOverflow(g *BucketFactory) (*ConditionalOverflow, error) {
	var err error

	c := ConditionalOverflow{}
	c.ConditionalFilter = g.ConditionalOverflow
	// the expression can reference the bucket queue and the bucket itself
	c.ConditionalFilterRuntime, err = expr.Compile(c.ConditionalFilter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{
		"queue": &Queue{}, "leaky": &Leaky{}})))
	if err != nil {
		g.logger.Errorf("Unable to compile condition expression for conditional bucket : %s", err)
		// wrap with %w (instead of %v) so callers can unwrap the
		// underlying compilation error with errors.Is/As
		return nil, fmt.Errorf("unable to compile condition expression for conditional bucket : %w", err)
	}
	return &c, nil
}
// AfterBucketPour returns a hook that evaluates the compiled condition
// against the bucket state after each poured event. If the expression
// returns true, the bucket overflows immediately: the queue is sent to
// l.Out and nil is returned so the event is not processed further.
// Evaluation errors and non-bool results are logged and the event is
// passed through unchanged.
func (c *ConditionalOverflow) AfterBucketPour(b *BucketFactory) func(types.Event, *Leaky) *types.Event {
	return func(msg types.Event, l *Leaky) *types.Event {
		var condition, ok bool
		if c.ConditionalFilterRuntime != nil {
			l.logger.Debugf("Running condition expression : %s", c.ConditionalFilter)
			ret, err := expr.Run(c.ConditionalFilterRuntime, exprhelpers.GetExprEnv(map[string]interface{}{"evt": &msg, "queue": l.Queue, "leaky": l}))
			if err != nil {
				// evaluation failure is not fatal: keep the event in the chain
				l.logger.Errorf("unable to run conditional filter : %s", err)
				return &msg
			}
			l.logger.Debugf("Conditional bucket expression returned : %v", ret)
			if condition, ok = ret.(bool); !ok {
				// the expression must produce a boolean; anything else is ignored
				l.logger.Warningf("overflow condition, unexpected non-bool return : %T", ret)
				return &msg
			}
			if condition {
				l.logger.Debugf("Conditional bucket overflow")
				l.Ovflw_ts = time.Now().UTC()
				// flush the whole queue as the overflow payload
				l.Out <- l.Queue
				// nil signals the caller that the event was consumed
				return nil
			}
		}
		return &msg
	}
}

View file

@ -11,6 +11,8 @@ import (
"sync"
"time"
"github.com/crowdsecurity/crowdsec/pkg/alertcontext"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
"github.com/crowdsecurity/crowdsec/pkg/cwversion"
@ -32,49 +34,50 @@ import (
// BucketFactory struct holds all fields for any bucket configuration. This is to have a
// generic struct for buckets. This can be seen as a bucket factory.
type BucketFactory struct {
FormatVersion string `yaml:"format"`
Author string `yaml:"author"`
Description string `yaml:"description"`
References []string `yaml:"references"`
Type string `yaml:"type"` //Type can be : leaky, counter, trigger. It determines the main bucket characteristics
Name string `yaml:"name"` //Name of the bucket, used later in log and user-messages. Should be unique
Capacity int `yaml:"capacity"` //Capacity is applicable to leaky buckets and determines the "burst" capacity
LeakSpeed string `yaml:"leakspeed"` //Leakspeed is a float representing how many events per second leak out of the bucket
Duration string `yaml:"duration"` //Duration allows 'counter' buckets to have a fixed life-time
Filter string `yaml:"filter"` //Filter is an expr that determines if an event is elligible for said bucket. Filter is evaluated against the Event struct
GroupBy string `yaml:"groupby,omitempty"` //groupy is an expr that allows to determine the partitions of the bucket. A common example is the source_ip
Distinct string `yaml:"distinct"` //Distinct, when present, adds a `Pour()` processor that will only pour uniq items (based on distinct expr result)
Debug bool `yaml:"debug"` //Debug, when set to true, will enable debugging for _this_ scenario specifically
Labels map[string]string `yaml:"labels"` //Labels is K:V list aiming at providing context the overflow
Blackhole string `yaml:"blackhole,omitempty"` //Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration
logger *log.Entry `yaml:"-"` //logger is bucket-specific logger (used by Debug as well)
Reprocess bool `yaml:"reprocess"` //Reprocess, if true, will for the bucket to be re-injected into processing chain
CacheSize int `yaml:"cache_size"` //CacheSize, if > 0, limits the size of in-memory cache of the bucket
Profiling bool `yaml:"profiling"` //Profiling, if true, will make the bucket record pours/overflows/etc.
OverflowFilter string `yaml:"overflow_filter"` //OverflowFilter if present, is a filter that must return true for the overflow to go through
ScopeType types.ScopeType `yaml:"scope,omitempty"` //to enforce a different remediation than blocking an IP. Will default this to IP
BucketName string `yaml:"-"`
Filename string `yaml:"-"`
RunTimeFilter *vm.Program `json:"-"`
ExprDebugger *exprhelpers.ExprDebugger `yaml:"-" json:"-"` // used to debug expression by printing the content of each variable of the expression
RunTimeGroupBy *vm.Program `json:"-"`
Data []*types.DataSource `yaml:"data,omitempty"`
DataDir string `yaml:"-"`
CancelOnFilter string `yaml:"cancel_on,omitempty"` //a filter that, if matched, kills the bucket
leakspeed time.Duration //internal representation of `Leakspeed`
duration time.Duration //internal representation of `Duration`
ret chan types.Event //the bucket-specific output chan for overflows
processors []Processor //processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.)
output bool //??
ScenarioVersion string `yaml:"version,omitempty"`
hash string `yaml:"-"`
Simulated bool `yaml:"simulated"` //Set to true if the scenario instantiating the bucket was in the exclusion list
tomb *tomb.Tomb `yaml:"-"`
wgPour *sync.WaitGroup `yaml:"-"`
wgDumpState *sync.WaitGroup `yaml:"-"`
FormatVersion string `yaml:"format"`
Author string `yaml:"author"`
Description string `yaml:"description"`
References []string `yaml:"references"`
Type string `yaml:"type"` //Type can be : leaky, counter, trigger. It determines the main bucket characteristics
Name string `yaml:"name"` //Name of the bucket, used later in log and user-messages. Should be unique
Capacity int `yaml:"capacity"` //Capacity is applicable to leaky buckets and determines the "burst" capacity
LeakSpeed string `yaml:"leakspeed"` //Leakspeed is a float representing how many events per second leak out of the bucket
Duration string `yaml:"duration"` //Duration allows 'counter' buckets to have a fixed life-time
Filter string `yaml:"filter"` //Filter is an expr that determines if an event is elligible for said bucket. Filter is evaluated against the Event struct
GroupBy string `yaml:"groupby,omitempty"` //groupy is an expr that allows to determine the partitions of the bucket. A common example is the source_ip
Distinct string `yaml:"distinct"` //Distinct, when present, adds a `Pour()` processor that will only pour uniq items (based on distinct expr result)
Debug bool `yaml:"debug"` //Debug, when set to true, will enable debugging for _this_ scenario specifically
Labels map[string]string `yaml:"labels"` //Labels is K:V list aiming at providing context the overflow
Blackhole string `yaml:"blackhole,omitempty"` //Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration
logger *log.Entry `yaml:"-"` //logger is bucket-specific logger (used by Debug as well)
Reprocess bool `yaml:"reprocess"` //Reprocess, if true, will for the bucket to be re-injected into processing chain
CacheSize int `yaml:"cache_size"` //CacheSize, if > 0, limits the size of in-memory cache of the bucket
Profiling bool `yaml:"profiling"` //Profiling, if true, will make the bucket record pours/overflows/etc.
OverflowFilter string `yaml:"overflow_filter"` //OverflowFilter if present, is a filter that must return true for the overflow to go through
ConditionalOverflow string `yaml:"condition"` //condition if present, is an expression that must return true for the bucket to overflow
ScopeType types.ScopeType `yaml:"scope,omitempty"` //to enforce a different remediation than blocking an IP. Will default this to IP
BucketName string `yaml:"-"`
Filename string `yaml:"-"`
RunTimeFilter *vm.Program `json:"-"`
ExprDebugger *exprhelpers.ExprDebugger `yaml:"-" json:"-"` // used to debug expression by printing the content of each variable of the expression
RunTimeGroupBy *vm.Program `json:"-"`
Data []*types.DataSource `yaml:"data,omitempty"`
DataDir string `yaml:"-"`
CancelOnFilter string `yaml:"cancel_on,omitempty"` //a filter that, if matched, kills the bucket
leakspeed time.Duration //internal representation of `Leakspeed`
duration time.Duration //internal representation of `Duration`
ret chan types.Event //the bucket-specific output chan for overflows
processors []Processor //processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.)
output bool //??
ScenarioVersion string `yaml:"version,omitempty"`
hash string `yaml:"-"`
Simulated bool `yaml:"simulated"` //Set to true if the scenario instantiating the bucket was in the exclusion list
tomb *tomb.Tomb `yaml:"-"`
wgPour *sync.WaitGroup `yaml:"-"`
wgDumpState *sync.WaitGroup `yaml:"-"`
}
//we use one NameGenerator for all the future buckets
// we use one NameGenerator for all the future buckets
var seed namegenerator.Generator = namegenerator.NewNameGenerator(time.Now().UTC().UnixNano())
func ValidateFactory(bucketFactory *BucketFactory) error {
@ -96,7 +99,7 @@ func ValidateFactory(bucketFactory *BucketFactory) error {
}
} else if bucketFactory.Type == "counter" {
if bucketFactory.Duration == "" {
return fmt.Errorf("duration ca't be empty for counter")
return fmt.Errorf("duration can't be empty for counter")
}
if bucketFactory.duration == 0 {
return fmt.Errorf("bad duration for counter bucket '%d'", bucketFactory.duration)
@ -108,6 +111,19 @@ func ValidateFactory(bucketFactory *BucketFactory) error {
if bucketFactory.Capacity != 0 {
return fmt.Errorf("trigger bucket must have 0 capacity")
}
} else if bucketFactory.Type == "conditional" {
if bucketFactory.ConditionalOverflow == "" {
return fmt.Errorf("conditional bucket must have a condition")
}
if bucketFactory.Capacity != -1 {
bucketFactory.logger.Warnf("Using a value different than -1 as capacity for conditional bucket, this may lead to unexpected overflows")
}
if bucketFactory.LeakSpeed == "" {
return fmt.Errorf("leakspeed can't be empty for conditional bucket")
}
if bucketFactory.leakspeed == 0 {
return fmt.Errorf("bad leakspeed for conditional bucket '%s'", bucketFactory.LeakSpeed)
}
} else {
return fmt.Errorf("unknown bucket type '%s'", bucketFactory.Type)
}
@ -225,6 +241,11 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, files []string, tomb *tomb.
ret = append(ret, bucketFactory)
}
}
if err := alertcontext.NewAlertContext(cscfg.ContextToSend, cscfg.ConsoleContextValueLength); err != nil {
return nil, nil, fmt.Errorf("unable to load alert context: %s", err)
}
log.Warningf("Loaded %d scenarios", len(ret))
return ret, response, nil
}
@ -297,6 +318,8 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error {
bucketFactory.processors = append(bucketFactory.processors, &Trigger{})
case "counter":
bucketFactory.processors = append(bucketFactory.processors, &DumbProcessor{})
case "conditional":
bucketFactory.processors = append(bucketFactory.processors, &DumbProcessor{})
default:
return fmt.Errorf("invalid type '%s' in %s : %v", bucketFactory.Type, bucketFactory.Filename, err)
}
@ -331,6 +354,16 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error {
bucketFactory.processors = append(bucketFactory.processors, blackhole)
}
if bucketFactory.ConditionalOverflow != "" {
bucketFactory.logger.Tracef("Adding conditional overflow.")
condovflw, err := NewConditionalOverflow(bucketFactory)
if err != nil {
bucketFactory.logger.Errorf("Error creating conditional overflow : %s", err)
return fmt.Errorf("error creating conditional overflow : %s", err)
}
bucketFactory.processors = append(bucketFactory.processors, condovflw)
}
if len(bucketFactory.Data) > 0 {
for _, data := range bucketFactory.Data {
if data.DestPath == "" {
@ -349,6 +382,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error {
return fmt.Errorf("invalid bucket from %s : %v", bucketFactory.Filename, err)
}
bucketFactory.tomb = tomb
return nil
}

View file

@ -6,6 +6,7 @@ import (
"sort"
"strconv"
"github.com/crowdsecurity/crowdsec/pkg/alertcontext"
"github.com/crowdsecurity/crowdsec/pkg/models"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/davecgh/go-spew/spew"
@ -17,7 +18,7 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
)
//SourceFromEvent extracts and formats a valid models.Source object from an Event
// SourceFromEvent extracts and formats a valid models.Source object from an Event
func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, error) {
srcs := make(map[string]models.Source)
/*if it's already an overflow, we have properly formatted sources.
@ -160,7 +161,7 @@ func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, e
return srcs, nil
}
//EventsFromQueue iterates the queue to collect & prepare meta-datas from alert
// EventsFromQueue iterates the queue to collect & prepare meta-datas from alert
func EventsFromQueue(queue *Queue) []*models.Event {
events := []*models.Event{}
@ -207,7 +208,7 @@ func EventsFromQueue(queue *Queue) []*models.Event {
return events
}
//alertFormatSource iterates over the queue to collect sources
// alertFormatSource iterates over the queue to collect sources
func alertFormatSource(leaky *Leaky, queue *Queue) (map[string]models.Source, string, error) {
var sources map[string]models.Source = make(map[string]models.Source)
var source_type string
@ -233,7 +234,7 @@ func alertFormatSource(leaky *Leaky, queue *Queue) (map[string]models.Source, st
return sources, source_type, nil
}
//NewAlert will generate a RuntimeAlert and its APIAlert(s) from a bucket that overflowed
// NewAlert will generate a RuntimeAlert and its APIAlert(s) from a bucket that overflowed
func NewAlert(leaky *Leaky, queue *Queue) (types.RuntimeAlert, error) {
var runtimeAlert types.RuntimeAlert
@ -293,6 +294,11 @@ func NewAlert(leaky *Leaky, queue *Queue) (types.RuntimeAlert, error) {
*apiAlert.Message = fmt.Sprintf("%s %s performed '%s' (%d events over %s) at %s", source_scope, sourceStr, leaky.Name, leaky.Total_count, leaky.Ovflw_ts.Sub(leaky.First_ts), leaky.Last_ts)
//Get the events from Leaky/Queue
apiAlert.Events = EventsFromQueue(queue)
var warnings []error
apiAlert.Meta, warnings = alertcontext.EventToContext(leaky.Queue.GetQueue())
for _, w := range warnings {
log.Warningf("while extracting context from bucket %s : %s", leaky.Name, w)
}
//Loop over the Sources and generate appropriate number of ApiAlerts
for _, srcValue := range sources {

View file

@ -6,6 +6,8 @@ type Processor interface {
OnBucketInit(Bucket *BucketFactory) error
OnBucketPour(Bucket *BucketFactory) func(types.Event, *Leaky) *types.Event
OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, types.RuntimeAlert, *Queue) (types.RuntimeAlert, *Queue)
AfterBucketPour(Bucket *BucketFactory) func(types.Event, *Leaky) *types.Event
}
type DumbProcessor struct {
@ -25,5 +27,10 @@ func (d *DumbProcessor) OnBucketOverflow(b *BucketFactory) func(*Leaky, types.Ru
return func(leaky *Leaky, alert types.RuntimeAlert, queue *Queue) (types.RuntimeAlert, *Queue) {
return alert, queue
}
}
// AfterBucketPour is a no-op hook: it returns the event unchanged so the
// processing chain continues. It satisfies the Processor interface for
// processors that don't need post-pour behavior.
func (d *DumbProcessor) AfterBucketPour(bucketFactory *BucketFactory) func(types.Event, *Leaky) *types.Event {
	return func(msg types.Event, leaky *Leaky) *types.Event {
		return &msg
	}
}

View file

@ -64,6 +64,12 @@ func (u *CancelOnFilter) OnBucketOverflow(bucketFactory *BucketFactory) func(*Le
}
}
// AfterBucketPour is a no-op for CancelOnFilter: the event is passed
// through unchanged. Required to satisfy the Processor interface.
func (u *CancelOnFilter) AfterBucketPour(bucketFactory *BucketFactory) func(types.Event, *Leaky) *types.Event {
	return func(msg types.Event, leaky *Leaky) *types.Event {
		return &msg
	}
}
func (u *CancelOnFilter) OnBucketInit(bucketFactory *BucketFactory) error {
var err error
var compiledExpr struct {

View file

@ -0,0 +1,11 @@
# Test scenario: a "conditional" bucket overflows as soon as its
# `condition` expression evaluates to true, rather than on capacity.
type: conditional
name: test/conditional
#debug: true
description: "conditional bucket"
# only pour http access-log events into this bucket
filter: "evt.Meta.log_type == 'http_access-log'"
# one bucket instance per source IP
groupby: evt.Meta.source_ip
# overflow once the same source requested both "/" and "/foo"
condition: any(queue.Queue, {.Meta.http_path == "/"}) and any(queue.Queue, {.Meta.http_path == "/foo"})
leakspeed: 1s
# -1 capacity: the overflow is driven entirely by `condition`
capacity: -1
labels:
  type: overflow_1

View file

@ -0,0 +1 @@
- filename: "{{.TestDirectory}}/bucket.yaml"

View file

@ -0,0 +1,50 @@
{
"lines": [
{
"Line": {
"Labels": {
"type": "nginx"
},
"Raw": "don't care"
},
"MarshaledTime": "2020-01-01T10:00:00.000Z",
"Meta": {
"source_ip": "2a00:1450:4007:816::200e",
"log_type": "http_access-log",
"http_path": "/"
}
},
{
"Line": {
"Labels": {
"type": "nginx"
},
"Raw": "don't care"
},
"MarshaledTime": "2020-01-01T10:00:00.000Z",
"Meta": {
"source_ip": "2a00:1450:4007:816::200e",
"log_type": "http_access-log",
"http_path": "/foo"
}
}
],
"results": [
{
"Type" : 1,
"Alert": {
"sources" : {
"2a00:1450:4007:816::200e": {
"ip": "2a00:1450:4007:816::200e",
"scope": "Ip",
"value": "2a00:1450:4007:816::200e"
}
},
"Alert" : {
"scenario": "test/conditional",
"events_count": 2
}
}
}
]
}

View file

@ -4,7 +4,6 @@ import (
"time"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/davecgh/go-spew/spew"
log "github.com/sirupsen/logrus"
)
@ -14,7 +13,11 @@ func TimeMachinePour(l *Leaky, msg types.Event) {
err error
)
if msg.MarshaledTime == "" {
log.Warningf("Trying to time-machine event without timestamp : %s", spew.Sdump(msg))
log.WithFields(log.Fields{
"evt_type": msg.Line.Labels["type"],
"evt_src": msg.Line.Src,
"scenario": l.Name,
}).Warningf("Trying to process event without evt.StrTime. Event cannot be poured to scenario")
return
}

View file

@ -53,6 +53,12 @@ func (u *Uniq) OnBucketOverflow(bucketFactory *BucketFactory) func(*Leaky, types
}
}
// AfterBucketPour is a no-op for Uniq (deduplication happens at pour
// time): the event is passed through unchanged. Required to satisfy the
// Processor interface.
func (u *Uniq) AfterBucketPour(bucketFactory *BucketFactory) func(types.Event, *Leaky) *types.Event {
	return func(msg types.Event, leaky *Leaky) *types.Event {
		return &msg
	}
}
func (u *Uniq) OnBucketInit(bucketFactory *BucketFactory) error {
var err error
var compiledExpr *vm.Program

View file

@ -7,6 +7,7 @@ package models
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
@ -19,6 +20,12 @@ import (
// swagger:model AddSignalsRequestItem
type AddSignalsRequestItem struct {
// alert id
AlertID int64 `json:"alert_id,omitempty"`
// context
Context []*AddSignalsRequestItemContextItems0 `json:"context"`
// created at
CreatedAt string `json:"created_at,omitempty"`
@ -38,8 +45,7 @@ type AddSignalsRequestItem struct {
ScenarioHash *string `json:"scenario_hash"`
// scenario trust
// Required: true
ScenarioTrust *string `json:"scenario_trust"`
ScenarioTrust string `json:"scenario_trust,omitempty"`
// scenario version
// Required: true
@ -62,6 +68,10 @@ type AddSignalsRequestItem struct {
func (m *AddSignalsRequestItem) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateContext(formats); err != nil {
res = append(res, err)
}
if err := m.validateMessage(formats); err != nil {
res = append(res, err)
}
@ -74,10 +84,6 @@ func (m *AddSignalsRequestItem) Validate(formats strfmt.Registry) error {
res = append(res, err)
}
if err := m.validateScenarioTrust(formats); err != nil {
res = append(res, err)
}
if err := m.validateScenarioVersion(formats); err != nil {
res = append(res, err)
}
@ -100,6 +106,32 @@ func (m *AddSignalsRequestItem) Validate(formats strfmt.Registry) error {
return nil
}
func (m *AddSignalsRequestItem) validateContext(formats strfmt.Registry) error {
if swag.IsZero(m.Context) { // not required
return nil
}
for i := 0; i < len(m.Context); i++ {
if swag.IsZero(m.Context[i]) { // not required
continue
}
if m.Context[i] != nil {
if err := m.Context[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("context" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("context" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *AddSignalsRequestItem) validateMessage(formats strfmt.Registry) error {
if err := validate.Required("message", "body", m.Message); err != nil {
@ -127,15 +159,6 @@ func (m *AddSignalsRequestItem) validateScenarioHash(formats strfmt.Registry) er
return nil
}
func (m *AddSignalsRequestItem) validateScenarioTrust(formats strfmt.Registry) error {
if err := validate.Required("scenario_trust", "body", m.ScenarioTrust); err != nil {
return err
}
return nil
}
func (m *AddSignalsRequestItem) validateScenarioVersion(formats strfmt.Registry) error {
if err := validate.Required("scenario_version", "body", m.ScenarioVersion); err != nil {
@ -187,6 +210,10 @@ func (m *AddSignalsRequestItem) validateStopAt(formats strfmt.Registry) error {
func (m *AddSignalsRequestItem) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateContext(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSource(ctx, formats); err != nil {
res = append(res, err)
}
@ -197,6 +224,26 @@ func (m *AddSignalsRequestItem) ContextValidate(ctx context.Context, formats str
return nil
}
func (m *AddSignalsRequestItem) contextValidateContext(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Context); i++ {
if m.Context[i] != nil {
if err := m.Context[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("context" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("context" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *AddSignalsRequestItem) contextValidateSource(ctx context.Context, formats strfmt.Registry) error {
if m.Source != nil {
@ -230,3 +277,43 @@ func (m *AddSignalsRequestItem) UnmarshalBinary(b []byte) error {
*m = res
return nil
}
// AddSignalsRequestItemContextItems0 add signals request item context items0
//
// swagger:model AddSignalsRequestItemContextItems0
type AddSignalsRequestItemContextItems0 struct {
// key
Key string `json:"key,omitempty"`
// value
Value string `json:"value,omitempty"`
}
// Validate validates this add signals request item context items0
func (m *AddSignalsRequestItemContextItems0) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this add signals request item context items0 based on context it is used
func (m *AddSignalsRequestItemContextItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *AddSignalsRequestItemContextItems0) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *AddSignalsRequestItemContextItems0) UnmarshalBinary(b []byte) error {
var res AddSignalsRequestItemContextItems0
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}

View file

@ -3,6 +3,7 @@ package parser
import (
"time"
expr "github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
"github.com/crowdsecurity/crowdsec/pkg/types"
log "github.com/sirupsen/logrus"
)
@ -59,20 +60,25 @@ func ParseDate(in string, p *types.Event, x interface{}, plog *log.Entry) (map[s
var ret map[string]string = make(map[string]string)
var strDate string
var parsedDate time.Time
if p.StrTimeFormat != "" {
strDate, parsedDate = parseDateWithFormat(in, p.StrTimeFormat)
if in != "" {
if p.StrTimeFormat != "" {
strDate, parsedDate = parseDateWithFormat(in, p.StrTimeFormat)
if !parsedDate.IsZero() {
ret["MarshaledTime"] = strDate
return ret, nil
}
plog.Debugf("unable to parse '%s' with layout '%s'", in, p.StrTimeFormat)
}
strDate, parsedDate = GenDateParse(in)
if !parsedDate.IsZero() {
ret["MarshaledTime"] = strDate
return ret, nil
} else {
plog.Debugf("unable to parse '%s' with layout '%s'", in, p.StrTimeFormat)
}
}
strDate, parsedDate = GenDateParse(in)
if !parsedDate.IsZero() {
ret["MarshaledTime"] = strDate
return ret, nil
strDate = expr.ParseUnix(in)
if strDate != "" {
ret["MarshaledTime"] = strDate
return ret, nil
}
}
plog.Debugf("no suitable date format found for '%s', falling back to now", in)
now := time.Now().UTC()

View file

@ -4,6 +4,7 @@ import (
"fmt"
"net"
"strings"
"time"
"github.com/antonmedv/expr"
"github.com/crowdsecurity/grokky"
@ -11,6 +12,7 @@ import (
yaml "gopkg.in/yaml.v2"
"github.com/antonmedv/expr/vm"
"github.com/crowdsecurity/crowdsec/pkg/cache"
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/davecgh/go-spew/spew"
@ -57,6 +59,8 @@ type Node struct {
Grok types.GrokPattern `yaml:"grok,omitempty"`
//Statics can be present in any type of node and is executed last
Statics []types.ExtraField `yaml:"statics,omitempty"`
//Stash allows to capture data from the log line and store it in an accessible cache
Stash []types.DataCapture `yaml:"stash,omitempty"`
//Whitelists
Whitelist Whitelist `yaml:"whitelist,omitempty"`
Data []*types.DataSource `yaml:"data,omitempty"`
@ -103,6 +107,25 @@ func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error {
}
}
}
for idx, stash := range n.Stash {
if stash.Name == "" {
return fmt.Errorf("stash %d : name must be set", idx)
}
if stash.Value == "" {
return fmt.Errorf("stash %s : value expression must be set", stash.Name)
}
if stash.Key == "" {
return fmt.Errorf("stash %s : key expression must be set", stash.Name)
}
if stash.TTL == "" {
return fmt.Errorf("stash %s : ttl must be set", stash.Name)
}
//should be configurable
if stash.MaxMapSize == 0 {
stash.MaxMapSize = 100
}
}
return nil
}
@ -285,6 +308,50 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
clog.Tracef("! No grok pattern : %p", n.Grok.RunTimeRegexp)
}
//Process the stash (data collection) if : a grok was present and succeeded, or if there is no grok
if NodeHasOKGrok || n.Grok.RunTimeRegexp == nil {
for idx, stash := range n.Stash {
var value string
var key string
if stash.ValueExpression == nil {
clog.Warningf("Stash %d has no value expression, skipping", idx)
continue
}
if stash.KeyExpression == nil {
clog.Warningf("Stash %d has no key expression, skipping", idx)
continue
}
//collect the data
output, err := expr.Run(stash.ValueExpression, cachedExprEnv)
if err != nil {
clog.Warningf("Error while running stash val expression : %v", err)
}
//can we expect anything else than a string ?
switch output := output.(type) {
case string:
value = output
default:
clog.Warningf("unexpected type %t (%v) while running '%s'", output, output, stash.Value)
continue
}
//collect the key
output, err = expr.Run(stash.KeyExpression, cachedExprEnv)
if err != nil {
clog.Warningf("Error while running stash key expression : %v", err)
}
//can we expect anything else than a string ?
switch output := output.(type) {
case string:
key = output
default:
clog.Warningf("unexpected type %t (%v) while running '%s'", output, output, stash.Key)
continue
}
cache.SetKey(stash.Name, key, value, &stash.TTLVal)
}
}
//Iterate on leafs
if len(n.LeavesNodes) > 0 {
for _, leaf := range n.LeavesNodes {
@ -434,10 +501,10 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
n.Logger.Tracef("+ Regexp Compilation '%s'", n.Grok.RegexpName)
n.Grok.RunTimeRegexp, err = pctx.Grok.Get(n.Grok.RegexpName)
if err != nil {
return fmt.Errorf("Unable to find grok '%s' : %v", n.Grok.RegexpName, err)
return fmt.Errorf("unable to find grok '%s' : %v", n.Grok.RegexpName, err)
}
if n.Grok.RunTimeRegexp == nil {
return fmt.Errorf("Empty grok '%s'", n.Grok.RegexpName)
return fmt.Errorf("empty grok '%s'", n.Grok.RegexpName)
}
n.Logger.Tracef("%s regexp: %s", n.Grok.RegexpName, n.Grok.RunTimeRegexp.Regexp.String())
valid = true
@ -447,11 +514,11 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
}
n.Grok.RunTimeRegexp, err = pctx.Grok.Compile(n.Grok.RegexpValue)
if err != nil {
return fmt.Errorf("Failed to compile grok '%s': %v\n", n.Grok.RegexpValue, err)
return fmt.Errorf("failed to compile grok '%s': %v", n.Grok.RegexpValue, err)
}
if n.Grok.RunTimeRegexp == nil {
// We shouldn't be here because compilation succeeded, so regexp shouldn't be nil
return fmt.Errorf("Grok compilation failure: %s", n.Grok.RegexpValue)
return fmt.Errorf("grok compilation failure: %s", n.Grok.RegexpValue)
}
n.Logger.Tracef("%s regexp : %s", n.Grok.RegexpValue, n.Grok.RunTimeRegexp.Regexp.String())
valid = true
@ -480,6 +547,38 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
}
valid = true
}
/* load data capture (stash) */
for i, stash := range n.Stash {
n.Stash[i].ValueExpression, err = expr.Compile(stash.Value,
expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
if err != nil {
return errors.Wrap(err, "while compiling stash value expression")
}
n.Stash[i].KeyExpression, err = expr.Compile(stash.Key,
expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
if err != nil {
return errors.Wrap(err, "while compiling stash key expression")
}
n.Stash[i].TTLVal, err = time.ParseDuration(stash.TTL)
if err != nil {
return errors.Wrap(err, "while parsing stash ttl")
}
logLvl := n.Logger.Logger.GetLevel()
//init the cache, does it make sense to create it here just to be sure everything is fine ?
if err := cache.CacheInit(cache.CacheCfg{
Size: n.Stash[i].MaxMapSize,
TTL: n.Stash[i].TTLVal,
Name: n.Stash[i].Name,
LogLevel: &logLvl,
}); err != nil {
return errors.Wrap(err, "while initializing cache")
}
}
/* compile leafs if present */
if len(n.LeavesNodes) > 0 {
for idx := range n.LeavesNodes {

View file

@ -138,7 +138,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing
return nil
}
//prepTests is going to do the initialisation of parser : it's going to load enrichment plugins and load the patterns. This is done here so that we don't redo it for each test
// prepTests is going to do the initialisation of parser : it's going to load enrichment plugins and load the patterns. This is done here so that we don't redo it for each test
func prepTests() (*UnixParserCtx, EnricherCtx, error) {
var (
err error
@ -252,6 +252,7 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo
if debug {
retInfo = append(retInfo, fmt.Sprintf("mismatch %s[%s] %s != %s", outLabels[mapIdx], expKey, expVal, outVal))
}
valid = false
goto checkFinished
}
} else { //missing entry
@ -266,11 +267,11 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo
checkFinished:
if valid {
if debug {
retInfo = append(retInfo, fmt.Sprintf("OK ! %s", strings.Join(retInfo, "/")))
retInfo = append(retInfo, fmt.Sprintf("OK ! \n\t%s", strings.Join(retInfo, "\n\t")))
}
} else {
if debug {
retInfo = append(retInfo, fmt.Sprintf("KO ! %s", strings.Join(retInfo, "/")))
retInfo = append(retInfo, fmt.Sprintf("KO ! \n\t%s", strings.Join(retInfo, "\n\t")))
}
}
return retInfo, valid

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.1 KiB

After

Width:  |  Height:  |  Size: 12 KiB

View file

@ -0,0 +1,31 @@
filter: "evt.Line.Labels.type == 'testlog'"
debug: true
onsuccess: next_stage
name: tests/base-grok-stash
pattern_syntax:
TEST_START: start %{DATA:program} thing with pid %{NUMBER:pid}
TEST_CONTINUED: pid %{NUMBER:pid} did a forbidden thing
nodes:
- #name: tests/base-grok-stash-sub-start
grok:
name: "TEST_START"
apply_on: Line.Raw
statics:
- meta: log_type
value: test_start
stash:
- name: test_program_pid_assoc
key: evt.Parsed.pid
value: evt.Parsed.program
ttl: 30s
size: 10
- #name: tests/base-grok-stash-sub-cont
grok:
name: "TEST_CONTINUED"
apply_on: Line.Raw
statics:
- meta: log_type
value: test_continue
- meta: associated_prog_name
expression: GetFromStash("test_program_pid_assoc", evt.Parsed.pid)

View file

@ -0,0 +1,2 @@
- filename: {{.TestDirectory}}/base-grok-stash.yaml
stage: s00-raw

View file

@ -0,0 +1,63 @@
#these are the events we input into parser
lines:
- Line:
Labels:
type: testlog
Raw: start foobar thing with pid 12
- Line:
Labels:
type: testlog
Raw: start toto thing with pid 42
- Line:
Labels:
type: testlog
Raw: pid 12 did a forbidden thing
- Line:
Labels:
type: testlog
Raw: pid 42 did a forbidden thing
- Line:
Labels:
type: testlog
Raw: pid 45 did a forbidden thing
#these are the results we expect from the parser
results:
- Meta:
log_type: test_start
Parsed:
program: foobar
pid: "12"
Process: true
Stage: s00-raw
- Meta:
log_type: test_start
Parsed:
program: toto
pid: "42"
Process: true
Stage: s00-raw
- Meta:
log_type: test_continue
associated_prog_name: foobar
Parsed:
pid: "12"
Process: true
Stage: s00-raw
- Meta:
log_type: test_continue
associated_prog_name: toto
Parsed:
pid: "42"
Process: true
Stage: s00-raw
- Meta:
log_type: test_continue
Parsed:
pid: "45"
Process: true
Stage: s00-raw

View file

@ -1,5 +1,6 @@
filter: "'source_ip' in evt.Meta"
name: tests/geoip-enrich
debug: true
description: "Populate event with geoloc info : as, country, coords, source range."
statics:
- method: GeoIpCity

View file

@ -2,7 +2,7 @@
lines:
- Meta:
test: test1
source_ip: 8.8.8.8
source_ip: 1.0.0.1
- Meta:
test: test2
source_ip: 192.168.0.1
@ -10,11 +10,10 @@ lines:
results:
- Process: true
Enriched:
IsoCode: US
IsInEU: false
ASNOrg: Google LLC
ASNOrg: "Google Inc."
Meta:
source_ip: 8.8.8.8
source_ip: 1.0.0.1
- Process: true
Enriched:
IsInEU: false

View file

@ -4,9 +4,11 @@ import (
"fmt"
"os"
"path"
"sort"
"strings"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
"github.com/crowdsecurity/grokky"
log "github.com/sirupsen/logrus"
@ -50,6 +52,45 @@ func Init(c map[string]interface{}) (*UnixParserCtx, error) {
return &r, nil
}
// Return new parsers
// nodes and povfwnodes are already initialized in parser.LoadStages
func NewParsers() *Parsers {
parsers := &Parsers{
Ctx: &UnixParserCtx{},
Povfwctx: &UnixParserCtx{},
StageFiles: make([]Stagefile, 0),
PovfwStageFiles: make([]Stagefile, 0),
}
for _, itemType := range []string{cwhub.PARSERS, cwhub.PARSERS_OVFLW} {
for _, hubParserItem := range cwhub.GetItemMap(itemType) {
if hubParserItem.Installed {
stagefile := Stagefile{
Filename: hubParserItem.LocalPath,
Stage: hubParserItem.Stage,
}
if itemType == cwhub.PARSERS {
parsers.StageFiles = append(parsers.StageFiles, stagefile)
}
if itemType == cwhub.PARSERS_OVFLW {
parsers.PovfwStageFiles = append(parsers.PovfwStageFiles, stagefile)
}
}
}
}
if parsers.StageFiles != nil {
sort.Slice(parsers.StageFiles, func(i, j int) bool {
return parsers.StageFiles[i].Filename < parsers.StageFiles[j].Filename
})
}
if parsers.PovfwStageFiles != nil {
sort.Slice(parsers.PovfwStageFiles, func(i, j int) bool {
return parsers.PovfwStageFiles[i].Filename < parsers.PovfwStageFiles[j].Filename
})
}
return parsers
}
func LoadParsers(cConfig *csconfig.Config, parsers *Parsers) (*Parsers, error) {
var err error

View file

@ -1,11 +1,13 @@
package types
import (
"time"
"github.com/antonmedv/expr/vm"
"github.com/crowdsecurity/grokky"
)
//Used mostly for statics
// Used mostly for statics
type ExtraField struct {
//if the target is indicated by name Struct.Field etc,
TargetByName string `yaml:"target,omitempty"`
@ -39,3 +41,14 @@ type GrokPattern struct {
//a grok can contain statics that apply if pattern is successful
Statics []ExtraField `yaml:"statics,omitempty"`
}
type DataCapture struct {
Name string `yaml:"name,omitempty"`
Key string `yaml:"key,omitempty"`
KeyExpression *vm.Program `yaml:"-"`
Value string `yaml:"value,omitempty"`
ValueExpression *vm.Program `yaml:"-"`
TTL string `yaml:"ttl,omitempty"`
TTLVal time.Duration `yaml:"-"`
MaxMapSize int `yaml:"size,omitempty"`
}

View file

@ -13,15 +13,24 @@ import (
type Patcher struct {
BaseFilePath string
PatchFilePath string
quiet bool
}
func NewPatcher(filePath string, suffix string) *Patcher {
return &Patcher{
BaseFilePath: filePath,
PatchFilePath: filePath + suffix,
quiet: false,
}
}
// SetQuiet sets the quiet flag, which will log as DEBUG_LEVEL instead of INFO
func (p *Patcher) SetQuiet(quiet bool) {
p.quiet = quiet
}
// read a single YAML file, check for errors (the merge package doesn't) then return the content as bytes.
func readYAML(filePath string) ([]byte, error) {
var content []byte
@ -55,13 +64,19 @@ func (p *Patcher) MergedPatchContent() ([]byte, error) {
var over []byte
over, err = readYAML(p.PatchFilePath)
// optional file, ignore if it does not exist
if err != nil && !errors.Is(err, os.ErrNotExist) {
if errors.Is(err, os.ErrNotExist) {
return base, nil
}
if err != nil {
return nil, err
}
if err == nil {
log.Infof("Patching yaml: '%s' with '%s'", p.BaseFilePath, p.PatchFilePath)
logf := log.Infof
if p.quiet {
logf = log.Debugf
}
logf("Patching yaml: '%s' with '%s'", p.BaseFilePath, p.PatchFilePath)
var patched *bytes.Buffer
@ -138,7 +153,11 @@ func (p *Patcher) PrependedPatchContent() ([]byte, error) {
if err = decodeDocuments(patchFile, &result, true); err != nil {
return nil, err
}
log.Infof("Prepending yaml: '%s' with '%s'", p.BaseFilePath, p.PatchFilePath)
logf := log.Infof
if p.quiet {
logf = log.Debugf
}
logf("Prepending yaml: '%s' with '%s'", p.BaseFilePath, p.PatchFilePath)
}
baseFile, err = os.Open(p.BaseFilePath)

View file

@ -10,4 +10,4 @@
+ExecStart=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml
#ExecStartPost=/bin/sleep 0.1
ExecReload=/bin/kill -HUP $MAINPID
Restart=always

View file

@ -45,6 +45,7 @@ sed -i "s#/usr/local/lib/crowdsec/plugins/#%{_libdir}/%{name}/plugins/#g" config
rm -rf %{buildroot}
mkdir -p %{buildroot}/etc/crowdsec/hub
mkdir -p %{buildroot}/etc/crowdsec/patterns
mkdir -p %{buildroot}/etc/crowdsec/console/
mkdir -p %{buildroot}%{_sharedstatedir}/%{name}/data
mkdir -p %{buildroot}%{_presetdir}
@ -62,6 +63,7 @@ install -m 600 -D config/config.yaml %{buildroot}%{_sysconfdir}/crowdsec
install -m 644 -D config/simulation.yaml %{buildroot}%{_sysconfdir}/crowdsec
install -m 644 -D config/profiles.yaml %{buildroot}%{_sysconfdir}/crowdsec
install -m 644 -D config/console.yaml %{buildroot}%{_sysconfdir}/crowdsec
install -m 644 -D config/context.yaml %{buildroot}%{_sysconfdir}/crowdsec/console/
install -m 750 -D config/%{name}.cron.daily %{buildroot}%{_sysconfdir}/cron.daily/%{name}
install -m 644 -D %{SOURCE1} %{buildroot}%{_presetdir}
@ -115,6 +117,7 @@ rm -rf %{buildroot}
%config(noreplace) %{_sysconfdir}/%{name}/simulation.yaml
%config(noreplace) %{_sysconfdir}/%{name}/profiles.yaml
%config(noreplace) %{_sysconfdir}/%{name}/console.yaml
%config(noreplace) %{_sysconfdir}/%{name}/console/context.yaml
%config(noreplace) %{_presetdir}/80-%{name}.preset
%config(noreplace) %{_sysconfdir}/%{name}/notifications/http.yaml
%config(noreplace) %{_sysconfdir}/%{name}/notifications/slack.yaml

Some files were not shown because too many files have changed in this diff Show more