Compare commits

...

18 commits

Author SHA1 Message Date
Laurence Jones
05b54687b6
feat: support stdout in cscli support dump (#2939)
* feat: support stdout in cscli support dump

* fix: skip log.info if stdout

* fix: handle errors by returning to runE instead
2024-04-26 15:56:15 +01:00
mmetc
c4473839c4
Refact pkg/parser/node (#2953)
* extract method processFilter()

* extract method processWhitelist()

* lint (whitespace, errors)
2024-04-25 17:53:10 +02:00
mmetc
d2c4bc55fc
plugins: use yaml.v3 (#2969)
* plugins: use yaml.v3

* lint
2024-04-25 17:34:49 +02:00
mmetc
2abc078e53
use go 1.22.2 (#2826) 2024-04-25 15:11:08 +02:00
blotus
ceb4479ec4
add zfs magic for GetFSType (#2950) 2024-04-25 15:05:11 +02:00
mmetc
845d4542bb
cscli: use yaml.v3 (#2965)
* cscli: use yaml.v3

* lint
2024-04-25 14:41:02 +02:00
Thibault "bui" Koechlin
f4ed7b3520
Truncate meta data (#2966)
* truncate meta-data if they are too big
2024-04-25 13:43:38 +02:00
mmetc
60431804d8
db config: don't exit setup if can't detect fs, improve detection for freebsd (#2963) 2024-04-25 11:11:57 +02:00
mmetc
0f942a95f1
pkg/cwhub - rename methods for clarity (#2961)
* pkg/cwhub - rename methods for clarity

* lint
2024-04-24 11:09:37 +02:00
mmetc
97e6588a45
cscli hub items: avoid global (#2960)
* cscli hub items: avoid global

* lint (whitespace, errors)

* lint
2024-04-24 10:05:55 +02:00
mmetc
725cae1fa8
CI: upload coverage with token (#2958) 2024-04-23 12:41:50 +02:00
mmetc
c64332d30a
cscli config show: avoid globals, use yaml v3 (#2863)
* cscli config show: avoid globals, use yaml v3

* lint (whitespace/errors)
2024-04-23 12:28:38 +02:00
mmetc
718d1c54b2
pkg/database/decisions: remove filter parameter, which is always passed empty (#2954) 2024-04-23 11:15:27 +02:00
mmetc
b48b728317
cscli support: include stack traces (#2935) 2024-04-22 23:54:51 +02:00
mmetc
fb393f1c57
tests: bump yq, cfssl (#2952) 2024-04-22 17:19:00 +02:00
mmetc
630cbf0c70
update linter list and descriptions (#2951) 2024-04-22 17:18:11 +02:00
Laurence Jones
95f27677e4
enhance: add refactoring to governance (#2955) 2024-04-22 14:18:34 +01:00
blotus
c6e40191dd
Revert "docker: pre-download all hub items and data, opt-in hub updat… (#2947) 2024-04-18 15:33:51 +02:00
70 changed files with 771 additions and 491 deletions

View file

@ -42,7 +42,7 @@ issue:
3. Check [Releases](https://github.com/crowdsecurity/crowdsec/releases/latest) to make sure your agent is on the latest version.
- prefix: kind
list: ['feature', 'bug', 'packaging', 'enhancement']
list: ['feature', 'bug', 'packaging', 'enhancement', 'refactoring']
multiple: false
author_association:
author: true
@ -54,6 +54,7 @@ issue:
@$AUTHOR: There are no 'kind' label on this issue. You need a 'kind' label to start the triage process.
* `/kind feature`
* `/kind enhancement`
* `/kind refactoring`
* `/kind bug`
* `/kind packaging`
@ -65,12 +66,13 @@ pull_request:
labels:
- prefix: kind
multiple: false
list: [ 'feature', 'enhancement', 'fix', 'chore', 'dependencies']
list: [ 'feature', 'enhancement', 'fix', 'chore', 'dependencies', 'refactoring']
needs:
comment: |
@$AUTHOR: There are no 'kind' label on this PR. You need a 'kind' label to generate the release automatically.
* `/kind feature`
* `/kind enhancement`
* `/kind refactoring`
* `/kind fix`
* `/kind chore`
* `/kind dependencies`

View file

@ -33,7 +33,7 @@ jobs:
- name: "Set up Go"
uses: actions/setup-go@v5
with:
go-version: "1.21.9"
go-version: "1.22.2"
- name: "Install bats dependencies"
env:

View file

@ -36,7 +36,7 @@ jobs:
- name: "Set up Go"
uses: actions/setup-go@v5
with:
go-version: "1.21.9"
go-version: "1.22.2"
- name: "Install bats dependencies"
env:

View file

@ -45,7 +45,7 @@ jobs:
- name: "Set up Go"
uses: actions/setup-go@v5
with:
go-version: "1.21.9"
go-version: "1.22.2"
- name: "Install bats dependencies"
env:

View file

@ -28,7 +28,7 @@ jobs:
- name: "Set up Go"
uses: actions/setup-go@v5
with:
go-version: "1.21.9"
go-version: "1.22.2"
- name: "Install bats dependencies"
env:
@ -81,3 +81,4 @@ jobs:
with:
files: ./coverage-bats.out
flags: bats
token: ${{ secrets.CODECOV_TOKEN }}

View file

@ -35,7 +35,7 @@ jobs:
- name: "Set up Go"
uses: actions/setup-go@v5
with:
go-version: "1.21.9"
go-version: "1.22.2"
- name: Build
run: make windows_installer BUILD_RE2_WASM=1

View file

@ -52,7 +52,7 @@ jobs:
- name: "Set up Go"
uses: actions/setup-go@v5
with:
go-version: "1.21.9"
go-version: "1.22.2"
cache-dependency-path: "**/go.sum"
# Initializes the CodeQL tools for scanning.

View file

@ -59,15 +59,15 @@ jobs:
cd docker/test
python -m pip install --upgrade pipenv wheel
- name: "Cache virtualenvs"
id: cache-pipenv
uses: actions/cache@v4
with:
path: ~/.local/share/virtualenvs
key: ${{ runner.os }}-pipenv-${{ hashFiles('**/Pipfile.lock') }}
#- name: "Cache virtualenvs"
# id: cache-pipenv
# uses: actions/cache@v4
# with:
# path: ~/.local/share/virtualenvs
# key: ${{ runner.os }}-pipenv-${{ hashFiles('**/Pipfile.lock') }}
- name: "Install dependencies"
if: steps.cache-pipenv.outputs.cache-hit != 'true'
#if: steps.cache-pipenv.outputs.cache-hit != 'true'
run: |
cd docker/test
pipenv install --deploy

View file

@ -34,7 +34,7 @@ jobs:
- name: "Set up Go"
uses: actions/setup-go@v5
with:
go-version: "1.21.9"
go-version: "1.22.2"
- name: Build
run: |
@ -52,6 +52,7 @@ jobs:
with:
files: coverage.out
flags: unit-windows
token: ${{ secrets.CODECOV_TOKEN }}
- name: golangci-lint
uses: golangci/golangci-lint-action@v4

View file

@ -126,7 +126,7 @@ jobs:
- name: "Set up Go"
uses: actions/setup-go@v5
with:
go-version: "1.21.9"
go-version: "1.22.2"
- name: Create localstack streams
run: |
@ -153,6 +153,7 @@ jobs:
with:
files: coverage.out
flags: unit-linux
token: ${{ secrets.CODECOV_TOKEN }}
- name: golangci-lint
uses: golangci/golangci-lint-action@v4

View file

@ -25,7 +25,7 @@ jobs:
- name: "Set up Go"
uses: actions/setup-go@v5
with:
go-version: "1.21.9"
go-version: "1.22.2"
- name: Build the binaries
run: |

View file

@ -3,7 +3,7 @@
linters-settings:
cyclop:
# lower this after refactoring
max-complexity: 53
max-complexity: 48
gci:
sections:
@ -22,7 +22,7 @@ linters-settings:
gocyclo:
# lower this after refactoring
min-complexity: 49
min-complexity: 48
funlen:
# Checks the number of lines in a function.
@ -37,17 +37,10 @@ linters-settings:
statements: 122
govet:
enable:
- atomicalign
- deepequalerrors
# TODO: - fieldalignment
- findcall
- nilness
# TODO: - reflectvaluecompare
- shadow
- sortslice
- timeformat
- unusedwrite
enable-all: true
disable:
- reflectvaluecompare
- fieldalignment
lll:
# lower this after refactoring
@ -65,7 +58,7 @@ linters-settings:
min-complexity: 28
nlreturn:
block-size: 4
block-size: 5
nolintlint:
allow-unused: false # report any unused nolint directives
@ -89,18 +82,6 @@ linters-settings:
- "!**/pkg/apiserver/controllers/v1/errors.go"
yaml:
files:
- "!**/cmd/crowdsec-cli/alerts.go"
- "!**/cmd/crowdsec-cli/capi.go"
- "!**/cmd/crowdsec-cli/config_show.go"
- "!**/cmd/crowdsec-cli/hubtest.go"
- "!**/cmd/crowdsec-cli/lapi.go"
- "!**/cmd/crowdsec-cli/simulation.go"
- "!**/cmd/crowdsec/crowdsec.go"
- "!**/cmd/notification-dummy/main.go"
- "!**/cmd/notification-email/main.go"
- "!**/cmd/notification-http/main.go"
- "!**/cmd/notification-slack/main.go"
- "!**/cmd/notification-splunk/main.go"
- "!**/pkg/acquisition/acquisition.go"
- "!**/pkg/acquisition/acquisition_test.go"
- "!**/pkg/acquisition/modules/appsec/appsec.go"
@ -147,23 +128,30 @@ linters:
#
# DEPRECATED by golangi-lint
#
- deadcode # The owner seems to have abandoned the linter. Replaced by unused.
- exhaustivestruct # The owner seems to have abandoned the linter. Replaced by exhaustruct.
- golint # Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes
- ifshort # Checks that your code uses short syntax for if-statements whenever possible
- interfacer # Linter that suggests narrower interface types
- maligned # Tool to detect Go structs that would take less memory if their fields were sorted
- nosnakecase # nosnakecase is a linter that detects snake case of variable naming and function name.
- scopelint # Scopelint checks for unpinned variables in go programs
- structcheck # The owner seems to have abandoned the linter. Replaced by unused.
- varcheck # The owner seems to have abandoned the linter. Replaced by unused.
- deadcode
- exhaustivestruct
- golint
- ifshort
- interfacer
- maligned
- nosnakecase
- scopelint
- structcheck
- varcheck
#
# Disabled until fixed for go 1.22
#
- copyloopvar # copyloopvar is a linter detects places where loop variables are copied
- intrange # intrange is a linter to find places where for loops could make use of an integer range.
#
# Enabled
#
# - asasalint # check for pass []any as any in variadic func(...any)
# - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers
# - asciicheck # checks that all code identifiers does not have non-ASCII symbols in the name
# - bidichk # Checks for dangerous unicode character sequences
# - bodyclose # checks whether HTTP response body is closed successfully
# - cyclop # checks function and package cyclomatic complexity
@ -171,13 +159,15 @@ linters:
# - depguard # Go linter that checks if package imports are in a list of acceptable packages
# - dupword # checks for duplicate words in the source code
# - durationcheck # check for two durations multiplied together
# - errcheck # Errcheck is a program for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases
# - errcheck # errcheck is a program for checking for unchecked errors in Go code. These unchecked errors can be critical bugs in some cases
# - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13.
# - execinquery # execinquery is a linter about query string checker in Query function which reads your Go src files and warning it finds
# - exportloopref # checks for pointers to enclosing loop variables
# - funlen # Tool for detection of long functions
# - ginkgolinter # enforces standards of using ginkgo and gomega
# - gocheckcompilerdirectives # Checks that go compiler directive comments (//go:) are valid.
# - gochecknoinits # Checks that no init functions are present in Go code
# - gochecksumtype # Run exhaustiveness checks on Go "sum types"
# - gocognit # Computes and checks the cognitive complexity of functions
# - gocritic # Provides diagnostics that check for bugs, performance and style issues.
# - gocyclo # Computes and checks the cyclomatic complexity of functions
@ -185,48 +175,55 @@ linters:
# - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod.
# - gomodguard # Allow and block list linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations.
# - goprintffuncname # Checks that printf-like functions are named with `f` at the end
# - gosimple # (megacheck): Linter for Go source code that specializes in simplifying a code
# - govet # (vet, vetshadow): Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string
# - grouper # An analyzer to analyze expression groups.
# - gosimple # (megacheck): Linter for Go source code that specializes in simplifying code
# - gosmopolitan # Report certain i18n/l10n anti-patterns in your Go codebase
# - govet # (vet, vetshadow): Vet examines Go source code and reports suspicious constructs. It is roughly the same as 'go vet' and uses its passes.
# - grouper # Analyze expression groups.
# - importas # Enforces consistent import aliases
# - ineffassign # Detects when assignments to existing variables are not used
# - interfacebloat # A linter that checks the number of methods inside an interface.
# - lll # Reports long lines
# - loggercheck # (logrlint): Checks key value pairs for common logger libraries (kitlog,klog,logr,zap).
# - logrlint # Check logr arguments.
# - maintidx # maintidx measures the maintainability index of each function.
# - makezero # Finds slice declarations with non-zero initial length
# - misspell # Finds commonly misspelled English words in comments
# - nakedret # Finds naked returns in functions greater than a specified function length
# - mirror # reports wrong mirror patterns of bytes/strings usage
# - misspell # Finds commonly misspelled English words
# - nakedret # Checks that functions with naked returns are not longer than a maximum size (can be zero).
# - nestif # Reports deeply nested if statements
# - nilerr # Finds the code that returns nil even if it checks that the error is not nil.
# - nolintlint # Reports ill-formed or insufficient nolint directives
# - nonamedreturns # Reports all named returns
# - nosprintfhostport # Checks for misuse of Sprintf to construct a host with port in a URL.
# - perfsprint # Checks that fmt.Sprintf can be replaced with a faster alternative.
# - predeclared # find code that shadows one of Go's predeclared identifiers
# - reassign # Checks that package variables are not reassigned
# - rowserrcheck # checks whether Err of rows is checked successfully
# - sqlclosecheck # Checks that sql.Rows and sql.Stmt are closed.
# - staticcheck # (megacheck): Staticcheck is a go vet on steroids, applying a ton of static analysis checks
# - testableexamples # linter checks if examples are testable (have an expected output)
# - rowserrcheck # checks whether Rows.Err of rows is checked successfully
# - sloglint # ensure consistent code style when using log/slog
# - spancheck # Checks for mistakes with OpenTelemetry/Census spans.
# - sqlclosecheck # Checks that sql.Rows, sql.Stmt, sqlx.NamedStmt, pgx.Query are closed.
# - staticcheck # (megacheck): It's a set of rules from staticcheck. It's not the same thing as the staticcheck binary. The author of staticcheck doesn't support or approve the use of staticcheck as a library inside golangci-lint.
# - tenv # tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17
# - testableexamples # linter checks if examples are testable (have an expected output)
# - testifylint # Checks usage of github.com/stretchr/testify.
# - tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes
# - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code
# - unconvert # Remove unnecessary type conversions
# - unused # (megacheck): Checks Go code for unused constants, variables, functions and types
# - usestdlibvars # A linter that detect the possibility to use variables/constants from the Go standard library.
# - wastedassign # wastedassign finds wasted assignment statements.
# - wastedassign # Finds wasted assignment statements
# - zerologlint # Detects the wrong usage of `zerolog` that a user forgets to dispatch with `Send` or `Msg`
#
# Recommended? (easy)
#
- dogsled # Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f())
- errchkjson # Checks types passed to the json encoding functions. Reports unsupported types and optionally reports occations, where the check for the returned error can be omitted.
- errchkjson # Checks types passed to the json encoding functions. Reports unsupported types and reports occations, where the check for the returned error can be omitted.
- exhaustive # check exhaustiveness of enum switch statements
- gci # Gci control golang package import order and make it always deterministic.
- godot # Check if comments end in a period
- gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification
- goimports # In addition to fixing imports, goimports also formats your code in the same style as gofmt.
- goimports # Check import statements are formatted according to the 'goimport' command. Reformat imports in autofix mode.
- gosec # (gas): Inspects source code for security problems
- inamedparam # reports interfaces with unnamed method parameters
- musttag # enforce field tags in (un)marshaled structs
@ -234,7 +231,7 @@ linters:
- protogetter # Reports direct reads from proto message fields when getters should be used
- revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint.
- tagalign # check that struct tags are well aligned
- thelper # thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers
- thelper # thelper detects tests helpers which is not start with t.Helper() method.
- wrapcheck # Checks that errors returned from external packages are wrapped
#
@ -242,12 +239,12 @@ linters:
#
- containedctx # containedctx is a linter that detects struct contained context.Context field
- contextcheck # check the function whether use a non-inherited context
- contextcheck # check whether the function uses a non-inherited context
- errname # Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`.
- gomnd # An analyzer to detect magic numbers.
- ireturn # Accept Interfaces, Return Concrete Types
- nilnil # Checks that there is no simultaneous return of `nil` error and an invalid value.
- noctx # noctx finds sending http request without context.Context
- noctx # Finds sending http request without context.Context
- unparam # Reports unused function parameters
#
@ -256,8 +253,8 @@ linters:
- gofumpt # Gofumpt checks whether code was gofumpt-ed.
- nlreturn # nlreturn checks for a new line before return and branch statements to increase code clarity
- whitespace # Tool for detection of leading and trailing whitespace
- wsl # Whitespace Linter - Forces you to use empty lines!
- whitespace # Whitespace is a linter that checks for unnecessary newlines at the start and end of functions, if, for, etc.
- wsl # add or remove empty lines
#
# Well intended, but not ready for this
@ -265,8 +262,8 @@ linters:
- dupl # Tool for code clone detection
- forcetypeassert # finds forced type assertions
- godox # Tool for detection of FIXME, TODO and other comment keywords
- goerr113 # Golang linter to check the errors handling expressions
- paralleltest # paralleltest detects missing usage of t.Parallel() method in your Go test
- goerr113 # Go linter to check the errors handling expressions
- paralleltest # Detects missing usage of t.Parallel() method in your Go test
- testpackage # linter that makes you use a separate _test package
#
@ -274,7 +271,7 @@ linters:
#
- exhaustruct # Checks if all structure fields are initialized
- forbidigo # Forbids identifiers
- gochecknoglobals # check that no global variables exist
- gochecknoglobals # Check that no global variables exist.
- goconst # Finds repeated strings that could be replaced by a constant
- stylecheck # Stylecheck is a replacement for golint
- tagliatelle # Checks the struct tags.

View file

@ -1,5 +1,5 @@
# vim: set ft=dockerfile:
FROM golang:1.21.9-alpine3.18 AS build
FROM golang:1.22.2-alpine3.18 AS build
ARG BUILD_VERSION
@ -16,7 +16,7 @@ RUN apk add --no-cache git g++ gcc libc-dev make bash gettext binutils-gold core
cd re2-${RE2_VERSION} && \
make install && \
echo "githubciXXXXXXXXXXXXXXXXXXXXXXXX" > /etc/machine-id && \
go install github.com/mikefarah/yq/v4@v4.40.4
go install github.com/mikefarah/yq/v4@v4.43.1
COPY . .
@ -25,7 +25,6 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \
./wizard.sh --docker-mode && \
cd - >/dev/null && \
cscli hub update && \
./docker/preload-hub-items && \
cscli collections install crowdsecurity/linux && \
cscli parsers install crowdsecurity/whitelists

View file

@ -1,5 +1,5 @@
# vim: set ft=dockerfile:
FROM golang:1.21.9-bookworm AS build
FROM golang:1.22.2-bookworm AS build
ARG BUILD_VERSION
@ -21,7 +21,7 @@ RUN apt-get update && \
make && \
make install && \
echo "githubciXXXXXXXXXXXXXXXXXXXXXXXX" > /etc/machine-id && \
go install github.com/mikefarah/yq/v4@v4.40.4
go install github.com/mikefarah/yq/v4@v4.43.1
COPY . .
@ -30,7 +30,6 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \
./wizard.sh --docker-mode && \
cd - >/dev/null && \
cscli hub update && \
./docker/preload-hub-items && \
cscli collections install crowdsecurity/linux && \
cscli parsers install crowdsecurity/whitelists

View file

@ -21,7 +21,7 @@ stages:
- task: GoTool@0
displayName: "Install Go"
inputs:
version: '1.21.9'
version: '1.22.2'
- pwsh: |
choco install -y make

View file

@ -4,6 +4,7 @@ import (
"context"
"encoding/csv"
"encoding/json"
"errors"
"fmt"
"net/url"
"os"
@ -16,7 +17,7 @@ import (
"github.com/go-openapi/strfmt"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
"github.com/crowdsecurity/go-cs-lib/version"
@ -204,6 +205,7 @@ func (cli *cliAlerts) NewCommand() *cobra.Command {
if err != nil {
return fmt.Errorf("parsing api url %s: %w", apiURL, err)
}
cli.client, err = apiclient.NewClient(&apiclient.Config{
MachineID: cfg.API.Client.Credentials.Login,
Password: strfmt.Password(cfg.API.Client.Credentials.Password),
@ -211,7 +213,6 @@ func (cli *cliAlerts) NewCommand() *cobra.Command {
URL: apiURL,
VersionPrefix: "v1",
})
if err != nil {
return fmt.Errorf("new api client: %w", err)
}
@ -229,7 +230,7 @@ func (cli *cliAlerts) NewCommand() *cobra.Command {
}
func (cli *cliAlerts) NewListCmd() *cobra.Command {
var alertListFilter = apiclient.AlertsListOpts{
alertListFilter := apiclient.AlertsListOpts{
ScopeEquals: new(string),
ValueEquals: new(string),
ScenarioEquals: new(string),
@ -363,7 +364,7 @@ func (cli *cliAlerts) NewDeleteCmd() *cobra.Command {
delAlertByID string
)
var alertDeleteFilter = apiclient.AlertsDeleteOpts{
alertDeleteFilter := apiclient.AlertsDeleteOpts{
ScopeEquals: new(string),
ValueEquals: new(string),
ScenarioEquals: new(string),
@ -391,7 +392,7 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`,
*alertDeleteFilter.ScenarioEquals == "" && *alertDeleteFilter.IPEquals == "" &&
*alertDeleteFilter.RangeEquals == "" && delAlertByID == "" {
_ = cmd.Usage()
return fmt.Errorf("at least one filter or --all must be specified")
return errors.New("at least one filter or --all must be specified")
}
return nil
@ -478,7 +479,7 @@ func (cli *cliAlerts) NewInspectCmd() *cobra.Command {
cfg := cli.cfg()
if len(args) == 0 {
printHelp(cmd)
return fmt.Errorf("missing alert_id")
return errors.New("missing alert_id")
}
for _, alertID := range args {
id, err := strconv.Atoi(alertID)

View file

@ -10,7 +10,7 @@ import (
"github.com/go-openapi/strfmt"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
"github.com/crowdsecurity/go-cs-lib/version"
@ -85,7 +85,6 @@ func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error {
URL: apiurl,
VersionPrefix: CAPIURLPrefix,
}, nil)
if err != nil {
return fmt.Errorf("api client register ('%s'): %w", types.CAPIBaseURL, err)
}
@ -175,7 +174,7 @@ func (cli *cliCapi) status() error {
return err
}
scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS)
scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS)
if err != nil {
return fmt.Errorf("failed to get scenarios: %w", err)
}

View file

@ -10,13 +10,15 @@ import (
"github.com/sanity-io/litter"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
)
func showConfigKey(key string) error {
func (cli *cliConfig) showKey(key string) error {
cfg := cli.cfg()
type Env struct {
Config *csconfig.Config
}
@ -30,15 +32,15 @@ func showConfigKey(key string) error {
return err
}
output, err := expr.Run(program, Env{Config: csConfig})
output, err := expr.Run(program, Env{Config: cfg})
if err != nil {
return err
}
switch csConfig.Cscli.Output {
switch cfg.Cscli.Output {
case "human", "raw":
// Don't use litter for strings, it adds quotes
// that we didn't have before
// that would break compatibility with previous versions
switch output.(type) {
case string:
fmt.Println(output)
@ -51,13 +53,14 @@ func showConfigKey(key string) error {
return fmt.Errorf("failed to marshal configuration: %w", err)
}
fmt.Printf("%s\n", string(data))
fmt.Println(string(data))
}
return nil
}
var configShowTemplate = `Global:
func (cli *cliConfig) template() string {
return `Global:
{{- if .ConfigPaths }}
- Configuration Folder : {{.ConfigPaths.ConfigDir}}
@ -182,19 +185,11 @@ Central API:
{{- end }}
{{- end }}
`
}
func (cli *cliConfig) show(key string) error {
func (cli *cliConfig) show() error {
cfg := cli.cfg()
if err := cfg.LoadAPIClient(); err != nil {
log.Errorf("failed to load API client configuration: %s", err)
// don't return, we can still show the configuration
}
if key != "" {
return showConfigKey(key)
}
switch cfg.Cscli.Output {
case "human":
// The tests on .Enable look funny because the option has a true default which has
@ -205,7 +200,7 @@ func (cli *cliConfig) show(key string) error {
"ValueBool": func(b *bool) bool { return b != nil && *b },
}
tmp, err := template.New("config").Funcs(funcs).Parse(configShowTemplate)
tmp, err := template.New("config").Funcs(funcs).Parse(cli.template())
if err != nil {
return err
}
@ -220,14 +215,14 @@ func (cli *cliConfig) show(key string) error {
return fmt.Errorf("failed to marshal configuration: %w", err)
}
fmt.Printf("%s\n", string(data))
fmt.Println(string(data))
case "raw":
data, err := yaml.Marshal(cfg)
if err != nil {
return fmt.Errorf("failed to marshal configuration: %w", err)
}
fmt.Printf("%s\n", string(data))
fmt.Println(string(data))
}
return nil
@ -243,7 +238,16 @@ func (cli *cliConfig) newShowCmd() *cobra.Command {
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(_ *cobra.Command, _ []string) error {
return cli.show(key)
if err := cli.cfg().LoadAPIClient(); err != nil {
log.Errorf("failed to load API client configuration: %s", err)
// don't return, we can still show the configuration
}
if key != "" {
return cli.showKey(key)
}
return cli.show()
},
}
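
Several command hunks in this comparison (alerts, capi, config show, hubtest, lapi, simulation) swap the import gopkg.in/yaml.v2 for gopkg.in/yaml.v3, matching the "cscli: use yaml.v3" and "plugins: use yaml.v3" commits. Marshal and Unmarshal keep the same signatures, so most files only need the import change; the behavioural difference worth knowing is that yaml.v3 indents nested nodes with 4 spaces by default, which is presumably why the inspectItem hunk further down keeps enc.SetIndent(2). A minimal, self-contained sketch (the item type and values are invented for illustration):

package main

import (
	"fmt"
	"os"

	"gopkg.in/yaml.v3" // was "gopkg.in/yaml.v2"
)

type item struct {
	Name    string   `yaml:"name"`
	Parsers []string `yaml:"parsers,omitempty"`
}

func main() {
	// Same call as with yaml.v2: only the import path changes.
	out, err := yaml.Marshal(item{Name: "crowdsecurity/linux"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // name: crowdsecurity/linux

	// yaml.v3 defaults to 4-space indentation; SetIndent(2) restores the
	// v2-style output where that matters.
	enc := yaml.NewEncoder(os.Stdout)
	enc.SetIndent(2)
	defer enc.Close()

	if err := enc.Encode(item{Name: "crowdsecurity/linux", Parsers: []string{"crowdsecurity/syslog-logs"}}); err != nil {
		panic(err)
	}
}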

View file

@ -4,9 +4,11 @@ import (
"context"
"encoding/csv"
"encoding/json"
"errors"
"fmt"
"net/url"
"os"
"strconv"
"strings"
"github.com/fatih/color"
@ -36,7 +38,7 @@ func NewCLIConsole(cfg configGetter) *cliConsole {
}
func (cli *cliConsole) NewCommand() *cobra.Command {
var cmd = &cobra.Command{
cmd := &cobra.Command{
Use: "console [action]",
Short: "Manage interaction with Crowdsec console (https://app.crowdsec.net)",
Args: cobra.MinimumNArgs(1),
@ -101,7 +103,7 @@ After running this command your will need to validate the enrollment in the weba
return err
}
scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS)
scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS)
if err != nil {
return fmt.Errorf("failed to get installed scenarios: %w", err)
}
@ -203,7 +205,7 @@ Enable given information push to the central API. Allows to empower the console`
log.Infof("All features have been enabled successfully")
} else {
if len(args) == 0 {
return fmt.Errorf("you must specify at least one feature to enable")
return errors.New("you must specify at least one feature to enable")
}
if err := cli.setConsoleOpts(args, true); err != nil {
return err
@ -288,11 +290,11 @@ func (cli *cliConsole) newStatusCmd() *cobra.Command {
}
rows := [][]string{
{csconfig.SEND_MANUAL_SCENARIOS, fmt.Sprintf("%t", *consoleCfg.ShareManualDecisions)},
{csconfig.SEND_CUSTOM_SCENARIOS, fmt.Sprintf("%t", *consoleCfg.ShareCustomScenarios)},
{csconfig.SEND_TAINTED_SCENARIOS, fmt.Sprintf("%t", *consoleCfg.ShareTaintedScenarios)},
{csconfig.SEND_CONTEXT, fmt.Sprintf("%t", *consoleCfg.ShareContext)},
{csconfig.CONSOLE_MANAGEMENT, fmt.Sprintf("%t", *consoleCfg.ConsoleManagement)},
{csconfig.SEND_MANUAL_SCENARIOS, strconv.FormatBool(*consoleCfg.ShareManualDecisions)},
{csconfig.SEND_CUSTOM_SCENARIOS, strconv.FormatBool(*consoleCfg.ShareCustomScenarios)},
{csconfig.SEND_TAINTED_SCENARIOS, strconv.FormatBool(*consoleCfg.ShareTaintedScenarios)},
{csconfig.SEND_CONTEXT, strconv.FormatBool(*consoleCfg.ShareContext)},
{csconfig.CONSOLE_MANAGEMENT, strconv.FormatBool(*consoleCfg.ConsoleManagement)},
}
for _, row := range rows {
err = csvwriter.Write(row)
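
The rows above replace fmt.Sprintf("%t", b) with strconv.FormatBool(b). Both render the same string; FormatBool simply skips the fmt formatting machinery, which is the kind of rewrite the perfsprint linter listed in the .golangci.yml hunk is about. A trivial sketch:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	enabled := true

	// Identical output; the second form avoids going through fmt.
	fmt.Println(fmt.Sprintf("%t", enabled))  // true
	fmt.Println(strconv.FormatBool(enabled)) // true
}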

View file

@ -9,7 +9,6 @@ import (
log "github.com/sirupsen/logrus"
)
/*help to copy the file, ioutil doesn't offer the feature*/
func copyFileContents(src, dst string) (err error) {
@ -69,6 +68,7 @@ func CopyFile(sourceSymLink, destinationFile string) error {
if !(destinationFileStat.Mode().IsRegular()) {
return fmt.Errorf("copyFile: non-regular destination file %s (%q)", destinationFileStat.Name(), destinationFileStat.Mode().String())
}
if os.SameFile(sourceFileStat, destinationFileStat) {
return err
}
@ -80,4 +80,3 @@ func CopyFile(sourceSymLink, destinationFile string) error {
return err
}

View file

@ -4,6 +4,7 @@ import (
"context"
"encoding/csv"
"encoding/json"
"errors"
"fmt"
"net/url"
"os"
@ -346,7 +347,7 @@ cscli decisions add --scope username --value foobar
addScope = types.Range
} else if addValue == "" {
printHelp(cmd)
return fmt.Errorf("missing arguments, a value is required (--ip, --range or --scope and --value)")
return errors.New("missing arguments, a value is required (--ip, --range or --scope and --value)")
}
if addReason == "" {
@ -371,7 +372,7 @@ cscli decisions add --scope username --value foobar
Scenario: &addReason,
ScenarioVersion: &empty,
Simulated: &simulated,
//setting empty scope/value broke plugins, and it didn't seem to be needed anymore w/ latest papi changes
// setting empty scope/value broke plugins, and it didn't seem to be needed anymore w/ latest papi changes
Source: &models.Source{
AsName: empty,
AsNumber: empty,
@ -411,7 +412,7 @@ cscli decisions add --scope username --value foobar
}
func (cli *cliDecisions) newDeleteCmd() *cobra.Command {
var delFilter = apiclient.DecisionsDeleteOpts{
delFilter := apiclient.DecisionsDeleteOpts{
ScopeEquals: new(string),
ValueEquals: new(string),
TypeEquals: new(string),
@ -448,7 +449,7 @@ cscli decisions delete --origin lists --scenario list_name
*delFilter.RangeEquals == "" && *delFilter.ScenarioEquals == "" &&
*delFilter.OriginEquals == "" && delDecisionID == "" {
cmd.Usage()
return fmt.Errorf("at least one filter or --all must be specified")
return errors.New("at least one filter or --all must be specified")
}
return nil
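
A pattern repeated across the alerts, decisions, explain, require, setup and simulation hunks: fmt.Errorf calls that carry a constant message become errors.New, while fmt.Errorf stays (with %w) wherever something is interpolated or wrapped. A small sketch of that rule of thumb, using a hypothetical validation helper:

package main

import (
	"errors"
	"fmt"
	"strconv"
)

// checkFilter is a made-up helper that only illustrates the convention.
func checkFilter(filter, id string) error {
	if filter == "" && id == "" {
		// Constant message: errors.New, not fmt.Errorf.
		return errors.New("at least one filter or --all must be specified")
	}

	if id != "" {
		if _, err := strconv.Atoi(id); err != nil {
			// Something to interpolate and wrap: keep fmt.Errorf with %w.
			return fmt.Errorf("invalid id %q: %w", id, err)
		}
	}

	return nil
}

func main() {
	fmt.Println(checkFilter("", ""))
	fmt.Println(checkFilter("", "abc"))
}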

View file

@ -5,6 +5,7 @@ import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
@ -81,7 +82,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error {
}
if defaultDuration == "" {
return fmt.Errorf("--duration cannot be empty")
return errors.New("--duration cannot be empty")
}
defaultScope, err := flags.GetString("scope")
@ -90,7 +91,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error {
}
if defaultScope == "" {
return fmt.Errorf("--scope cannot be empty")
return errors.New("--scope cannot be empty")
}
defaultReason, err := flags.GetString("reason")
@ -99,7 +100,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error {
}
if defaultReason == "" {
return fmt.Errorf("--reason cannot be empty")
return errors.New("--reason cannot be empty")
}
defaultType, err := flags.GetString("type")
@ -108,7 +109,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error {
}
if defaultType == "" {
return fmt.Errorf("--type cannot be empty")
return errors.New("--type cannot be empty")
}
batchSize, err := flags.GetInt("batch")
@ -136,7 +137,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error {
}
if format == "" {
return fmt.Errorf("unable to guess format from file extension, please provide a format with --format flag")
return errors.New("unable to guess format from file extension, please provide a format with --format flag")
}
if input == "-" {
@ -235,7 +236,6 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error {
return nil
}
func (cli *cliDecisions) newImportCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "import [options]",

View file

@ -39,8 +39,10 @@ id: %s
title: %s
---
`
name := filepath.Base(filename)
base := strings.TrimSuffix(name, filepath.Ext(name))
return fmt.Sprintf(header, base, strings.ReplaceAll(base, "_", " "))
}
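
The hunk above builds the documentation front matter from the generated file name: filepath.Base drops the directory, TrimSuffix drops the extension, and underscores become spaces in the title. A standalone sketch of the same logic (the input file name is only an example):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func frontMatter(filename string) string {
	header := `---
id: %s
title: %s
---
`
	name := filepath.Base(filename)
	base := strings.TrimSuffix(name, filepath.Ext(name))

	return fmt.Sprintf(header, base, strings.ReplaceAll(base, "_", " "))
}

func main() {
	fmt.Print(frontMatter("doc/cscli_config_show.md"))
	// ---
	// id: cscli_config_show
	// title: cscli config show
	// ---
}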

View file

@ -83,7 +83,7 @@ tail -n 5 myfile.log | cscli explain --type nginx -f -
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
fileInfo, _ := os.Stdin.Stat()
if cli.flags.logFile == "-" && ((fileInfo.Mode() & os.ModeCharDevice) == os.ModeCharDevice) {
return fmt.Errorf("the option -f - is intended to work with pipes")
return errors.New("the option -f - is intended to work with pipes")
}
return nil
@ -160,18 +160,22 @@ func (cli *cliExplain) run() error {
} else if logFile == "-" {
reader := bufio.NewReader(os.Stdin)
errCount := 0
for {
input, err := reader.ReadBytes('\n')
if err != nil && errors.Is(err, io.EOF) {
break
}
if len(input) > 1 {
_, err = f.Write(input)
}
if err != nil || len(input) <= 1 {
errCount++
}
}
if errCount > 0 {
log.Warnf("Failed to write %d lines to %s", errCount, tmpFile)
}
@ -207,7 +211,7 @@ func (cli *cliExplain) run() error {
}
if dsn == "" {
return fmt.Errorf("no acquisition (--file or --dsn) provided, can't run cscli test")
return errors.New("no acquisition (--file or --dsn) provided, can't run cscli test")
}
cmdArgs := []string{"-c", ConfigFilePath, "-type", logType, "-dsn", dsn, "-dump-data", dir, "-no-api"}

View file

@ -13,7 +13,7 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
)
type cliHub struct {
type cliHub struct{
cfg configGetter
}
@ -137,7 +137,7 @@ func (cli *cliHub) upgrade(force bool) error {
}
for _, itemType := range cwhub.ItemTypes {
items, err := hub.GetInstalledItems(itemType)
items, err := hub.GetInstalledItemsByType(itemType)
if err != nil {
return err
}

View file

@ -13,8 +13,9 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
)
func NewCLIAppsecConfig() *cliItem {
func NewCLIAppsecConfig(cfg configGetter) *cliItem {
return &cliItem{
cfg: cfg,
name: cwhub.APPSEC_CONFIGS,
singular: "appsec-config",
oneOrMore: "appsec-config(s)",
@ -46,7 +47,7 @@ cscli appsec-configs list crowdsecurity/vpatch`,
}
}
func NewCLIAppsecRule() *cliItem {
func NewCLIAppsecRule(cfg configGetter) *cliItem {
inspectDetail := func(item *cwhub.Item) error {
// Only show the converted rules in human mode
if csConfig.Cscli.Output != "human" {
@ -57,11 +58,11 @@ func NewCLIAppsecRule() *cliItem {
yamlContent, err := os.ReadFile(item.State.LocalPath)
if err != nil {
return fmt.Errorf("unable to read file %s : %s", item.State.LocalPath, err)
return fmt.Errorf("unable to read file %s: %w", item.State.LocalPath, err)
}
if err := yaml.Unmarshal(yamlContent, &appsecRule); err != nil {
return fmt.Errorf("unable to unmarshal yaml file %s : %s", item.State.LocalPath, err)
return fmt.Errorf("unable to unmarshal yaml file %s: %w", item.State.LocalPath, err)
}
for _, ruleType := range appsec_rule.SupportedTypes() {
@ -70,7 +71,7 @@ func NewCLIAppsecRule() *cliItem {
for _, rule := range appsecRule.Rules {
convertedRule, _, err := rule.Convert(ruleType, appsecRule.Name)
if err != nil {
return fmt.Errorf("unable to convert rule %s : %s", rule.Name, err)
return fmt.Errorf("unable to convert rule %s: %w", rule.Name, err)
}
fmt.Println(convertedRule)
@ -88,6 +89,7 @@ func NewCLIAppsecRule() *cliItem {
}
return &cliItem{
cfg: cfg,
name: "appsec-rules",
singular: "appsec-rule",
oneOrMore: "appsec-rule(s)",

View file

@ -4,8 +4,9 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
)
func NewCLICollection() *cliItem {
func NewCLICollection(cfg configGetter) *cliItem {
return &cliItem{
cfg: cfg,
name: cwhub.COLLECTIONS,
singular: "collection",
oneOrMore: "collection(s)",

View file

@ -4,8 +4,9 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
)
func NewCLIContext() *cliItem {
func NewCLIContext(cfg configGetter) *cliItem {
return &cliItem{
cfg: cfg,
name: cwhub.CONTEXTS,
singular: "context",
oneOrMore: "context(s)",

View file

@ -4,8 +4,9 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
)
func NewCLIParser() *cliItem {
func NewCLIParser(cfg configGetter) *cliItem {
return &cliItem{
cfg: cfg,
name: cwhub.PARSERS,
singular: "parser",
oneOrMore: "parser(s)",

View file

@ -4,8 +4,9 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
)
func NewCLIPostOverflow() *cliItem {
func NewCLIPostOverflow(cfg configGetter) *cliItem {
return &cliItem{
cfg: cfg,
name: cwhub.POSTOVERFLOWS,
singular: "postoverflow",
oneOrMore: "postoverflow(s)",

View file

@ -4,8 +4,9 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
)
func NewCLIScenario() *cliItem {
func NewCLIScenario(cfg configGetter) *cliItem {
return &cliItem{
cfg: cfg,
name: cwhub.SCENARIOS,
singular: "scenario",
oneOrMore: "scenario(s)",

View file

@ -14,7 +14,7 @@ import (
"github.com/fatih/color"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
"github.com/crowdsecurity/crowdsec/pkg/dumps"
"github.com/crowdsecurity/crowdsec/pkg/emoji"
@ -135,7 +135,8 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios
// create empty nuclei template file
nucleiFileName := fmt.Sprintf("%s.yaml", testName)
nucleiFilePath := filepath.Join(testPath, nucleiFileName)
nucleiFile, err := os.OpenFile(nucleiFilePath, os.O_RDWR|os.O_CREATE, 0755)
nucleiFile, err := os.OpenFile(nucleiFilePath, os.O_RDWR|os.O_CREATE, 0o755)
if err != nil {
return err
}
@ -405,7 +406,7 @@ func (cli *cliHubTest) NewRunCmd() *cobra.Command {
}
func (cli *cliHubTest) NewCleanCmd() *cobra.Command {
var cmd = &cobra.Command{
cmd := &cobra.Command{
Use: "clean",
Short: "clean [test_name]",
Args: cobra.MinimumNArgs(1),

View file

@ -37,6 +37,7 @@ func ShowMetrics(hubItem *cwhub.Item) error {
appsecMetricsTable(color.Output, hubItem.Name, metrics)
default: // no metrics for this item type
}
return nil
}
@ -49,21 +50,27 @@ func GetParserMetric(url string, itemName string) map[string]map[string]int {
if !strings.HasPrefix(fam.Name, "cs_") {
continue
}
log.Tracef("round %d", idx)
for _, m := range fam.Metrics {
metric, ok := m.(prom2json.Metric)
if !ok {
log.Debugf("failed to convert metric to prom2json.Metric")
continue
}
name, ok := metric.Labels["name"]
if !ok {
log.Debugf("no name in Metric %v", metric.Labels)
}
if name != itemName {
continue
}
source, ok := metric.Labels["source"]
if !ok {
log.Debugf("no source in Metric %v", metric.Labels)
} else {
@ -71,12 +78,15 @@ func GetParserMetric(url string, itemName string) map[string]map[string]int {
source = srctype + ":" + source
}
}
value := m.(prom2json.Metric).Value
fval, err := strconv.ParseFloat(value, 32)
if err != nil {
log.Errorf("Unexpected int value %s : %s", value, err)
continue
}
ival := int(fval)
switch fam.Name {
@ -119,6 +129,7 @@ func GetParserMetric(url string, itemName string) map[string]map[string]int {
}
}
}
return stats
}
@ -136,26 +147,34 @@ func GetScenarioMetric(url string, itemName string) map[string]int {
if !strings.HasPrefix(fam.Name, "cs_") {
continue
}
log.Tracef("round %d", idx)
for _, m := range fam.Metrics {
metric, ok := m.(prom2json.Metric)
if !ok {
log.Debugf("failed to convert metric to prom2json.Metric")
continue
}
name, ok := metric.Labels["name"]
if !ok {
log.Debugf("no name in Metric %v", metric.Labels)
}
if name != itemName {
continue
}
value := m.(prom2json.Metric).Value
fval, err := strconv.ParseFloat(value, 32)
if err != nil {
log.Errorf("Unexpected int value %s : %s", value, err)
continue
}
ival := int(fval)
switch fam.Name {
@ -174,6 +193,7 @@ func GetScenarioMetric(url string, itemName string) map[string]int {
}
}
}
return stats
}
@ -188,17 +208,22 @@ func GetAppsecRuleMetric(url string, itemName string) map[string]int {
if !strings.HasPrefix(fam.Name, "cs_") {
continue
}
log.Tracef("round %d", idx)
for _, m := range fam.Metrics {
metric, ok := m.(prom2json.Metric)
if !ok {
log.Debugf("failed to convert metric to prom2json.Metric")
continue
}
name, ok := metric.Labels["rule_name"]
if !ok {
log.Debugf("no rule_name in Metric %v", metric.Labels)
}
if name != itemName {
continue
}
@ -209,11 +234,13 @@ func GetAppsecRuleMetric(url string, itemName string) map[string]int {
}
value := m.(prom2json.Metric).Value
fval, err := strconv.ParseFloat(value, 32)
if err != nil {
log.Errorf("Unexpected int value %s : %s", value, err)
continue
}
ival := int(fval)
switch fam.Name {
@ -231,6 +258,7 @@ func GetAppsecRuleMetric(url string, itemName string) map[string]int {
}
}
}
return stats
}
@ -247,6 +275,7 @@ func GetPrometheusMetric(url string) []*prom2json.Family {
go func() {
defer trace.CatchPanic("crowdsec/GetPrometheusMetric")
err := prom2json.FetchMetricFamilies(url, mfChan, transport)
if err != nil {
log.Fatalf("failed to fetch prometheus metrics : %v", err)
@ -257,6 +286,7 @@ func GetPrometheusMetric(url string) []*prom2json.Family {
for mf := range mfChan {
result = append(result, prom2json.NewFamily(mf))
}
log.Debugf("Finished reading prometheus output, %d entries", len(result))
return result

View file

@ -61,7 +61,7 @@ func compInstalledItems(itemType string, args []string, toComplete string) ([]st
return nil, cobra.ShellCompDirectiveDefault
}
items, err := hub.GetInstalledItemNames(itemType)
items, err := hub.GetInstalledNamesByType(itemType)
if err != nil {
cobra.CompDebugln(fmt.Sprintf("list installed %s err: %s", itemType, err), true)
return nil, cobra.ShellCompDirectiveDefault

View file

@ -1,6 +1,7 @@
package main
import (
"errors"
"fmt"
"os"
"strings"
@ -28,6 +29,7 @@ type cliHelp struct {
}
type cliItem struct {
cfg configGetter
name string // plural, as used in the hub index
singular string
oneOrMore string // parenthetical pluralizaion: "parser(s)"
@ -61,7 +63,9 @@ func (cli cliItem) NewCommand() *cobra.Command {
}
func (cli cliItem) install(args []string, downloadOnly bool, force bool, ignoreError bool) error {
hub, err := require.Hub(csConfig, require.RemoteHub(csConfig), log.StandardLogger())
cfg := cli.cfg()
hub, err := require.Hub(cfg, require.RemoteHub(cfg), log.StandardLogger())
if err != nil {
return err
}
@ -71,7 +75,7 @@ func (cli cliItem) install(args []string, downloadOnly bool, force bool, ignoreE
if item == nil {
msg := suggestNearestMessage(hub, cli.name, name)
if !ignoreError {
return fmt.Errorf(msg)
return errors.New(msg)
}
log.Errorf(msg)
@ -107,10 +111,10 @@ func (cli cliItem) newInstallCmd() *cobra.Command {
Example: cli.installHelp.example,
Args: cobra.MinimumNArgs(1),
DisableAutoGenTag: true,
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return compAllItems(cli.name, args, toComplete)
},
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(_ *cobra.Command, args []string) error {
return cli.install(args, downloadOnly, force, ignoreError)
},
}
@ -137,15 +141,15 @@ func istalledParentNames(item *cwhub.Item) []string {
}
func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error {
hub, err := require.Hub(csConfig, nil, log.StandardLogger())
hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger())
if err != nil {
return err
}
if all {
getter := hub.GetInstalledItems
getter := hub.GetInstalledItemsByType
if purge {
getter = hub.GetAllItems
getter = hub.GetItemsByType
}
items, err := getter(cli.name)
@ -163,6 +167,7 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error
if didRemove {
log.Infof("Removed %s", item.Name)
removed++
}
}
@ -204,6 +209,7 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error
if didRemove {
log.Infof("Removed %s", item.Name)
removed++
}
}
@ -231,10 +237,10 @@ func (cli cliItem) newRemoveCmd() *cobra.Command {
Example: cli.removeHelp.example,
Aliases: []string{"delete"},
DisableAutoGenTag: true,
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return compInstalledItems(cli.name, args, toComplete)
},
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(_ *cobra.Command, args []string) error {
return cli.remove(args, purge, force, all)
},
}
@ -248,13 +254,15 @@ func (cli cliItem) newRemoveCmd() *cobra.Command {
}
func (cli cliItem) upgrade(args []string, force bool, all bool) error {
hub, err := require.Hub(csConfig, require.RemoteHub(csConfig), log.StandardLogger())
cfg := cli.cfg()
hub, err := require.Hub(cfg, require.RemoteHub(cfg), log.StandardLogger())
if err != nil {
return err
}
if all {
items, err := hub.GetInstalledItems(cli.name)
items, err := hub.GetInstalledItemsByType(cli.name)
if err != nil {
return err
}
@ -300,6 +308,7 @@ func (cli cliItem) upgrade(args []string, force bool, all bool) error {
if didUpdate {
log.Infof("Updated %s", item.Name)
updated++
}
}
@ -323,10 +332,10 @@ func (cli cliItem) newUpgradeCmd() *cobra.Command {
Long: coalesce.String(cli.upgradeHelp.long, fmt.Sprintf("Fetch and upgrade one or more %s from the hub", cli.name)),
Example: cli.upgradeHelp.example,
DisableAutoGenTag: true,
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return compInstalledItems(cli.name, args, toComplete)
},
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(_ *cobra.Command, args []string) error {
return cli.upgrade(args, force, all)
},
}
@ -339,21 +348,23 @@ func (cli cliItem) newUpgradeCmd() *cobra.Command {
}
func (cli cliItem) inspect(args []string, url string, diff bool, rev bool, noMetrics bool) error {
cfg := cli.cfg()
if rev && !diff {
return fmt.Errorf("--rev can only be used with --diff")
return errors.New("--rev can only be used with --diff")
}
if url != "" {
csConfig.Cscli.PrometheusUrl = url
cfg.Cscli.PrometheusUrl = url
}
remote := (*cwhub.RemoteHubCfg)(nil)
if diff {
remote = require.RemoteHub(csConfig)
remote = require.RemoteHub(cfg)
}
hub, err := require.Hub(csConfig, remote, log.StandardLogger())
hub, err := require.Hub(cfg, remote, log.StandardLogger())
if err != nil {
return err
}
@ -399,10 +410,10 @@ func (cli cliItem) newInspectCmd() *cobra.Command {
Example: cli.inspectHelp.example,
Args: cobra.MinimumNArgs(1),
DisableAutoGenTag: true,
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return compInstalledItems(cli.name, args, toComplete)
},
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(_ *cobra.Command, args []string) error {
return cli.inspect(args, url, diff, rev, noMetrics)
},
}
@ -417,7 +428,7 @@ func (cli cliItem) newInspectCmd() *cobra.Command {
}
func (cli cliItem) list(args []string, all bool) error {
hub, err := require.Hub(csConfig, nil, log.StandardLogger())
hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger())
if err != nil {
return err
}
@ -526,6 +537,7 @@ func (cli cliItem) whyTainted(hub *cwhub.Hub, item *cwhub.Item, reverse bool) st
// hack: avoid message "item is tainted by itself"
continue
}
ret = append(ret, fmt.Sprintf("# %s is tainted by %s", sub.FQName(), taintList))
}
}
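
The itemcli and main.go hunks in this set retire reads of the package-level csConfig: each command constructor now receives a cfg configGetter (declared in the main.go hunk as type configGetter func() *csconfig.Config) and resolves the configuration through cli.cfg() when it runs. A reduced sketch of that injection pattern, with a stand-in Config type instead of the real pkg/csconfig.Config:

package main

import "fmt"

// Config stands in for csconfig.Config.
type Config struct {
	Output string
}

// configGetter mirrors the signature used by the cscli commands.
type configGetter func() *Config

type cliScenario struct {
	cfg configGetter
}

// NewCLIScenario takes the getter instead of touching a global.
func NewCLIScenario(cfg configGetter) *cliScenario {
	return &cliScenario{cfg: cfg}
}

func (cli *cliScenario) run() {
	// Configuration is resolved lazily, which keeps the constructors cheap
	// and lets tests swap the getter without globals.
	fmt.Println("output format:", cli.cfg().Output)
}

func main() {
	cfg := &Config{Output: "human"}
	NewCLIScenario(func() *Config { return cfg }).run()
}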

View file

@ -17,7 +17,7 @@ import (
// selectItems returns a slice of items of a given type, selected by name and sorted by case-insensitive name
func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly bool) ([]*cwhub.Item, error) {
itemNames := hub.GetItemNames(itemType)
itemNames := hub.GetNamesByType(itemType)
notExist := []string{}
@ -116,7 +116,7 @@ func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item
}
if err := csvwriter.Write(header); err != nil {
return fmt.Errorf("failed to write header: %s", err)
return fmt.Errorf("failed to write header: %w", err)
}
for _, itemType := range itemTypes {
@ -132,7 +132,7 @@ func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item
}
if err := csvwriter.Write(row); err != nil {
return fmt.Errorf("failed to write raw output: %s", err)
return fmt.Errorf("failed to write raw output: %w", err)
}
}
}
@ -150,12 +150,12 @@ func inspectItem(item *cwhub.Item, showMetrics bool) error {
enc.SetIndent(2)
if err := enc.Encode(item); err != nil {
return fmt.Errorf("unable to encode item: %s", err)
return fmt.Errorf("unable to encode item: %w", err)
}
case "json":
b, err := json.MarshalIndent(*item, "", " ")
if err != nil {
return fmt.Errorf("unable to marshal item: %s", err)
return fmt.Errorf("unable to marshal item: %w", err)
}
fmt.Print(string(b))

View file

@ -13,7 +13,7 @@ import (
"github.com/go-openapi/strfmt"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
"github.com/crowdsecurity/go-cs-lib/version"
@ -56,7 +56,7 @@ func (cli *cliLapi) status() error {
return err
}
scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS)
scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS)
if err != nil {
return fmt.Errorf("failed to get scenarios: %w", err)
}
@ -116,7 +116,6 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e
URL: apiurl,
VersionPrefix: LAPIURLPrefix,
}, nil)
if err != nil {
return fmt.Errorf("api client register: %w", err)
}
@ -585,7 +584,7 @@ func detectNode(node parser.Node, parserCTX parser.UnixParserCtx) []string {
}
func detectSubNode(node parser.Node, parserCTX parser.UnixParserCtx) []string {
var ret = make([]string, 0)
ret := make([]string, 0)
for _, subnode := range node.LeavesNodes {
if subnode.Grok.RunTimeRegexp != nil {

View file

@ -1,7 +1,9 @@
package main
import (
"fmt"
"os"
"path/filepath"
"slices"
"time"
@ -10,14 +12,18 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/crowdsecurity/go-cs-lib/trace"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
"github.com/crowdsecurity/crowdsec/pkg/database"
"github.com/crowdsecurity/crowdsec/pkg/fflag"
)
var ConfigFilePath string
var csConfig *csconfig.Config
var dbClient *database.Client
var (
ConfigFilePath string
csConfig *csconfig.Config
dbClient *database.Client
)
type configGetter func() *csconfig.Config
@ -82,6 +88,11 @@ func loadConfigFor(command string) (*csconfig.Config, string, error) {
return nil, "", err
}
// set up directory for trace files
if err := trace.Init(filepath.Join(config.ConfigPaths.DataDir, "trace")); err != nil {
return nil, "", fmt.Errorf("while setting up trace directory: %w", err)
}
return config, merged, nil
}
@ -249,13 +260,13 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall
cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand())
cmd.AddCommand(NewCLISupport().NewCommand())
cmd.AddCommand(NewCLIPapi(cli.cfg).NewCommand())
cmd.AddCommand(NewCLICollection().NewCommand())
cmd.AddCommand(NewCLIParser().NewCommand())
cmd.AddCommand(NewCLIScenario().NewCommand())
cmd.AddCommand(NewCLIPostOverflow().NewCommand())
cmd.AddCommand(NewCLIContext().NewCommand())
cmd.AddCommand(NewCLIAppsecConfig().NewCommand())
cmd.AddCommand(NewCLIAppsecRule().NewCommand())
cmd.AddCommand(NewCLICollection(cli.cfg).NewCommand())
cmd.AddCommand(NewCLIParser(cli.cfg).NewCommand())
cmd.AddCommand(NewCLIScenario(cli.cfg).NewCommand())
cmd.AddCommand(NewCLIPostOverflow(cli.cfg).NewCommand())
cmd.AddCommand(NewCLIContext(cli.cfg).NewCommand())
cmd.AddCommand(NewCLIAppsecConfig(cli.cfg).NewCommand())
cmd.AddCommand(NewCLIAppsecRule(cli.cfg).NewCommand())
if fflag.CscliSetup.IsEnabled() {
cmd.AddCommand(NewSetupCmd())

View file

@ -4,6 +4,7 @@ import (
"context"
"encoding/csv"
"encoding/json"
"errors"
"fmt"
"io/fs"
"net/url"
@ -88,7 +89,7 @@ func (cli *cliNotifications) getPluginConfigs() (map[string]csplugin.PluginConfi
return fmt.Errorf("error while traversing directory %s: %w", path, err)
}
name := filepath.Join(cfg.ConfigPaths.NotificationDir, info.Name()) //Avoid calling info.Name() twice
name := filepath.Join(cfg.ConfigPaths.NotificationDir, info.Name()) // Avoid calling info.Name() twice
if (strings.HasSuffix(name, "yaml") || strings.HasSuffix(name, "yml")) && !(info.IsDir()) {
ts, err := csplugin.ParsePluginConfigFile(name)
if err != nil {
@ -266,7 +267,7 @@ func (cli *cliNotifications) NewTestCmd() *cobra.Command {
if !ok {
return fmt.Errorf("plugin name: '%s' does not exist", args[0])
}
//Create a single profile with plugin name as notification name
// Create a single profile with plugin name as notification name
return pluginBroker.Init(cfg.PluginConfig, []*csconfig.ProfileCfg{
{
Notifications: []string{
@ -320,8 +321,8 @@ func (cli *cliNotifications) NewTestCmd() *cobra.Command {
Alert: alert,
}
//time.Sleep(2 * time.Second) // There's no mechanism to ensure notification has been sent
pluginTomb.Kill(fmt.Errorf("terminating"))
// time.Sleep(2 * time.Second) // There's no mechanism to ensure notification has been sent
pluginTomb.Kill(errors.New("terminating"))
pluginTomb.Wait()
return nil
@ -416,8 +417,8 @@ cscli notifications reinject <alert_id> -a '{"remediation": true,"scenario":"not
break
}
}
//time.Sleep(2 * time.Second) // There's no mechanism to ensure notification has been sent
pluginTomb.Kill(fmt.Errorf("terminating"))
// time.Sleep(2 * time.Second) // There's no mechanism to ensure notification has been sent
pluginTomb.Kill(errors.New("terminating"))
pluginTomb.Wait()
return nil

View file

@ -64,25 +64,22 @@ func (cli *cliPapi) NewStatusCmd() *cobra.Command {
cfg := cli.cfg()
dbClient, err = database.NewClient(cfg.DbConfig)
if err != nil {
return fmt.Errorf("unable to initialize database client: %s", err)
return fmt.Errorf("unable to initialize database client: %w", err)
}
apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, dbClient, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists)
if err != nil {
return fmt.Errorf("unable to initialize API client: %s", err)
return fmt.Errorf("unable to initialize API client: %w", err)
}
papi, err := apiserver.NewPAPI(apic, dbClient, cfg.API.Server.ConsoleConfig, log.GetLevel())
if err != nil {
return fmt.Errorf("unable to initialize PAPI client: %s", err)
return fmt.Errorf("unable to initialize PAPI client: %w", err)
}
perms, err := papi.GetPermissions()
if err != nil {
return fmt.Errorf("unable to get PAPI permissions: %s", err)
return fmt.Errorf("unable to get PAPI permissions: %w", err)
}
var lastTimestampStr *string
lastTimestampStr, err = dbClient.GetConfigItem(apiserver.PapiPullKey)
@ -118,27 +115,26 @@ func (cli *cliPapi) NewSyncCmd() *cobra.Command {
dbClient, err = database.NewClient(cfg.DbConfig)
if err != nil {
return fmt.Errorf("unable to initialize database client: %s", err)
return fmt.Errorf("unable to initialize database client: %w", err)
}
apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, dbClient, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists)
if err != nil {
return fmt.Errorf("unable to initialize API client: %s", err)
return fmt.Errorf("unable to initialize API client: %w", err)
}
t.Go(apic.Push)
papi, err := apiserver.NewPAPI(apic, dbClient, cfg.API.Server.ConsoleConfig, log.GetLevel())
if err != nil {
return fmt.Errorf("unable to initialize PAPI client: %s", err)
return fmt.Errorf("unable to initialize PAPI client: %w", err)
}
t.Go(papi.SyncDecisions)
err = papi.PullOnce(time.Time{}, true)
if err != nil {
return fmt.Errorf("unable to sync decisions: %s", err)
return fmt.Errorf("unable to sync decisions: %w", err)
}
log.Infof("Sending acknowledgements to CAPI")
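
The papi.go hunk above also switches the error verb from %s to %w when wrapping. The message looks the same either way, but only %w keeps the wrapped error in the chain, so callers can still match it with errors.Is or errors.As. A minimal illustration:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func open(path string) error {
	if _, err := os.Open(path); err != nil {
		// %s would flatten err to a string; %w preserves it for unwrapping.
		return fmt.Errorf("unable to open %s: %w", path, err)
	}

	return nil
}

func main() {
	err := open("/nonexistent/path")
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true, thanks to %w
}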

View file

@ -1,6 +1,7 @@
package require
import (
"errors"
"fmt"
"io"
@ -16,7 +17,7 @@ func LAPI(c *csconfig.Config) error {
}
if c.DisableAPI {
return fmt.Errorf("local API is disabled -- this command must be run on the local API machine")
return errors.New("local API is disabled -- this command must be run on the local API machine")
}
return nil
@ -32,7 +33,7 @@ func CAPI(c *csconfig.Config) error {
func PAPI(c *csconfig.Config) error {
if c.API.Server.OnlineClient.Credentials.PapiURL == "" {
return fmt.Errorf("no PAPI URL in configuration")
return errors.New("no PAPI URL in configuration")
}
return nil
@ -40,7 +41,7 @@ func PAPI(c *csconfig.Config) error {
func CAPIRegistered(c *csconfig.Config) error {
if c.API.Server.OnlineClient.Credentials == nil {
return fmt.Errorf("the Central API (CAPI) must be configured with 'cscli capi register'")
return errors.New("the Central API (CAPI) must be configured with 'cscli capi register'")
}
return nil
@ -56,7 +57,7 @@ func DB(c *csconfig.Config) error {
func Notifications(c *csconfig.Config) error {
if c.ConfigPaths.NotificationDir == "" {
return fmt.Errorf("config_paths.notification_dir is not set in crowdsec config")
return errors.New("config_paths.notification_dir is not set in crowdsec config")
}
return nil
@ -82,7 +83,7 @@ func Hub(c *csconfig.Config, remote *cwhub.RemoteHubCfg, logger *logrus.Logger)
local := c.Hub
if local == nil {
return nil, fmt.Errorf("you must configure cli before interacting with hub")
return nil, errors.New("you must configure cli before interacting with hub")
}
if logger == nil {


@ -2,6 +2,7 @@ package main
import (
"bytes"
"errors"
"fmt"
"os"
"os/exec"
@ -118,9 +119,11 @@ func runSetupDetect(cmd *cobra.Command, args []string) error {
switch detectConfigFile {
case "-":
log.Tracef("Reading detection rules from stdin")
detectReader = os.Stdin
default:
log.Tracef("Reading detection rules: %s", detectConfigFile)
detectReader, err = os.Open(detectConfigFile)
if err != nil {
return err
@ -171,6 +174,7 @@ func runSetupDetect(cmd *cobra.Command, args []string) error {
_, err := exec.LookPath("systemctl")
if err != nil {
log.Debug("systemctl not available: snubbing systemd")
snubSystemd = true
}
}
@ -182,6 +186,7 @@ func runSetupDetect(cmd *cobra.Command, args []string) error {
if forcedOSFamily == "" && forcedOSID != "" {
log.Debug("force-os-id is set: force-os-family defaults to 'linux'")
forcedOSFamily = "linux"
}
@ -219,6 +224,7 @@ func runSetupDetect(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
fmt.Println(setup)
return nil
@ -318,6 +324,7 @@ func runSetupInstallHub(cmd *cobra.Command, args []string) error {
func runSetupValidate(cmd *cobra.Command, args []string) error {
fromFile := args[0]
input, err := os.ReadFile(fromFile)
if err != nil {
return fmt.Errorf("while reading stdin: %w", err)
@ -325,7 +332,7 @@ func runSetupValidate(cmd *cobra.Command, args []string) error {
if err = setup.Validate(input); err != nil {
fmt.Printf("%v\n", err)
return fmt.Errorf("invalid setup file")
return errors.New("invalid setup file")
}
return nil
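Aside: the cscli setup hunks above treat the argument `-` as "read the detection rules from stdin", a convention several commands follow. A standalone sketch of that pattern; the helper name is made up and not part of the codebase.

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// openInput returns stdin when path is "-", otherwise it opens the file.
// The caller is responsible for closing the returned reader.
func openInput(path string) (io.ReadCloser, error) {
	if path == "-" {
		return io.NopCloser(os.Stdin), nil
	}
	return os.Open(path)
}

func main() {
	path := "-"
	if len(os.Args) > 1 {
		path = os.Args[1]
	}

	r, err := openInput(path)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer r.Close()

	data, err := io.ReadAll(r)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	fmt.Printf("read %d bytes\n", len(data))
}
```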


@ -1,13 +1,14 @@
package main
import (
"errors"
"fmt"
"os"
"slices"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require"
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
@ -36,7 +37,7 @@ cscli simulation disable crowdsecurity/ssh-bf`,
return err
}
if cli.cfg().Cscli.SimulationConfig == nil {
return fmt.Errorf("no simulation configured")
return errors.New("no simulation configured")
}
return nil
@ -73,7 +74,7 @@ func (cli *cliSimulation) NewEnableCmd() *cobra.Command {
if len(args) > 0 {
for _, scenario := range args {
var item = hub.GetItem(cwhub.SCENARIOS, scenario)
item := hub.GetItem(cwhub.SCENARIOS, scenario)
if item == nil {
log.Errorf("'%s' doesn't exist or is not a scenario", scenario)
continue
@ -99,11 +100,11 @@ func (cli *cliSimulation) NewEnableCmd() *cobra.Command {
log.Printf("simulation mode for '%s' enabled", scenario)
}
if err := cli.dumpSimulationFile(); err != nil {
return fmt.Errorf("simulation enable: %s", err)
return fmt.Errorf("simulation enable: %w", err)
}
} else if forceGlobalSimulation {
if err := cli.enableGlobalSimulation(); err != nil {
return fmt.Errorf("unable to enable global simulation mode: %s", err)
return fmt.Errorf("unable to enable global simulation mode: %w", err)
}
} else {
printHelp(cmd)
@ -146,11 +147,11 @@ func (cli *cliSimulation) NewDisableCmd() *cobra.Command {
log.Printf("simulation mode for '%s' disabled", scenario)
}
if err := cli.dumpSimulationFile(); err != nil {
return fmt.Errorf("simulation disable: %s", err)
return fmt.Errorf("simulation disable: %w", err)
}
} else if forceGlobalSimulation {
if err := cli.disableGlobalSimulation(); err != nil {
return fmt.Errorf("unable to disable global simulation mode: %s", err)
return fmt.Errorf("unable to disable global simulation mode: %w", err)
}
} else {
printHelp(cmd)
@ -202,7 +203,7 @@ func (cli *cliSimulation) enableGlobalSimulation() error {
cfg.Cscli.SimulationConfig.Exclusions = []string{}
if err := cli.dumpSimulationFile(); err != nil {
return fmt.Errorf("unable to dump simulation file: %s", err)
return fmt.Errorf("unable to dump simulation file: %w", err)
}
log.Printf("global simulation: enabled")
@ -215,12 +216,12 @@ func (cli *cliSimulation) dumpSimulationFile() error {
newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig)
if err != nil {
return fmt.Errorf("unable to marshal simulation configuration: %s", err)
return fmt.Errorf("unable to marshal simulation configuration: %w", err)
}
err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644)
if err != nil {
return fmt.Errorf("write simulation config in '%s' failed: %s", cfg.ConfigPaths.SimulationFilePath, err)
return fmt.Errorf("write simulation config in '%s' failed: %w", cfg.ConfigPaths.SimulationFilePath, err)
}
log.Debugf("updated simulation file %s", cfg.ConfigPaths.SimulationFilePath)
@ -237,12 +238,12 @@ func (cli *cliSimulation) disableGlobalSimulation() error {
newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig)
if err != nil {
return fmt.Errorf("unable to marshal new simulation configuration: %s", err)
return fmt.Errorf("unable to marshal new simulation configuration: %w", err)
}
err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644)
if err != nil {
return fmt.Errorf("unable to write new simulation config in '%s': %s", cfg.ConfigPaths.SimulationFilePath, err)
return fmt.Errorf("unable to write new simulation config in '%s': %w", cfg.ConfigPaths.SimulationFilePath, err)
}
log.Printf("global simulation: disabled")
@ -269,8 +270,10 @@ func (cli *cliSimulation) status() {
}
} else {
log.Println("global simulation: disabled")
if len(cfg.Cscli.SimulationConfig.Exclusions) > 0 {
log.Println("Scenarios in simulation mode :")
for _, scenario := range cfg.Cscli.SimulationConfig.Exclusions {
log.Printf(" - %s", scenario)
}
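Aside: several of these commits move from `gopkg.in/yaml.v2` to `gopkg.in/yaml.v3` and write the result with an explicit octal literal (`0o644`). A minimal sketch of that marshal-and-write pattern; the struct and output path below are made up.

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/yaml.v3"
)

// simulationConfig is a stand-in for the real configuration type.
type simulationConfig struct {
	Simulation bool     `yaml:"simulation"`
	Exclusions []string `yaml:"exclusions,omitempty"`
}

func main() {
	cfg := simulationConfig{
		Simulation: false,
		Exclusions: []string{"crowdsecurity/ssh-bf"},
	}

	out, err := yaml.Marshal(cfg)
	if err != nil {
		fmt.Fprintf(os.Stderr, "unable to marshal simulation configuration: %v\n", err)
		os.Exit(1)
	}

	// 0o644 is the modern octal syntax; yaml.v3 indents nested keys with 4 spaces by default.
	if err := os.WriteFile("/tmp/simulation.yaml", out, 0o644); err != nil {
		fmt.Fprintf(os.Stderr, "write failed: %v\n", err)
		os.Exit(1)
	}
}
```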


@ -4,6 +4,7 @@ import (
"archive/zip"
"bytes"
"context"
"errors"
"fmt"
"io"
"net/http"
@ -12,12 +13,14 @@ import (
"path/filepath"
"regexp"
"strings"
"time"
"github.com/blackfireio/osinfo"
"github.com/go-openapi/strfmt"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/crowdsecurity/go-cs-lib/trace"
"github.com/crowdsecurity/go-cs-lib/version"
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require"
@ -47,6 +50,7 @@ const (
SUPPORT_CAPI_STATUS_PATH = "capi_status.txt"
SUPPORT_ACQUISITION_CONFIG_BASE_PATH = "config/acquis/"
SUPPORT_CROWDSEC_PROFILE_PATH = "config/profiles.yaml"
SUPPORT_CRASH_PATH = "crash/"
)
// from https://github.com/acarl005/stripansi
@ -62,7 +66,7 @@ func collectMetrics() ([]byte, []byte, error) {
if csConfig.Cscli.PrometheusUrl == "" {
log.Warn("No Prometheus URL configured, metrics will not be collected")
return nil, nil, fmt.Errorf("prometheus_uri is not set")
return nil, nil, errors.New("prometheus_uri is not set")
}
humanMetrics := bytes.NewBuffer(nil)
@ -70,7 +74,7 @@ func collectMetrics() ([]byte, []byte, error) {
ms := NewMetricStore()
if err := ms.Fetch(csConfig.Cscli.PrometheusUrl); err != nil {
return nil, nil, fmt.Errorf("could not fetch prometheus metrics: %s", err)
return nil, nil, fmt.Errorf("could not fetch prometheus metrics: %w", err)
}
if err := ms.Format(humanMetrics, nil, "human", false); err != nil {
@ -79,21 +83,21 @@ func collectMetrics() ([]byte, []byte, error) {
req, err := http.NewRequest(http.MethodGet, csConfig.Cscli.PrometheusUrl, nil)
if err != nil {
return nil, nil, fmt.Errorf("could not create requests to prometheus endpoint: %s", err)
return nil, nil, fmt.Errorf("could not create requests to prometheus endpoint: %w", err)
}
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return nil, nil, fmt.Errorf("could not get metrics from prometheus endpoint: %s", err)
return nil, nil, fmt.Errorf("could not get metrics from prometheus endpoint: %w", err)
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, nil, fmt.Errorf("could not read metrics from prometheus endpoint: %s", err)
return nil, nil, fmt.Errorf("could not read metrics from prometheus endpoint: %w", err)
}
return humanMetrics.Bytes(), body, nil
@ -121,19 +125,18 @@ func collectOSInfo() ([]byte, error) {
log.Info("Collecting OS info")
info, err := osinfo.GetOSInfo()
if err != nil {
return nil, err
}
w := bytes.NewBuffer(nil)
w.WriteString(fmt.Sprintf("Architecture: %s\n", info.Architecture))
w.WriteString(fmt.Sprintf("Family: %s\n", info.Family))
w.WriteString(fmt.Sprintf("ID: %s\n", info.ID))
w.WriteString(fmt.Sprintf("Name: %s\n", info.Name))
w.WriteString(fmt.Sprintf("Codename: %s\n", info.Codename))
w.WriteString(fmt.Sprintf("Version: %s\n", info.Version))
w.WriteString(fmt.Sprintf("Build: %s\n", info.Build))
fmt.Fprintf(w, "Architecture: %s\n", info.Architecture)
fmt.Fprintf(w, "Family: %s\n", info.Family)
fmt.Fprintf(w, "ID: %s\n", info.ID)
fmt.Fprintf(w, "Name: %s\n", info.Name)
fmt.Fprintf(w, "Codename: %s\n", info.Codename)
fmt.Fprintf(w, "Version: %s\n", info.Version)
fmt.Fprintf(w, "Build: %s\n", info.Build)
return w.Bytes(), nil
}
@ -163,7 +166,7 @@ func collectBouncers(dbClient *database.Client) ([]byte, error) {
bouncers, err := dbClient.ListBouncers()
if err != nil {
return nil, fmt.Errorf("unable to list bouncers: %s", err)
return nil, fmt.Errorf("unable to list bouncers: %w", err)
}
getBouncersTable(out, bouncers)
@ -176,7 +179,7 @@ func collectAgents(dbClient *database.Client) ([]byte, error) {
machines, err := dbClient.ListMachines()
if err != nil {
return nil, fmt.Errorf("unable to list machines: %s", err)
return nil, fmt.Errorf("unable to list machines: %w", err)
}
getAgentsTable(out, machines)
@ -196,7 +199,7 @@ func collectAPIStatus(login string, password string, endpoint string, prefix str
return []byte(fmt.Sprintf("cannot parse API URL: %s", err))
}
scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS)
scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS)
if err != nil {
return []byte(fmt.Sprintf("could not collect scenarios: %s", err))
}
@ -264,6 +267,11 @@ func collectAcquisitionConfig() map[string][]byte {
return ret
}
func collectCrash() ([]string, error) {
log.Info("Collecting crash dumps")
return trace.List()
}
type cliSupport struct{}
func NewCLISupport() *cliSupport {
@ -311,7 +319,7 @@ cscli support dump -f /tmp/crowdsec-support.zip
`,
Args: cobra.NoArgs,
DisableAutoGenTag: true,
Run: func(_ *cobra.Command, _ []string) {
RunE: func(_ *cobra.Command, _ []string) error {
var err error
var skipHub, skipDB, skipCAPI, skipLAPI, skipAgent bool
infos := map[string][]byte{
@ -431,11 +439,31 @@ cscli support dump -f /tmp/crowdsec-support.zip
}
}
crash, err := collectCrash()
if err != nil {
log.Errorf("could not collect crash dumps: %s", err)
}
for _, filename := range crash {
content, err := os.ReadFile(filename)
if err != nil {
log.Errorf("could not read crash dump %s: %s", filename, err)
}
infos[SUPPORT_CRASH_PATH+filepath.Base(filename)] = content
}
w := bytes.NewBuffer(nil)
zipWriter := zip.NewWriter(w)
for filename, data := range infos {
fw, err := zipWriter.Create(filename)
header := &zip.FileHeader{
Name: filename,
Method: zip.Deflate,
// TODO: retain mtime where possible (esp. trace)
Modified: time.Now(),
}
fw, err := zipWriter.CreateHeader(header)
if err != nil {
log.Errorf("Could not add zip entry for %s: %s", filename, err)
continue
@ -445,15 +473,19 @@ cscli support dump -f /tmp/crowdsec-support.zip
err = zipWriter.Close()
if err != nil {
log.Fatalf("could not finalize zip file: %s", err)
return fmt.Errorf("could not finalize zip file: %s", err)
}
if outFile == "-" {
_, err = os.Stdout.Write(w.Bytes())
return err
}
err = os.WriteFile(outFile, w.Bytes(), 0o600)
if err != nil {
log.Fatalf("could not write zip file to %s: %s", outFile, err)
return fmt.Errorf("could not write zip file to %s: %s", outFile, err)
}
log.Infof("Written zip file to %s", outFile)
return nil
},
}
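Aside: the support dump above switches from `zipWriter.Create` to `zipWriter.CreateHeader` so each archive entry carries a compression method and a modification time. A self-contained sketch of that `archive/zip` pattern; the entry names and contents are made up.

```go
package main

import (
	"archive/zip"
	"bytes"
	"fmt"
	"log"
	"time"
)

func main() {
	buf := bytes.NewBuffer(nil)
	zw := zip.NewWriter(buf)

	files := map[string][]byte{
		"version.txt":       []byte("v1.6.0\n"),
		"crash/trace-1.txt": []byte("goroutine 1 [running]:\n"),
	}

	for name, data := range files {
		// CreateHeader lets us control the modification time; plain Create
		// does not expose the header fields.
		hdr := &zip.FileHeader{
			Name:     name,
			Method:   zip.Deflate,
			Modified: time.Now(),
		}

		fw, err := zw.CreateHeader(hdr)
		if err != nil {
			log.Fatalf("could not add zip entry for %s: %s", name, err)
		}

		if _, err := fw.Write(data); err != nil {
			log.Fatalf("could not write zip entry for %s: %s", name, err)
		}
	}

	if err := zw.Close(); err != nil {
		log.Fatalf("could not finalize zip file: %s", err)
	}

	fmt.Printf("archive is %d bytes\n", buf.Len())
}
```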


@ -9,7 +9,7 @@ import (
"time"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
"github.com/crowdsecurity/go-cs-lib/trace"
@ -207,7 +207,7 @@ func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub
}
func dumpBucketsPour() {
fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucketpour-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucketpour-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666)
if err != nil {
log.Fatalf("open: %s", err)
}
@ -230,7 +230,7 @@ func dumpBucketsPour() {
}
func dumpParserState() {
fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "parser-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "parser-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666)
if err != nil {
log.Fatalf("open: %s", err)
}
@ -253,7 +253,7 @@ func dumpParserState() {
}
func dumpOverflowState() {
fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucket-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucket-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666)
if err != nil {
log.Fatalf("open: %s", err)
}


@ -17,12 +17,12 @@ import (
)
func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub.Hub) (*apiclient.ApiClient, error) {
scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS)
scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS)
if err != nil {
return nil, fmt.Errorf("loading list of installed hub scenarios: %w", err)
}
appsecRules, err := hub.GetInstalledItemNames(cwhub.APPSEC_RULES)
appsecRules, err := hub.GetInstalledNamesByType(cwhub.APPSEC_RULES)
if err != nil {
return nil, fmt.Errorf("loading list of installed hub appsec rules: %w", err)
}
@ -52,11 +52,11 @@ func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub.
PapiURL: papiURL,
VersionPrefix: "v1",
UpdateScenario: func() ([]string, error) {
scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS)
scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS)
if err != nil {
return nil, err
}
appsecRules, err := hub.GetInstalledItemNames(cwhub.APPSEC_RULES)
appsecRules, err := hub.GetInstalledNamesByType(cwhub.APPSEC_RULES)
if err != nil {
return nil, err
}


@ -6,6 +6,7 @@ import (
"fmt"
_ "net/http/pprof"
"os"
"path/filepath"
"runtime"
"runtime/pprof"
"strings"
@ -14,6 +15,8 @@ import (
log "github.com/sirupsen/logrus"
"gopkg.in/tomb.v2"
"github.com/crowdsecurity/go-cs-lib/trace"
"github.com/crowdsecurity/crowdsec/pkg/acquisition"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
"github.com/crowdsecurity/crowdsec/pkg/csplugin"
@ -96,8 +99,8 @@ func LoadBuckets(cConfig *csconfig.Config, hub *cwhub.Hub) error {
buckets = leakybucket.NewBuckets()
log.Infof("Loading %d scenario files", len(files))
holders, outputEventChan, err = leakybucket.LoadBuckets(cConfig.Crowdsec, hub, files, &bucketsTomb, buckets, flags.OrderEvent)
holders, outputEventChan, err = leakybucket.LoadBuckets(cConfig.Crowdsec, hub, files, &bucketsTomb, buckets, flags.OrderEvent)
if err != nil {
return fmt.Errorf("scenario loading failed: %w", err)
}
@ -230,6 +233,10 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, quiet boo
return nil, fmt.Errorf("while loading configuration file: %w", err)
}
if err := trace.Init(filepath.Join(cConfig.ConfigPaths.DataDir, "trace")); err != nil {
return nil, fmt.Errorf("while setting up trace directory: %w", err)
}
cConfig.Common.LogLevel = newLogLevel(cConfig.Common.LogLevel, flags)
if dumpFolder != "" {


@ -3,7 +3,6 @@ package main
import (
"fmt"
"net/http"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
@ -22,7 +21,8 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/parser"
)
/*prometheus*/
// Prometheus
var globalParserHits = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "cs_parser_hits_total",
@ -30,6 +30,7 @@ var globalParserHits = prometheus.NewCounterVec(
},
[]string{"source", "type"},
)
var globalParserHitsOk = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "cs_parser_hits_ok_total",
@ -37,6 +38,7 @@ var globalParserHitsOk = prometheus.NewCounterVec(
},
[]string{"source", "type"},
)
var globalParserHitsKo = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "cs_parser_hits_ko_total",
@ -116,9 +118,7 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha
return
}
decisionsFilters := make(map[string][]string, 0)
decisions, err := dbClient.QueryDecisionCountByScenario(decisionsFilters)
decisions, err := dbClient.QueryDecisionCountByScenario()
if err != nil {
log.Errorf("Error querying decisions for metrics: %v", err)
next.ServeHTTP(w, r)
@ -139,7 +139,6 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha
}
alerts, err := dbClient.AlertsCountPerScenario(alertsFilter)
if err != nil {
log.Errorf("Error querying alerts for metrics: %v", err)
next.ServeHTTP(w, r)
@ -194,7 +193,6 @@ func servePrometheus(config *csconfig.PrometheusCfg, dbClient *database.Client,
defer trace.CatchPanic("crowdsec/servePrometheus")
http.Handle("/metrics", computeDynamicMetrics(promhttp.Handler(), dbClient))
log.Debugf("serving metrics after %s ms", time.Since(crowdsecT0))
if err := http.ListenAndServe(fmt.Sprintf("%s:%d", config.ListenAddr, config.ListenPort), nil); err != nil {
// in time machine, we most likely have the LAPI using the port


@ -391,7 +391,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error {
}
if cConfig.Common != nil && cConfig.Common.Daemonize {
csdaemon.NotifySystemd(log.StandardLogger())
csdaemon.Notify(csdaemon.Ready, log.StandardLogger())
// wait for signals
return HandleSignals(cConfig)
}


@ -5,10 +5,11 @@ import (
"fmt"
"os"
"github.com/crowdsecurity/crowdsec/pkg/protobufs"
"github.com/hashicorp/go-hclog"
plugin "github.com/hashicorp/go-plugin"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
"github.com/crowdsecurity/crowdsec/pkg/protobufs"
)
type PluginConfig struct {
@ -32,6 +33,7 @@ func (s *DummyPlugin) Notify(ctx context.Context, notification *protobufs.Notifi
if _, ok := s.PluginConfigByName[notification.Name]; !ok {
return nil, fmt.Errorf("invalid plugin config name %s", notification.Name)
}
cfg := s.PluginConfigByName[notification.Name]
if cfg.LogLevel != nil && *cfg.LogLevel != "" {
@ -42,19 +44,22 @@ func (s *DummyPlugin) Notify(ctx context.Context, notification *protobufs.Notifi
logger.Debug(notification.Text)
if cfg.OutputFile != nil && *cfg.OutputFile != "" {
f, err := os.OpenFile(*cfg.OutputFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
f, err := os.OpenFile(*cfg.OutputFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
if err != nil {
logger.Error(fmt.Sprintf("Cannot open notification file: %s", err))
}
if _, err := f.WriteString(notification.Text + "\n"); err != nil {
f.Close()
logger.Error(fmt.Sprintf("Cannot write notification to file: %s", err))
}
err = f.Close()
if err != nil {
logger.Error(fmt.Sprintf("Cannot close notification file: %s", err))
}
}
fmt.Println(notification.Text)
return &protobufs.Empty{}, nil
@ -64,11 +69,12 @@ func (s *DummyPlugin) Configure(ctx context.Context, config *protobufs.Config) (
d := PluginConfig{}
err := yaml.Unmarshal(config.Config, &d)
s.PluginConfigByName[d.Name] = d
return &protobufs.Empty{}, err
}
func main() {
var handshake = plugin.HandshakeConfig{
handshake := plugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: "CROWDSEC_PLUGIN_KEY",
MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"),


@ -2,15 +2,17 @@ package main
import (
"context"
"errors"
"fmt"
"os"
"time"
"github.com/crowdsecurity/crowdsec/pkg/protobufs"
"github.com/hashicorp/go-hclog"
plugin "github.com/hashicorp/go-plugin"
mail "github.com/xhit/go-simple-mail/v2"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
"github.com/crowdsecurity/crowdsec/pkg/protobufs"
)
var baseLogger hclog.Logger = hclog.New(&hclog.LoggerOptions{
@ -72,19 +74,20 @@ func (n *EmailPlugin) Configure(ctx context.Context, config *protobufs.Config) (
}
if d.Name == "" {
return nil, fmt.Errorf("name is required")
return nil, errors.New("name is required")
}
if d.SMTPHost == "" {
return nil, fmt.Errorf("SMTP host is not set")
return nil, errors.New("SMTP host is not set")
}
if d.ReceiverEmails == nil || len(d.ReceiverEmails) == 0 {
return nil, fmt.Errorf("receiver emails are not set")
return nil, errors.New("receiver emails are not set")
}
n.ConfigByName[d.Name] = d
baseLogger.Debug(fmt.Sprintf("Email plugin '%s' use SMTP host '%s:%d'", d.Name, d.SMTPHost, d.SMTPPort))
return &protobufs.Empty{}, nil
}
@ -92,6 +95,7 @@ func (n *EmailPlugin) Notify(ctx context.Context, notification *protobufs.Notifi
if _, ok := n.ConfigByName[notification.Name]; !ok {
return nil, fmt.Errorf("invalid plugin config name %s", notification.Name)
}
cfg := n.ConfigByName[notification.Name]
logger := baseLogger.Named(cfg.Name)
@ -117,6 +121,7 @@ func (n *EmailPlugin) Notify(ctx context.Context, notification *protobufs.Notifi
server.ConnectTimeout, err = time.ParseDuration(cfg.ConnectTimeout)
if err != nil {
logger.Warn(fmt.Sprintf("invalid connect timeout '%s', using default '10s'", cfg.ConnectTimeout))
server.ConnectTimeout = 10 * time.Second
}
}
@ -125,15 +130,18 @@ func (n *EmailPlugin) Notify(ctx context.Context, notification *protobufs.Notifi
server.SendTimeout, err = time.ParseDuration(cfg.SendTimeout)
if err != nil {
logger.Warn(fmt.Sprintf("invalid send timeout '%s', using default '10s'", cfg.SendTimeout))
server.SendTimeout = 10 * time.Second
}
}
logger.Debug("making smtp connection")
smtpClient, err := server.Connect()
if err != nil {
return &protobufs.Empty{}, err
}
logger.Debug("smtp connection done")
email := mail.NewMSG()
@ -146,12 +154,14 @@ func (n *EmailPlugin) Notify(ctx context.Context, notification *protobufs.Notifi
if err != nil {
return &protobufs.Empty{}, err
}
logger.Info(fmt.Sprintf("sent email to %v", cfg.ReceiverEmails))
return &protobufs.Empty{}, nil
}
func main() {
var handshake = plugin.HandshakeConfig{
handshake := plugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: "CROWDSEC_PLUGIN_KEY",
MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"),


@ -12,10 +12,11 @@ import (
"os"
"strings"
"github.com/crowdsecurity/crowdsec/pkg/protobufs"
"github.com/hashicorp/go-hclog"
plugin "github.com/hashicorp/go-plugin"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
"github.com/crowdsecurity/crowdsec/pkg/protobufs"
)
type PluginConfig struct {
@ -90,18 +91,23 @@ func getTLSClient(c *PluginConfig) error {
tlsConfig.Certificates = []tls.Certificate{cert}
}
transport := &http.Transport{
TLSClientConfig: tlsConfig,
}
if c.UnixSocket != "" {
logger.Info(fmt.Sprintf("Using socket '%s'", c.UnixSocket))
transport.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
return net.Dial("unix", strings.TrimSuffix(c.UnixSocket, "/"))
}
}
c.Client = &http.Client{
Transport: transport,
}
return nil
}
@ -109,6 +115,7 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific
if _, ok := s.PluginConfigByName[notification.Name]; !ok {
return nil, fmt.Errorf("invalid plugin config name %s", notification.Name)
}
cfg := s.PluginConfigByName[notification.Name]
if cfg.LogLevel != nil && *cfg.LogLevel != "" {
@ -121,11 +128,14 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific
if err != nil {
return nil, err
}
for headerName, headerValue := range cfg.Headers {
logger.Debug(fmt.Sprintf("adding header %s: %s", headerName, headerValue))
request.Header.Add(headerName, headerValue)
}
logger.Debug(fmt.Sprintf("making HTTP %s call to %s with body %s", cfg.Method, cfg.URL, notification.Text))
resp, err := cfg.Client.Do(request.WithContext(ctx))
if err != nil {
logger.Error(fmt.Sprintf("Failed to make HTTP request : %s", err))
@ -135,7 +145,7 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific
respData, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read response body got error %s", err)
return nil, fmt.Errorf("failed to read response body got error %w", err)
}
logger.Debug(fmt.Sprintf("got response %s", string(respData)))
@ -143,6 +153,7 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
logger.Warn(fmt.Sprintf("HTTP server returned non 200 status code: %d", resp.StatusCode))
logger.Debug(fmt.Sprintf("HTTP server returned body: %s", string(respData)))
return &protobufs.Empty{}, nil
}
@ -151,21 +162,25 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific
func (s *HTTPPlugin) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) {
d := PluginConfig{}
err := yaml.Unmarshal(config.Config, &d)
if err != nil {
return nil, err
}
err = getTLSClient(&d)
if err != nil {
return nil, err
}
s.PluginConfigByName[d.Name] = d
logger.Debug(fmt.Sprintf("HTTP plugin '%s' use URL '%s'", d.Name, d.URL))
return &protobufs.Empty{}, err
}
func main() {
var handshake = plugin.HandshakeConfig{
handshake := plugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: "CROWDSEC_PLUGIN_KEY",
MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"),
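Aside: the HTTP notification plugin above can route its requests through a unix socket by overriding the transport's `DialContext`. A standalone sketch of that technique — the socket path and URL below are made up; once the dialer is pinned to the socket, the host in the URL is only used for the Host header.

```go
package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"os"
	"time"
)

func newUnixSocketClient(socketPath string) *http.Client {
	transport := &http.Transport{
		// Every connection is dialed to the unix socket, regardless of the
		// host and port that appear in the request URL.
		DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", socketPath)
		},
	}

	return &http.Client{
		Transport: transport,
		Timeout:   10 * time.Second,
	}
}

func main() {
	client := newUnixSocketClient("/var/run/example.sock")

	resp, err := client.Get("http://localhost/v1/notify")
	if err != nil {
		fmt.Fprintf(os.Stderr, "request failed: %s\n", err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	fmt.Println("status:", resp.Status)
}
```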


@ -5,12 +5,12 @@ import (
"fmt"
"os"
"github.com/crowdsecurity/crowdsec/pkg/protobufs"
"github.com/hashicorp/go-hclog"
plugin "github.com/hashicorp/go-plugin"
"github.com/slack-go/slack"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
"github.com/crowdsecurity/crowdsec/pkg/protobufs"
)
type PluginConfig struct {
@ -33,13 +33,16 @@ func (n *Notify) Notify(ctx context.Context, notification *protobufs.Notificatio
if _, ok := n.ConfigByName[notification.Name]; !ok {
return nil, fmt.Errorf("invalid plugin config name %s", notification.Name)
}
cfg := n.ConfigByName[notification.Name]
if cfg.LogLevel != nil && *cfg.LogLevel != "" {
logger.SetLevel(hclog.LevelFromString(*cfg.LogLevel))
}
logger.Info(fmt.Sprintf("found notify signal for %s config", notification.Name))
logger.Debug(fmt.Sprintf("posting to %s webhook, message %s", cfg.Webhook, notification.Text))
err := slack.PostWebhookContext(ctx, n.ConfigByName[notification.Name].Webhook, &slack.WebhookMessage{
Text: notification.Text,
})
@ -52,16 +55,19 @@ func (n *Notify) Notify(ctx context.Context, notification *protobufs.Notificatio
func (n *Notify) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) {
d := PluginConfig{}
if err := yaml.Unmarshal(config.Config, &d); err != nil {
return nil, err
}
n.ConfigByName[d.Name] = d
logger.Debug(fmt.Sprintf("Slack plugin '%s' use URL '%s'", d.Name, d.Webhook))
return &protobufs.Empty{}, nil
}
func main() {
var handshake = plugin.HandshakeConfig{
handshake := plugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: "CROWDSEC_PLUGIN_KEY",
MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"),


@ -10,11 +10,11 @@ import (
"os"
"strings"
"github.com/crowdsecurity/crowdsec/pkg/protobufs"
"github.com/hashicorp/go-hclog"
plugin "github.com/hashicorp/go-plugin"
"gopkg.in/yaml.v3"
"gopkg.in/yaml.v2"
"github.com/crowdsecurity/crowdsec/pkg/protobufs"
)
var logger hclog.Logger = hclog.New(&hclog.LoggerOptions{
@ -44,6 +44,7 @@ func (s *Splunk) Notify(ctx context.Context, notification *protobufs.Notificatio
if _, ok := s.PluginConfigByName[notification.Name]; !ok {
return &protobufs.Empty{}, fmt.Errorf("splunk invalid config name %s", notification.Name)
}
cfg := s.PluginConfigByName[notification.Name]
if cfg.LogLevel != nil && *cfg.LogLevel != "" {
@ -53,6 +54,7 @@ func (s *Splunk) Notify(ctx context.Context, notification *protobufs.Notificatio
logger.Info(fmt.Sprintf("received notify signal for %s config", notification.Name))
p := Payload{Event: notification.Text}
data, err := json.Marshal(p)
if err != nil {
return &protobufs.Empty{}, err
@ -65,6 +67,7 @@ func (s *Splunk) Notify(ctx context.Context, notification *protobufs.Notificatio
req.Header.Add("Authorization", fmt.Sprintf("Splunk %s", cfg.Token))
logger.Debug(fmt.Sprintf("posting event %s to %s", string(data), req.URL))
resp, err := s.Client.Do(req.WithContext(ctx))
if err != nil {
return &protobufs.Empty{}, err
@ -73,15 +76,19 @@ func (s *Splunk) Notify(ctx context.Context, notification *protobufs.Notificatio
if resp.StatusCode != http.StatusOK {
content, err := io.ReadAll(resp.Body)
if err != nil {
return &protobufs.Empty{}, fmt.Errorf("got non 200 response and failed to read error %s", err)
return &protobufs.Empty{}, fmt.Errorf("got non 200 response and failed to read error %w", err)
}
return &protobufs.Empty{}, fmt.Errorf("got non 200 response %s", string(content))
}
respData, err := io.ReadAll(resp.Body)
if err != nil {
return &protobufs.Empty{}, fmt.Errorf("failed to read response body got error %s", err)
return &protobufs.Empty{}, fmt.Errorf("failed to read response body got error %w", err)
}
logger.Debug(fmt.Sprintf("got response %s", string(respData)))
return &protobufs.Empty{}, nil
}
@ -90,11 +97,12 @@ func (s *Splunk) Configure(ctx context.Context, config *protobufs.Config) (*prot
err := yaml.Unmarshal(config.Config, &d)
s.PluginConfigByName[d.Name] = d
logger.Debug(fmt.Sprintf("Splunk plugin '%s' use URL '%s'", d.Name, d.URL))
return &protobufs.Empty{}, err
}
func main() {
var handshake = plugin.HandshakeConfig{
handshake := plugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: "CROWDSEC_PLUGIN_KEY",
MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"),


@ -134,6 +134,7 @@ labels:
type: apache2
```
## Recommended configuration
### Volumes
@ -145,14 +146,6 @@ to avoid losing credentials and decision data in case of container destruction a
* Acquisition: `/etc/crowdsec/acquis.d` and/or `/etc/crowdsec.acquis.yaml` (yes, they can be nested in `/etc/crowdsec`)
* Database when using SQLite (default): `/var/lib/crowdsec/data`
### Hub updates
To ensure you have the latest version of the collections, scenarios, parsers, etc., you can set the variable `DO_HUB_UPGRADE` to true.
This will perform an update/upgrade of the hub every time the container is started.
Be aware that if your container is misbehaving and caught in a restart loop, the CrowdSec hub may ban your IP for some time and your containers
will run with the version of the hub that is cached in the container's image. If you enable `DO_HUB_UPGRADE`, do it when your infrastructure is running
correctly and make sure you have some monitoring in place.
## Start a Crowdsec instance
@ -323,7 +316,7 @@ config.yaml) each time the container is run.
| `BOUNCERS_ALLOWED_OU` | bouncer-ou | OU values allowed for bouncers, separated by comma |
| | | |
| __Hub management__ | | |
| `DO_HUB_UPGRADE` | false | Force hub update / upgrade when the container starts. If for some reason the container restarts too often, it may lead to a temporary ban from hub updates. |
| `NO_HUB_UPGRADE` | false | Skip hub update / upgrade when the container starts |
| `COLLECTIONS` | | Collections to install, separated by space: `-e COLLECTIONS="crowdsecurity/linux crowdsecurity/apache2"` |
| `PARSERS` | | Parsers to install, separated by space |
| `SCENARIOS` | | Scenarios to install, separated by space |


@ -304,8 +304,9 @@ conf_set_if "$PLUGIN_DIR" '.config_paths.plugin_dir = strenv(PLUGIN_DIR)'
## Install hub items
if istrue "$DO_HUB_UPGRADE"; then
cscli hub update || true
cscli hub update || true
if isfalse "$NO_HUB_UPGRADE"; then
cscli hub upgrade || true
fi


@ -1,22 +0,0 @@
#!/usr/bin/env bash
set -eu
# pre-download everything but don't install anything
echo "Pre-downloading Hub content..."
types=$(cscli hub types -o raw)
for itemtype in $types; do
ALL_ITEMS=$(cscli "$itemtype" list -a -o json | itemtype="$itemtype" yq '.[env(itemtype)][] | .name')
if [[ -n "${ALL_ITEMS}" ]]; then
#shellcheck disable=SC2086
cscli "$itemtype" install \
$ALL_ITEMS \
--download-only \
--error
fi
done
echo " done."

go.mod

@ -1,6 +1,6 @@
module github.com/crowdsecurity/crowdsec
go 1.21
go 1.22
// Don't use the toolchain directive to avoid uncontrolled downloads during
// a build, especially in sandboxed environments (freebsd, gentoo...).
@ -27,7 +27,7 @@ require (
github.com/corazawaf/libinjection-go v0.1.2
github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607
github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26
github.com/crowdsecurity/go-cs-lib v0.0.6
github.com/crowdsecurity/go-cs-lib v0.0.10
github.com/crowdsecurity/grokky v0.2.1
github.com/crowdsecurity/machineid v1.0.2
github.com/davecgh/go-spew v1.1.1

go.sum

@ -102,8 +102,8 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h
github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA=
github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU=
github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk=
github.com/crowdsecurity/go-cs-lib v0.0.6 h1:Ef6MylXe0GaJE9vrfvxEdbHb31+JUP1os+murPz7Pos=
github.com/crowdsecurity/go-cs-lib v0.0.6/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k=
github.com/crowdsecurity/go-cs-lib v0.0.10 h1:Twt/y/rYCUspGY1zxDnGurL2svRSREAz+2+puLepd9c=
github.com/crowdsecurity/go-cs-lib v0.0.10/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k=
github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4=
github.com/crowdsecurity/grokky v0.2.1/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM=
github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc=


@ -104,7 +104,7 @@ func LoadConsoleContext(c *csconfig.Config, hub *cwhub.Hub) error {
c.Crowdsec.ContextToSend = make(map[string][]string, 0)
if hub != nil {
items, err := hub.GetInstalledItems(cwhub.CONTEXTS)
items, err := hub.GetInstalledItemsByType(cwhub.CONTEXTS)
if err != nil {
return err
}


@ -84,11 +84,16 @@ func recoverFromPanic(c *gin.Context) {
}
if brokenPipe {
log.Warningf("client %s disconnected : %s", c.ClientIP(), err)
log.Warningf("client %s disconnected: %s", c.ClientIP(), err)
c.Abort()
} else {
filename := trace.WriteStackTrace(err)
log.Warningf("client %s error : %s", c.ClientIP(), err)
log.Warningf("client %s error: %s", c.ClientIP(), err)
filename, err := trace.WriteStackTrace(err)
if err != nil {
log.Errorf("also while writing stacktrace: %s", err)
}
log.Warningf("stacktrace written to %s, please join to your issue", filename)
c.AbortWithStatus(http.StatusInternalServerError)
}
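Aside: the middleware above recovers from panics, writes a stack trace to disk and aborts the request with a 500. A generic `net/http` sketch of the same recover-and-500 idea, without the gin or go-cs-lib specifics; the route and port are made up.

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"runtime/debug"
)

// recoverMiddleware turns a panic in the next handler into a 500 response
// instead of killing the whole server.
func recoverMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			if err := recover(); err != nil {
				log.Printf("client %s error: %v", r.RemoteAddr, err)
				log.Printf("stacktrace:\n%s", debug.Stack())
				http.Error(w, "internal server error", http.StatusInternalServerError)
			}
		}()
		next.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/boom", func(http.ResponseWriter, *http.Request) {
		panic("something went wrong")
	})

	fmt.Println("listening on :8080")
	log.Fatal(http.ListenAndServe(":8080", recoverMiddleware(mux)))
}
```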


@ -76,26 +76,24 @@ func (c *Config) LoadDBConfig(inCli bool) error {
if c.DbConfig.UseWal == nil {
dbDir := filepath.Dir(c.DbConfig.DbPath)
isNetwork, fsType, err := types.IsNetworkFS(dbDir)
if err != nil {
switch {
case err != nil:
log.Warnf("unable to determine if database is on network filesystem: %s", err)
log.Warning("You are using sqlite without WAL, this can have a performance impact. If you do not store the database in a network share, set db_config.use_wal to true. Set explicitly to false to disable this warning.")
return nil
}
if isNetwork {
case isNetwork:
log.Debugf("database is on network filesystem (%s), setting useWal to false", fsType)
c.DbConfig.UseWal = ptr.Of(false)
} else {
default:
log.Debugf("database is on local filesystem (%s), setting useWal to true", fsType)
c.DbConfig.UseWal = ptr.Of(true)
}
} else if *c.DbConfig.UseWal {
dbDir := filepath.Dir(c.DbConfig.DbPath)
isNetwork, fsType, err := types.IsNetworkFS(dbDir)
if err != nil {
switch {
case err != nil:
log.Warnf("unable to determine if database is on network filesystem: %s", err)
return nil
}
if isNetwork {
case isNetwork:
log.Warnf("database seems to be stored on a network share (%s), but useWal is set to true. Proceed at your own risk.", fsType)
}
}


@ -214,9 +214,9 @@ func (h *Hub) GetItemFQ(itemFQName string) (*Item, error) {
return i, nil
}
// GetItemNames returns a slice of (full) item names for a given type
// GetNamesByType returns a slice of (full) item names for a given type
// (eg. for collections: crowdsecurity/apache2 crowdsecurity/nginx).
func (h *Hub) GetItemNames(itemType string) []string {
func (h *Hub) GetNamesByType(itemType string) []string {
m := h.GetItemMap(itemType)
if m == nil {
return nil
@ -230,8 +230,8 @@ func (h *Hub) GetItemNames(itemType string) []string {
return names
}
// GetAllItems returns a slice of all the items of a given type, installed or not.
func (h *Hub) GetAllItems(itemType string) ([]*Item, error) {
// GetItemsByType returns a slice of all the items of a given type, installed or not.
func (h *Hub) GetItemsByType(itemType string) ([]*Item, error) {
if !slices.Contains(ItemTypes, itemType) {
return nil, fmt.Errorf("invalid item type %s", itemType)
}
@ -250,8 +250,8 @@ func (h *Hub) GetAllItems(itemType string) ([]*Item, error) {
return ret, nil
}
// GetInstalledItems returns a slice of the installed items of a given type.
func (h *Hub) GetInstalledItems(itemType string) ([]*Item, error) {
// GetInstalledItemsByType returns a slice of the installed items of a given type.
func (h *Hub) GetInstalledItemsByType(itemType string) ([]*Item, error) {
if !slices.Contains(ItemTypes, itemType) {
return nil, fmt.Errorf("invalid item type %s", itemType)
}
@ -269,9 +269,9 @@ func (h *Hub) GetInstalledItems(itemType string) ([]*Item, error) {
return retItems, nil
}
// GetInstalledItemNames returns the names of the installed items of a given type.
func (h *Hub) GetInstalledItemNames(itemType string) ([]string, error) {
items, err := h.GetInstalledItems(itemType)
// GetInstalledNamesByType returns the names of the installed items of a given type.
func (h *Hub) GetInstalledNamesByType(itemType string) ([]string, error) {
items, err := h.GetInstalledItemsByType(itemType)
if err != nil {
return nil, err
}


@ -636,14 +636,24 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [
if len(alertItem.Meta) > 0 {
metaBulk := make([]*ent.MetaCreate, len(alertItem.Meta))
for i, metaItem := range alertItem.Meta {
key := metaItem.Key
value := metaItem.Value
if len(metaItem.Value) > 4095 {
c.Log.Warningf("truncated meta %s : value too long", metaItem.Key)
value = value[:4095]
}
if len(metaItem.Key) > 255 {
c.Log.Warningf("truncated meta %s : key too long", metaItem.Key)
key = key[:255]
}
metaBulk[i] = c.Ent.Meta.Create().
SetKey(metaItem.Key).
SetValue(metaItem.Value)
SetKey(key).
SetValue(value)
}
metas, err = c.Ent.Meta.CreateBulk(metaBulk...).Save(c.CTX)
if err != nil {
return nil, errors.Wrapf(BulkError, "creating alert meta: %s", err)
c.Log.Warningf("error creating alert meta: %s", err)
}
}
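Aside: the hunk above caps meta keys at 255 bytes and values at 4095 bytes before insertion rather than failing the whole alert. A tiny standalone sketch of that kind of guard; note that slicing a Go string cuts by bytes, not runes, so a multi-byte UTF-8 character sitting exactly at the boundary can be split.

```go
package main

import "fmt"

// truncate returns at most max bytes of s. Slicing operates on bytes, so a
// multi-byte rune at the cut point will be split rather than preserved.
func truncate(s string, max int) string {
	if len(s) <= max {
		return s
	}
	return s[:max]
}

func main() {
	key := "source_ip"
	value := "a very long value that would not fit in the column"

	fmt.Println(truncate(key, 255))
	fmt.Println(truncate(value, 10))
}
```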


@ -37,6 +37,7 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string]
if v[0] == "false" {
query = query.Where(decision.SimulatedEQ(false))
}
delete(filter, "simulated")
} else {
query = query.Where(decision.SimulatedEQ(false))
@ -49,7 +50,7 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string]
if err != nil {
return nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err)
}
case "scopes", "scope": //Swagger mentions both of them, let's just support both to make sure we don't break anything
case "scopes", "scope": // Swagger mentions both of them, let's just support both to make sure we don't break anything
scopes := strings.Split(value[0], ",")
for i, scope := range scopes {
switch strings.ToLower(scope) {
@ -63,6 +64,7 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string]
scopes[i] = types.AS
}
}
query = query.Where(decision.ScopeIn(scopes...))
case "value":
query = query.Where(decision.ValueEQ(value[0]))
@ -164,11 +166,11 @@ func (c *Client) QueryExpiredDecisionsWithFilters(filters map[string][]string) (
return data, nil
}
func (c *Client) QueryDecisionCountByScenario(filters map[string][]string) ([]*DecisionsByScenario, error) {
func (c *Client) QueryDecisionCountByScenario() ([]*DecisionsByScenario, error) {
query := c.Ent.Decision.Query().Where(
decision.UntilGT(time.Now().UTC()),
)
query, err := BuildDecisionRequestWithFilter(query, filters)
query, err := BuildDecisionRequestWithFilter(query, make(map[string][]string))
if err != nil {
c.Log.Warningf("QueryDecisionCountByScenario : %s", err)
@ -277,10 +279,12 @@ func (c *Client) QueryNewDecisionsSinceWithFilters(since time.Time, filters map[
decision.CreatedAtGT(since),
decision.UntilGT(time.Now().UTC()),
)
//Allow a bouncer to ask for non-deduplicated results
// Allow a bouncer to ask for non-deduplicated results
if v, ok := filters["dedup"]; !ok || v[0] != "false" {
query = query.Where(longestDecisionForScopeTypeValue)
}
query, err := BuildDecisionRequestWithFilter(query, filters)
if err != nil {
c.Log.Warningf("QueryNewDecisionsSinceWithFilters : %s", err)
@ -294,17 +298,20 @@ func (c *Client) QueryNewDecisionsSinceWithFilters(since time.Time, filters map[
c.Log.Warningf("QueryNewDecisionsSinceWithFilters : %s", err)
return []*ent.Decision{}, errors.Wrapf(QueryFail, "new decisions since '%s'", since.String())
}
return data, nil
}
func (c *Client) DeleteDecisionById(decisionId int) ([]*ent.Decision, error) {
toDelete, err := c.Ent.Decision.Query().Where(decision.IDEQ(decisionId)).All(c.CTX)
func (c *Client) DeleteDecisionById(decisionID int) ([]*ent.Decision, error) {
toDelete, err := c.Ent.Decision.Query().Where(decision.IDEQ(decisionID)).All(c.CTX)
if err != nil {
c.Log.Warningf("DeleteDecisionById : %s", err)
return nil, errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionId)
return nil, errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionID)
}
count, err := c.BulkDeleteDecisions(toDelete, false)
c.Log.Debugf("deleted %d decisions", count)
return toDelete, err
}
@ -317,6 +324,7 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string,
else, return bans that are *contained* by the given value (value is the outer) */
decisions := c.Ent.Decision.Query()
for param, value := range filter {
switch param {
case "contains":
@ -359,48 +367,48 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string,
} else if ip_sz == 16 {
if contains { /*decision contains {start_ip,end_ip}*/
decisions = decisions.Where(decision.And(
//matching addr size
// matching addr size
decision.IPSizeEQ(int64(ip_sz)),
decision.Or(
//decision.start_ip < query.start_ip
// decision.start_ip < query.start_ip
decision.StartIPLT(start_ip),
decision.And(
//decision.start_ip == query.start_ip
// decision.start_ip == query.start_ip
decision.StartIPEQ(start_ip),
//decision.start_suffix <= query.start_suffix
// decision.start_suffix <= query.start_suffix
decision.StartSuffixLTE(start_sfx),
)),
decision.Or(
//decision.end_ip > query.end_ip
// decision.end_ip > query.end_ip
decision.EndIPGT(end_ip),
decision.And(
//decision.end_ip == query.end_ip
// decision.end_ip == query.end_ip
decision.EndIPEQ(end_ip),
//decision.end_suffix >= query.end_suffix
// decision.end_suffix >= query.end_suffix
decision.EndSuffixGTE(end_sfx),
),
),
))
} else {
decisions = decisions.Where(decision.And(
//matching addr size
// matching addr size
decision.IPSizeEQ(int64(ip_sz)),
decision.Or(
//decision.start_ip > query.start_ip
// decision.start_ip > query.start_ip
decision.StartIPGT(start_ip),
decision.And(
//decision.start_ip == query.start_ip
// decision.start_ip == query.start_ip
decision.StartIPEQ(start_ip),
//decision.start_suffix >= query.start_suffix
// decision.start_suffix >= query.start_suffix
decision.StartSuffixGTE(start_sfx),
)),
decision.Or(
//decision.end_ip < query.end_ip
// decision.end_ip < query.end_ip
decision.EndIPLT(end_ip),
decision.And(
//decision.end_ip == query.end_ip
// decision.end_ip == query.end_ip
decision.EndIPEQ(end_ip),
//decision.end_suffix <= query.end_suffix
// decision.end_suffix <= query.end_suffix
decision.EndSuffixLTE(end_sfx),
),
),
@ -415,11 +423,13 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string,
c.Log.Warningf("DeleteDecisionsWithFilter : %s", err)
return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter")
}
count, err := c.BulkDeleteDecisions(toDelete, false)
if err != nil {
c.Log.Warningf("While deleting decisions : %s", err)
return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter")
}
return strconv.Itoa(count), toDelete, nil
}
@ -432,6 +442,7 @@ func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (stri
/*if contains is true, return bans that *contains* the given value (value is the inner)
else, return bans that are *contained* by the given value (value is the outer)*/
decisions := c.Ent.Decision.Query().Where(decision.UntilGT(time.Now().UTC()))
for param, value := range filter {
switch param {
case "contains":
@ -480,24 +491,24 @@ func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (stri
/*decision contains {start_ip,end_ip}*/
if contains {
decisions = decisions.Where(decision.And(
//matching addr size
// matching addr size
decision.IPSizeEQ(int64(ip_sz)),
decision.Or(
//decision.start_ip < query.start_ip
// decision.start_ip < query.start_ip
decision.StartIPLT(start_ip),
decision.And(
//decision.start_ip == query.start_ip
// decision.start_ip == query.start_ip
decision.StartIPEQ(start_ip),
//decision.start_suffix <= query.start_suffix
// decision.start_suffix <= query.start_suffix
decision.StartSuffixLTE(start_sfx),
)),
decision.Or(
//decision.end_ip > query.end_ip
// decision.end_ip > query.end_ip
decision.EndIPGT(end_ip),
decision.And(
//decision.end_ip == query.end_ip
// decision.end_ip == query.end_ip
decision.EndIPEQ(end_ip),
//decision.end_suffix >= query.end_suffix
// decision.end_suffix >= query.end_suffix
decision.EndSuffixGTE(end_sfx),
),
),
@ -505,24 +516,24 @@ func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (stri
} else {
/*decision is contained within {start_ip,end_ip}*/
decisions = decisions.Where(decision.And(
//matching addr size
// matching addr size
decision.IPSizeEQ(int64(ip_sz)),
decision.Or(
//decision.start_ip > query.start_ip
// decision.start_ip > query.start_ip
decision.StartIPGT(start_ip),
decision.And(
//decision.start_ip == query.start_ip
// decision.start_ip == query.start_ip
decision.StartIPEQ(start_ip),
//decision.start_suffix >= query.start_suffix
// decision.start_suffix >= query.start_suffix
decision.StartSuffixGTE(start_sfx),
)),
decision.Or(
//decision.end_ip < query.end_ip
// decision.end_ip < query.end_ip
decision.EndIPLT(end_ip),
decision.And(
//decision.end_ip == query.end_ip
// decision.end_ip == query.end_ip
decision.EndIPEQ(end_ip),
//decision.end_suffix <= query.end_suffix
// decision.end_suffix <= query.end_suffix
decision.EndSuffixLTE(end_sfx),
),
),
@ -531,6 +542,7 @@ func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (stri
} else if ip_sz != 0 {
return "0", nil, errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz)
}
DecisionsToDelete, err := decisions.All(c.CTX)
if err != nil {
c.Log.Warningf("SoftDeleteDecisionsWithFilter : %s", err)
@ -541,13 +553,14 @@ func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (stri
if err != nil {
return "0", nil, errors.Wrapf(DeleteFail, "soft delete decisions with provided filter : %s", err)
}
return strconv.Itoa(count), DecisionsToDelete, err
}
// BulkDeleteDecisions set the expiration of a bulk of decisions to now() or hard deletes them.
// BulkDeleteDecisions sets the expiration of a bulk of decisions to now() or hard deletes them.
// We are doing it this way so we can return impacted decisions for sync with CAPI/PAPI
func (c *Client) BulkDeleteDecisions(decisionsToDelete []*ent.Decision, softDelete bool) (int, error) {
const bulkSize = 256 //scientifically proven to be the best value for bulk delete
const bulkSize = 256 // scientifically proven to be the best value for bulk delete
var (
nbUpdates int
@ -576,6 +589,7 @@ func (c *Client) BulkDeleteDecisions(decisionsToDelete []*ent.Decision, softDele
return totalUpdates, fmt.Errorf("hard delete decisions with provided filter: %w", err)
}
}
totalUpdates += nbUpdates
}
@ -612,6 +626,7 @@ func (c *Client) CountDecisionsByValue(decisionValue string) (int, error) {
contains := true
decisions := c.Ent.Decision.Query()
decisions, err = applyStartIpEndIpFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx)
if err != nil {
return 0, errors.Wrapf(err, "fail to apply StartIpEndIpFilter")
@ -667,6 +682,7 @@ func applyStartIpEndIpFilter(decisions *ent.DecisionQuery, contains bool, ip_sz
decision.IPSizeEQ(int64(ip_sz)),
))
}
return decisions, nil
}
@ -674,24 +690,24 @@ func applyStartIpEndIpFilter(decisions *ent.DecisionQuery, contains bool, ip_sz
/*decision contains {start_ip,end_ip}*/
if contains {
decisions = decisions.Where(decision.And(
//matching addr size
// matching addr size
decision.IPSizeEQ(int64(ip_sz)),
decision.Or(
//decision.start_ip < query.start_ip
// decision.start_ip < query.start_ip
decision.StartIPLT(start_ip),
decision.And(
//decision.start_ip == query.start_ip
// decision.start_ip == query.start_ip
decision.StartIPEQ(start_ip),
//decision.start_suffix <= query.start_suffix
// decision.start_suffix <= query.start_suffix
decision.StartSuffixLTE(start_sfx),
)),
decision.Or(
//decision.end_ip > query.end_ip
// decision.end_ip > query.end_ip
decision.EndIPGT(end_ip),
decision.And(
//decision.end_ip == query.end_ip
// decision.end_ip == query.end_ip
decision.EndIPEQ(end_ip),
//decision.end_suffix >= query.end_suffix
// decision.end_suffix >= query.end_suffix
decision.EndSuffixGTE(end_sfx),
),
),
@ -699,29 +715,30 @@ func applyStartIpEndIpFilter(decisions *ent.DecisionQuery, contains bool, ip_sz
} else {
/*decision is contained within {start_ip,end_ip}*/
decisions = decisions.Where(decision.And(
//matching addr size
// matching addr size
decision.IPSizeEQ(int64(ip_sz)),
decision.Or(
//decision.start_ip > query.start_ip
// decision.start_ip > query.start_ip
decision.StartIPGT(start_ip),
decision.And(
//decision.start_ip == query.start_ip
// decision.start_ip == query.start_ip
decision.StartIPEQ(start_ip),
//decision.start_suffix >= query.start_suffix
// decision.start_suffix >= query.start_suffix
decision.StartSuffixGTE(start_sfx),
)),
decision.Or(
//decision.end_ip < query.end_ip
// decision.end_ip < query.end_ip
decision.EndIPLT(end_ip),
decision.And(
//decision.end_ip == query.end_ip
// decision.end_ip == query.end_ip
decision.EndIPEQ(end_ip),
//decision.end_suffix <= query.end_suffix
// decision.end_suffix <= query.end_suffix
decision.EndSuffixLTE(end_sfx),
),
),
))
}
return decisions, nil
}
@ -735,8 +752,10 @@ func applyStartIpEndIpFilter(decisions *ent.DecisionQuery, contains bool, ip_sz
func decisionPredicatesFromStr(s string, predicateFunc func(string) predicate.Decision) []predicate.Decision {
words := strings.Split(s, ",")
predicates := make([]predicate.Decision, len(words))
for i, word := range words {
predicates[i] = predicateFunc(word)
}
return predicates
}


@ -22,69 +22,70 @@ import (
type Node struct {
FormatVersion string `yaml:"format"`
//Enable config + runtime debug of node via config o/
// Enable config + runtime debug of node via config o/
Debug bool `yaml:"debug,omitempty"`
//If enabled, the node (and its child) will report their own statistics
// If enabled, the node (and its child) will report their own statistics
Profiling bool `yaml:"profiling,omitempty"`
//Name, author, description and reference(s) for parser pattern
// Name, author, description and reference(s) for parser pattern
Name string `yaml:"name,omitempty"`
Author string `yaml:"author,omitempty"`
Description string `yaml:"description,omitempty"`
References []string `yaml:"references,omitempty"`
//if debug is present in the node, keep its specific Logger in runtime structure
// if debug is present in the node, keep its specific Logger in runtime structure
Logger *log.Entry `yaml:"-"`
//This is mostly a hack to make writing less repetitive.
//relying on stage, we know which field to parse, and we
//can also promote log to next stage on success
// This is mostly a hack to make writing less repetitive.
// relying on stage, we know which field to parse, and we
// can also promote log to next stage on success
Stage string `yaml:"stage,omitempty"`
//OnSuccess allows to tag a node to be able to move log to next stage on success
// OnSuccess allows to tag a node to be able to move log to next stage on success
OnSuccess string `yaml:"onsuccess,omitempty"`
rn string //this is only for us in debug, a random generated name for each node
//Filter is executed at runtime (with current log line as context)
//and must succeed or node is exited
rn string // this is only for us in debug, a random generated name for each node
// Filter is executed at runtime (with current log line as context)
// and must succeed or node is exited
Filter string `yaml:"filter,omitempty"`
RunTimeFilter *vm.Program `yaml:"-" json:"-"` //the actual compiled filter
//If node has leafs, execute all of them until one asks for a 'break'
RunTimeFilter *vm.Program `yaml:"-" json:"-"` // the actual compiled filter
// If node has leafs, execute all of them until one asks for a 'break'
LeavesNodes []Node `yaml:"nodes,omitempty"`
//Flag used to describe when to 'break' or return an 'error'
// Flag used to describe when to 'break' or return an 'error'
EnrichFunctions EnricherCtx
/* If the node is actually a leaf, it can have : grok, enrich, statics */
//pattern_syntax are named grok patterns that are re-utilized over several grok patterns
// pattern_syntax are named grok patterns that are re-utilized over several grok patterns
SubGroks yaml.MapSlice `yaml:"pattern_syntax,omitempty"`
//Holds a grok pattern
// Holds a grok pattern
Grok GrokPattern `yaml:"grok,omitempty"`
//Statics can be present in any type of node and is executed last
// Statics can be present in any type of node and is executed last
Statics []ExtraField `yaml:"statics,omitempty"`
//Stash allows to capture data from the log line and store it in an accessible cache
// Stash allows to capture data from the log line and store it in an accessible cache
Stash []DataCapture `yaml:"stash,omitempty"`
//Whitelists
// Whitelists
Whitelist Whitelist `yaml:"whitelist,omitempty"`
Data []*types.DataSource `yaml:"data,omitempty"`
}
func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error {
//stage is being set automagically
// stage is being set automagically
if n.Stage == "" {
return fmt.Errorf("stage needs to be an existing stage")
return errors.New("stage needs to be an existing stage")
}
/* "" behaves like continue */
if n.OnSuccess != "continue" && n.OnSuccess != "next_stage" && n.OnSuccess != "" {
return fmt.Errorf("onsuccess '%s' not continue,next_stage", n.OnSuccess)
}
if n.Filter != "" && n.RunTimeFilter == nil {
return fmt.Errorf("non-empty filter '%s' was not compiled", n.Filter)
}
if n.Grok.RunTimeRegexp != nil || n.Grok.TargetField != "" {
if n.Grok.TargetField == "" && n.Grok.ExpValue == "" {
return fmt.Errorf("grok requires 'expression' or 'apply_on'")
return errors.New("grok requires 'expression' or 'apply_on'")
}
if n.Grok.RegexpName == "" && n.Grok.RegexpValue == "" {
return fmt.Errorf("grok needs 'pattern' or 'name'")
return errors.New("grok needs 'pattern' or 'name'")
}
}
@ -93,6 +94,7 @@ func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error {
if static.ExpValue == "" {
return fmt.Errorf("static %d : when method is set, expression must be present", idx)
}
if _, ok := ectx.Registered[static.Method]; !ok {
log.Warningf("the method '%s' doesn't exist or the plugin has not been initialized", static.Method)
}
@ -100,6 +102,7 @@ func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error {
if static.Meta == "" && static.Parsed == "" && static.TargetByName == "" {
return fmt.Errorf("static %d : at least one of meta/event/target must be set", idx)
}
if static.Value == "" && static.RunTimeValue == nil {
return fmt.Errorf("static %d value or expression must be set", idx)
}
@ -110,72 +113,76 @@ func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error {
if stash.Name == "" {
return fmt.Errorf("stash %d : name must be set", idx)
}
if stash.Value == "" {
return fmt.Errorf("stash %s : value expression must be set", stash.Name)
}
if stash.Key == "" {
return fmt.Errorf("stash %s : key expression must be set", stash.Name)
}
if stash.TTL == "" {
return fmt.Errorf("stash %s : ttl must be set", stash.Name)
}
if stash.Strategy == "" {
stash.Strategy = "LRU"
}
//should be configurable
// should be configurable
if stash.MaxMapSize == 0 {
stash.MaxMapSize = 100
}
}
return nil
}
func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[string]interface{}) (bool, error) {
var NodeState bool
var NodeHasOKGrok bool
func (n *Node) processFilter(cachedExprEnv map[string]interface{}) (bool, error) {
clog := n.Logger
cachedExprEnv := expressionEnv
clog.Tracef("Event entering node")
if n.RunTimeFilter != nil {
//Evaluate node's filter
output, err := exprhelpers.Run(n.RunTimeFilter, cachedExprEnv, clog, n.Debug)
if err != nil {
clog.Warningf("failed to run filter : %v", err)
clog.Debugf("Event leaving node : ko")
return false, nil
}
switch out := output.(type) {
case bool:
if !out {
clog.Debugf("Event leaving node : ko (failed filter)")
return false, nil
}
default:
clog.Warningf("Expr '%s' returned non-bool, abort : %T", n.Filter, output)
clog.Debugf("Event leaving node : ko")
return false, nil
}
NodeState = true
} else {
if n.RunTimeFilter == nil {
clog.Tracef("Node has not filter, enter")
NodeState = true
return true, nil
}
if n.Name != "" {
NodesHits.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc()
// Evaluate node's filter
output, err := exprhelpers.Run(n.RunTimeFilter, cachedExprEnv, clog, n.Debug)
if err != nil {
clog.Warningf("failed to run filter : %v", err)
clog.Debugf("Event leaving node : ko")
return false, nil
}
exprErr := error(nil)
switch out := output.(type) {
case bool:
if !out {
clog.Debugf("Event leaving node : ko (failed filter)")
return false, nil
}
default:
clog.Warningf("Expr '%s' returned non-bool, abort : %T", n.Filter, output)
clog.Debugf("Event leaving node : ko")
return false, nil
}
return true, nil
}
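
processFilter only accepts a boolean result from the compiled expression; any other type is logged and treated as a non-match. A standalone sketch of that contract, assuming the expr-lang/expr module and a stand-in Event type:

package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

type Event struct {
	Parsed map[string]string
}

func main() {
	// compile once (what compile() does), run per event (what processFilter does)
	program, err := expr.Compile(`evt.Parsed["program"] == "sshd"`,
		expr.Env(map[string]interface{}{"evt": &Event{}}))
	if err != nil {
		panic(err)
	}

	evt := &Event{Parsed: map[string]string{"program": "sshd"}}

	out, err := expr.Run(program, map[string]interface{}{"evt": evt})
	if err != nil {
		panic(err)
	}

	// a non-bool result is treated like a failed filter
	ok, isBool := out.(bool)
	fmt.Println(isBool && ok) // true
}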
func (n *Node) processWhitelist(cachedExprEnv map[string]interface{}, p *types.Event) (bool, error) {
var exprErr error
isWhitelisted := n.CheckIPsWL(p)
if !isWhitelisted {
isWhitelisted, exprErr = n.CheckExprWL(cachedExprEnv, p)
}
if exprErr != nil {
// Previous code returned nil if there was an error, so we keep this behavior
return false, nil //nolint:nilerr
}
if isWhitelisted && !p.Whitelisted {
p.Whitelisted = true
p.WhitelistReason = n.Whitelist.Reason
@ -185,18 +192,51 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
for k := range p.Overflow.Sources {
ips = append(ips, k)
}
clog.Infof("Ban for %s whitelisted, reason [%s]", strings.Join(ips, ","), n.Whitelist.Reason)
n.Logger.Infof("Ban for %s whitelisted, reason [%s]", strings.Join(ips, ","), n.Whitelist.Reason)
p.Overflow.Whitelisted = true
}
}
//Process grok if present, should be exclusive with nodes :)
return isWhitelisted, nil
}
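
The IP half of the whitelist check (CheckIPsWL) amounts to testing the event's source address against configured addresses and CIDR ranges. A simplified, self-contained version using net/netip; the ranges and the address are made up:

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	whitelist := []netip.Prefix{
		netip.MustParsePrefix("10.0.0.0/8"),
		netip.MustParsePrefix("192.0.2.0/24"),
	}

	src := netip.MustParseAddr("192.0.2.42")

	whitelisted := false
	for _, p := range whitelist {
		if p.Contains(src) {
			whitelisted = true
			break
		}
	}

	fmt.Println(whitelisted) // true
}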
func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[string]interface{}) (bool, error) {
var NodeHasOKGrok bool
clog := n.Logger
cachedExprEnv := expressionEnv
clog.Tracef("Event entering node")
NodeState, err := n.processFilter(cachedExprEnv)
if err != nil {
return false, err
}
if !NodeState {
return false, nil
}
if n.Name != "" {
NodesHits.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc()
}
isWhitelisted, err := n.processWhitelist(cachedExprEnv, p)
if err != nil {
return false, err
}
// Process grok if present, should be exclusive with nodes :)
gstr := ""
if n.Grok.RunTimeRegexp != nil {
clog.Tracef("Processing grok pattern : %s : %p", n.Grok.RegexpName, n.Grok.RunTimeRegexp)
//for unparsed, parsed etc. set sensible defaults to reduce user hassle
// for unparsed, parsed etc. set sensible defaults to reduce user hassle
if n.Grok.TargetField != "" {
//it's a hack to avoid using real reflect
// it's a hack to avoid using real reflect
if n.Grok.TargetField == "Line.Raw" {
gstr = p.Line.Raw
} else if val, ok := p.Parsed[n.Grok.TargetField]; ok {
@ -211,6 +251,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
clog.Warningf("failed to run RunTimeValue : %v", err)
NodeState = false
}
switch out := output.(type) {
case string:
gstr = out
@ -229,12 +270,14 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
} else {
groklabel = n.Grok.RegexpName
}
grok := n.Grok.RunTimeRegexp.Parse(gstr)
if len(grok) > 0 {
/*tag explicitly that the *current* node had a successful grok pattern. it's important to know success state*/
NodeHasOKGrok = true
clog.Debugf("+ Grok '%s' returned %d entries to merge in Parsed", groklabel, len(grok))
//We managed to grok stuff, merged into parse
// We managed to grok stuff, merged into parse
for k, v := range grok {
clog.Debugf("\t.Parsed['%s'] = '%s'", k, v)
p.Parsed[k] = v
@ -246,34 +289,37 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
return false, err
}
} else {
//grok failed, node failed
// grok failed, node failed
clog.Debugf("+ Grok '%s' didn't return data on '%s'", groklabel, gstr)
NodeState = false
}
} else {
clog.Tracef("! No grok pattern : %p", n.Grok.RunTimeRegexp)
}
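
The grok step produces a map of named captures that is merged into p.Parsed. As a rough stand-in (the real engine evaluates grok patterns, not plain regexps), named capture groups illustrate the same parse-then-merge flow:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// named captures play the role of the grok pattern's fields
	re := regexp.MustCompile(`^(?P<user>\S+) failed login from (?P<ip>\S+)$`)

	parsed := map[string]string{}

	line := "root failed login from 192.0.2.10"
	if m := re.FindStringSubmatch(line); m != nil {
		for i, name := range re.SubexpNames() {
			if name != "" {
				parsed[name] = m[i]
			}
		}
	}

	fmt.Println(parsed) // map[ip:192.0.2.10 user:root]
}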
//Process the stash (data collection) if : a grok was present and succeeded, or if there is no grok
// Process the stash (data collection) if : a grok was present and succeeded, or if there is no grok
if NodeHasOKGrok || n.Grok.RunTimeRegexp == nil {
for idx, stash := range n.Stash {
var value string
var key string
var (
key string
value string
)
if stash.ValueExpression == nil {
clog.Warningf("Stash %d has no value expression, skipping", idx)
continue
}
if stash.KeyExpression == nil {
clog.Warningf("Stash %d has no key expression, skipping", idx)
continue
}
//collect the data
// collect the data
output, err := exprhelpers.Run(stash.ValueExpression, cachedExprEnv, clog, n.Debug)
if err != nil {
clog.Warningf("Error while running stash val expression : %v", err)
}
//can we expect anything else than a string ?
// can we expect anything else than a string ?
switch output := output.(type) {
case string:
value = output
@ -282,12 +328,12 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
continue
}
//collect the key
// collect the key
output, err = exprhelpers.Run(stash.KeyExpression, cachedExprEnv, clog, n.Debug)
if err != nil {
clog.Warningf("Error while running stash key expression : %v", err)
}
//can we expect anything else than a string ?
// can we expect anything else than a string ?
switch output := output.(type) {
case string:
key = output
@ -299,7 +345,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
}
}
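
Each stash entry ultimately stores the extracted key/value pair with a TTL in a named cache. A bare-bones stand-in with a plain map; the real code goes through pkg/cache, and the names below are illustrative:

package main

import (
	"fmt"
	"time"
)

type entry struct {
	value   string
	expires time.Time
}

type stash map[string]entry

func (s stash) set(key, value string, ttl time.Duration) {
	s[key] = entry{value: value, expires: time.Now().Add(ttl)}
}

func (s stash) get(key string) (string, bool) {
	e, ok := s[key]
	if !ok || time.Now().After(e.expires) {
		return "", false
	}

	return e.value, true
}

func main() {
	s := stash{}
	s.set("src_ip:192.0.2.10", "sshd", 30*time.Second)

	if v, ok := s.get("src_ip:192.0.2.10"); ok {
		fmt.Println("stashed value:", v)
	}
}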
//Iterate on leafs
// Iterate on leafs
for _, leaf := range n.LeavesNodes {
ret, err := leaf.process(p, ctx, cachedExprEnv)
if err != nil {
@ -307,7 +353,9 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
clog.Debugf("Event leaving node : ko")
return false, err
}
clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaf.rn, ret, n.OnSuccess)
if ret {
NodeState = true
/* if child is successful, stop processing */
@ -328,12 +376,14 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
clog.Tracef("State after nodes : %v", NodeState)
//grok or leafs failed, don't process statics
// grok or leafs failed, don't process statics
if !NodeState {
if n.Name != "" {
NodesHitsKo.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc()
}
clog.Debugf("Event leaving node : ko")
return NodeState, nil
}
@ -360,9 +410,10 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
if NodeState {
clog.Debugf("Event leaving node : ok")
log.Tracef("node is successful, check strategy")
if n.OnSuccess == "next_stage" {
idx := stageidx(p.Stage, ctx.Stages)
//we're at the last stage
// we're at the last stage
if idx+1 == len(ctx.Stages) {
clog.Debugf("node reached the last stage : %s", p.Stage)
} else {
@ -375,15 +426,16 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
} else {
clog.Debugf("Event leaving node : ko")
}
clog.Tracef("Node successful, continue")
return NodeState, nil
}
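
The 'next_stage' strategy simply advances the event one position in the ordered stage list, unless it is already at the last stage. In isolation, with made-up stage names:

package main

import "fmt"

func stageIndex(stage string, stages []string) int {
	for i, s := range stages {
		if s == stage {
			return i
		}
	}

	return -1
}

func main() {
	stages := []string{"s00-raw", "s01-parse", "s02-enrich"}

	current := "s01-parse"
	if idx := stageIndex(current, stages); idx >= 0 && idx+1 < len(stages) {
		fmt.Println("moving event to stage", stages[idx+1]) // s02-enrich
	} else {
		fmt.Println("already at the last stage")
	}
}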
func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
var err error
var valid bool
valid = false
valid := false
dumpr := spew.ConfigState{MaxDepth: 1, DisablePointerAddresses: true}
n.rn = seed.Generate()
@ -393,10 +445,11 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
/* if the node has debugging enabled, create a specific logger with debug
that will be used only for processing this node ;) */
if n.Debug {
var clog = log.New()
clog := log.New()
if err = types.ConfigureLogger(clog); err != nil {
log.Fatalf("While creating bucket-specific logger : %s", err)
}
clog.SetLevel(log.DebugLevel)
n.Logger = clog.WithFields(log.Fields{
"id": n.rn,
@ -414,7 +467,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
n.Logger.Tracef("Compiling : %s", dumpr.Sdump(n))
//compile filter if present
// compile filter if present
if n.Filter != "" {
n.RunTimeFilter, err = expr.Compile(n.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
if err != nil {
@ -425,12 +478,15 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
/* handle pattern_syntax and groks */
for _, pattern := range n.SubGroks {
n.Logger.Tracef("Adding subpattern '%s' : '%s'", pattern.Key, pattern.Value)
if err = pctx.Grok.Add(pattern.Key.(string), pattern.Value.(string)); err != nil {
if errors.Is(err, grokky.ErrAlreadyExist) {
n.Logger.Warningf("grok '%s' already registred", pattern.Key)
continue
}
n.Logger.Errorf("Unable to compile subpattern %s : %v", pattern.Key, err)
return err
}
}
@ -438,28 +494,36 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
/* load grok by name or compile in-place */
if n.Grok.RegexpName != "" {
n.Logger.Tracef("+ Regexp Compilation '%s'", n.Grok.RegexpName)
n.Grok.RunTimeRegexp, err = pctx.Grok.Get(n.Grok.RegexpName)
if err != nil {
return fmt.Errorf("unable to find grok '%s' : %v", n.Grok.RegexpName, err)
}
if n.Grok.RunTimeRegexp == nil {
return fmt.Errorf("empty grok '%s'", n.Grok.RegexpName)
}
n.Logger.Tracef("%s regexp: %s", n.Grok.RegexpName, n.Grok.RunTimeRegexp.String())
valid = true
} else if n.Grok.RegexpValue != "" {
if strings.HasSuffix(n.Grok.RegexpValue, "\n") {
n.Logger.Debugf("Beware, pattern ends with \\n : '%s'", n.Grok.RegexpValue)
}
n.Grok.RunTimeRegexp, err = pctx.Grok.Compile(n.Grok.RegexpValue)
if err != nil {
return fmt.Errorf("failed to compile grok '%s': %v", n.Grok.RegexpValue, err)
}
if n.Grok.RunTimeRegexp == nil {
// We shouldn't be here because compilation succeeded, so regexp shouldn't be nil
return fmt.Errorf("grok compilation failure: %s", n.Grok.RegexpValue)
}
n.Logger.Tracef("%s regexp : %s", n.Grok.RegexpValue, n.Grok.RunTimeRegexp.String())
valid = true
}
@ -473,7 +537,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
}
/* load grok statics */
//compile expr statics if present
// compile expr statics if present
for idx := range n.Grok.Statics {
if n.Grok.Statics[idx].ExpValue != "" {
n.Grok.Statics[idx].RunTimeValue, err = expr.Compile(n.Grok.Statics[idx].ExpValue,
@ -482,6 +546,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
return err
}
}
valid = true
}
@ -505,7 +570,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
}
logLvl := n.Logger.Logger.GetLevel()
//init the cache, does it make sense to create it here just to be sure everything is fine ?
// init the cache, does it make sense to create it here just to be sure everything is fine ?
if err = cache.CacheInit(cache.CacheCfg{
Size: n.Stash[i].MaxMapSize,
TTL: n.Stash[i].TTLVal,
@ -526,14 +591,18 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
if !n.LeavesNodes[idx].Debug && n.Debug {
n.LeavesNodes[idx].Debug = true
}
if !n.LeavesNodes[idx].Profiling && n.Profiling {
n.LeavesNodes[idx].Profiling = true
}
n.LeavesNodes[idx].Stage = n.Stage
err = n.LeavesNodes[idx].compile(pctx, ectx)
if err != nil {
return err
}
valid = true
}
@ -546,6 +615,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
return err
}
}
valid = true
}
@ -554,13 +624,15 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
if err != nil {
return err
}
valid = valid || whitelistValid
if !valid {
/* node is empty, error force return */
n.Logger.Error("Node is empty or invalid, abort")
n.Stage = ""
return fmt.Errorf("Node is empty")
return errors.New("Node is empty")
}
if err := n.validate(pctx, ectx); err != nil {

View file

@ -1,9 +1,10 @@
//go:build !windows
//go:build !windows && !freebsd
package types
import (
"fmt"
"golang.org/x/sys/unix"
)
@ -92,6 +93,7 @@ var fsTypeMapping map[int64]string = map[int64]string{
0xabba1974: "xenfs",
0x012ff7b4: "xenix",
0x58465342: "xfs",
0x2fc12fc1: "zfs",
}
func GetFSType(path string) (string, error) {
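
On the statfs-based platforms covered by this file, GetFSType resolves the magic number reported by the kernel through the table above, which is why the zfs entry was added. A self-contained approximation of the Linux case, assuming golang.org/x/sys/unix and a trimmed-down table:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// illustrative subset of the mapping above
var fsMagic = map[int64]string{
	0x58465342: "xfs",
	0x2fc12fc1: "zfs",
}

func fsTypeOf(path string) (string, error) {
	var s unix.Statfs_t
	if err := unix.Statfs(path, &s); err != nil {
		return "", fmt.Errorf("statfs %s: %w", path, err)
	}

	name, ok := fsMagic[int64(s.Type)]
	if !ok {
		return "", fmt.Errorf("unknown fs type %#x", s.Type)
	}

	return name, nil
}

func main() {
	fmt.Println(fsTypeOf("/"))
}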

View file

@ -0,0 +1,25 @@
//go:build freebsd
package types
import (
"fmt"
"syscall"
)
func GetFSType(path string) (string, error) {
var fsStat syscall.Statfs_t
if err := syscall.Statfs(path, &fsStat); err != nil {
return "", fmt.Errorf("failed to get filesystem type: %w", err)
}
bs := fsStat.Fstypename
b := make([]byte, len(bs))
for i, v := range bs {
b[i] = byte(v)
}
return string(b), nil
}
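
Note that the byte-for-byte copy keeps any trailing NUL padding from the fixed-size Fstypename array; if the result is compared against plain strings such as "zfs", cutting at the first NUL is one way to normalize it. An illustrative helper, not part of this change:

package main

import (
	"bytes"
	"fmt"
)

// cString cuts a C-style, NUL-padded byte buffer down to the actual string.
func cString(b []byte) string {
	if i := bytes.IndexByte(b, 0); i >= 0 {
		b = b[:i]
	}

	return string(b)
}

func main() {
	raw := []byte{'z', 'f', 's', 0, 0, 0, 0, 0} // what the copy loop would produce
	fmt.Printf("%q\n", cString(raw))            // "zfs"
}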

View file

@ -66,11 +66,11 @@ bats-check-requirements: ## Check dependencies for functional tests
@$(TEST_DIR)/bin/check-requirements
bats-update-tools: ## Install/update tools required for functional tests
# yq v4.40.4
GOBIN=$(TEST_DIR)/tools go install github.com/mikefarah/yq/v4@1c3d55106075bd37df197b4bc03cb4a413fdb903
# cfssl v1.6.4
GOBIN=$(TEST_DIR)/tools go install github.com/cloudflare/cfssl/cmd/cfssl@b4d0d877cac528f63db39dfb62d5c96cd3a32a0b
GOBIN=$(TEST_DIR)/tools go install github.com/cloudflare/cfssl/cmd/cfssljson@b4d0d877cac528f63db39dfb62d5c96cd3a32a0b
# yq v4.43.1
GOBIN=$(TEST_DIR)/tools go install github.com/mikefarah/yq/v4@c35ec752e38ea0c096d3c44e13cfc0797ac394d8
# cfssl v1.6.5
GOBIN=$(TEST_DIR)/tools go install github.com/cloudflare/cfssl/cmd/cfssl@96259aa29c9cc9b2f4e04bad7d4bc152e5405dda
GOBIN=$(TEST_DIR)/tools go install github.com/cloudflare/cfssl/cmd/cfssljson@96259aa29c9cc9b2f4e04bad7d4bc152e5405dda
# Build and installs crowdsec in a local directory. Rebuilds if already exists.
bats-build: bats-environment ## Build binaries for functional tests

View file

@ -9,12 +9,20 @@ THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd)
# pre-download everything but don't install anything
echo "Pre-downloading Hub content..."
echo -n "Purging existing hub..."
types=$("$CSCLI" hub types -o raw)
for itemtype in $types; do
ALL_ITEMS=$("$CSCLI" "$itemtype" list -a -o json | itemtype="$itemtype" yq '.[env(itemtype)][] | .name')
"$CSCLI" "${itemtype}" delete --all --error --purge --force
done
echo " done."
echo -n "Pre-downloading Hub content..."
for itemtype in $types; do
ALL_ITEMS=$("$CSCLI" "$itemtype" list -a -o json | jq --arg itemtype "$itemtype" -r '.[$itemtype][].name')
if [[ -n "${ALL_ITEMS}" ]]; then
#shellcheck disable=SC2086
"$CSCLI" "$itemtype" install \
@ -24,4 +32,11 @@ for itemtype in $types; do
fi
done
# XXX: download-only works only for collections, not for parsers, scenarios, postoverflows.
# so we have to delete the links manually, and leave the downloaded files in place
for itemtype in $types; do
"$CSCLI" "$itemtype" delete --all --error
done
echo " done."