Compare commits: master...1.6.2-down

11 commits:

1cb7ef1a56
c07835fab3
aebf85613e
f602c8c31e
3bd5eac61a
adbd55a9be
4167a1d376
e907fbfb10
596eac6616
cbc0d8550d
688fd64a93

49 changed files with 316 additions and 666 deletions
.github/workflows/bats-hub.yml (vendored, 2 changes)

@@ -33,7 +33,7 @@ jobs:
      - name: "Set up Go"
        uses: actions/setup-go@v5
        with:
-         go-version: "1.22.2"
+         go-version: "1.21.9"

      - name: "Install bats dependencies"
        env:

.github/workflows/bats-mysql.yml (vendored, 2 changes)

@@ -36,7 +36,7 @@ jobs:
      - name: "Set up Go"
        uses: actions/setup-go@v5
        with:
-         go-version: "1.22.2"
+         go-version: "1.21.9"

      - name: "Install bats dependencies"
        env:

.github/workflows/bats-postgres.yml (vendored, 2 changes)

@@ -45,7 +45,7 @@ jobs:
      - name: "Set up Go"
        uses: actions/setup-go@v5
        with:
-         go-version: "1.22.2"
+         go-version: "1.21.9"

      - name: "Install bats dependencies"
        env:

.github/workflows/bats-sqlite-coverage.yml (vendored, 2 changes)

@@ -28,7 +28,7 @@ jobs:
      - name: "Set up Go"
        uses: actions/setup-go@v5
        with:
-         go-version: "1.22.2"
+         go-version: "1.21.9"

      - name: "Install bats dependencies"
        env:

.github/workflows/ci-windows-build-msi.yml (vendored, 2 changes)

@@ -35,7 +35,7 @@ jobs:
      - name: "Set up Go"
        uses: actions/setup-go@v5
        with:
-         go-version: "1.22.2"
+         go-version: "1.21.9"

      - name: Build
        run: make windows_installer BUILD_RE2_WASM=1

.github/workflows/codeql-analysis.yml (vendored, 2 changes)

@@ -52,7 +52,7 @@ jobs:
      - name: "Set up Go"
        uses: actions/setup-go@v5
        with:
-         go-version: "1.22.2"
+         go-version: "1.21.9"
          cache-dependency-path: "**/go.sum"

      # Initializes the CodeQL tools for scanning.

.github/workflows/go-tests-windows.yml (vendored, 2 changes)

@@ -34,7 +34,7 @@ jobs:
      - name: "Set up Go"
        uses: actions/setup-go@v5
        with:
-         go-version: "1.22.2"
+         go-version: "1.21.9"

      - name: Build
        run: |

.github/workflows/go-tests.yml (vendored, 2 changes)

@@ -126,7 +126,7 @@ jobs:
      - name: "Set up Go"
        uses: actions/setup-go@v5
        with:
-         go-version: "1.22.2"
+         go-version: "1.21.9"

      - name: Create localstack streams
        run: |

@@ -25,7 +25,7 @@ jobs:
      - name: "Set up Go"
        uses: actions/setup-go@v5
        with:
-         go-version: "1.22.2"
+         go-version: "1.21.9"

      - name: Build the binaries
        run: |
@@ -3,7 +3,7 @@
linters-settings:
  cyclop:
    # lower this after refactoring
-   max-complexity: 48
+   max-complexity: 53

  gci:
    sections:

@@ -22,7 +22,7 @@ linters-settings:

  gocyclo:
    # lower this after refactoring
-   min-complexity: 48
+   min-complexity: 49

  funlen:
    # Checks the number of lines in a function.

@@ -82,6 +82,18 @@ linters-settings:
        - "!**/pkg/apiserver/controllers/v1/errors.go"
      yaml:
        files:
          - "!**/cmd/crowdsec-cli/alerts.go"
          - "!**/cmd/crowdsec-cli/capi.go"
          - "!**/cmd/crowdsec-cli/config_show.go"
          - "!**/cmd/crowdsec-cli/hubtest.go"
          - "!**/cmd/crowdsec-cli/lapi.go"
          - "!**/cmd/crowdsec-cli/simulation.go"
          - "!**/cmd/crowdsec/crowdsec.go"
          - "!**/cmd/notification-dummy/main.go"
          - "!**/cmd/notification-email/main.go"
          - "!**/cmd/notification-http/main.go"
          - "!**/cmd/notification-slack/main.go"
          - "!**/cmd/notification-splunk/main.go"
          - "!**/pkg/acquisition/acquisition.go"
          - "!**/pkg/acquisition/acquisition_test.go"
          - "!**/pkg/acquisition/modules/appsec/appsec.go"

@@ -139,13 +151,6 @@ linters:
    - structcheck
    - varcheck

-   #
-   # Disabled until fixed for go 1.22
-   #

-   - copyloopvar # copyloopvar is a linter detects places where loop variables are copied
-   - intrange # intrange is a linter to find places where for loops could make use of an integer range.

    #
    # Enabled
    #

@@ -154,6 +159,7 @@ linters:
  # - asciicheck # checks that all code identifiers does not have non-ASCII symbols in the name
  # - bidichk # Checks for dangerous unicode character sequences
  # - bodyclose # checks whether HTTP response body is closed successfully
+ # - copyloopvar # copyloopvar is a linter detects places where loop variables are copied
  # - cyclop # checks function and package cyclomatic complexity
  # - decorder # check declaration order and count of types, constants, variables and functions
  # - depguard # Go linter that checks if package imports are in a list of acceptable packages

@@ -182,6 +188,7 @@ linters:
  # - importas # Enforces consistent import aliases
  # - ineffassign # Detects when assignments to existing variables are not used
  # - interfacebloat # A linter that checks the number of methods inside an interface.
+ # - intrange # intrange is a linter to find places where for loops could make use of an integer range.
  # - lll # Reports long lines
  # - loggercheck # (logrlint): Checks key value pairs for common logger libraries (kitlog,klog,logr,zap).
  # - logrlint # Check logr arguments.
@@ -1,5 +1,5 @@
# vim: set ft=dockerfile:
-FROM golang:1.22.2-alpine3.18 AS build
+FROM golang:1.21.9-alpine3.18 AS build

ARG BUILD_VERSION


@@ -1,5 +1,5 @@
# vim: set ft=dockerfile:
-FROM golang:1.22.2-bookworm AS build
+FROM golang:1.21.9-bookworm AS build

ARG BUILD_VERSION


@@ -21,7 +21,7 @@ stages:
  - task: GoTool@0
    displayName: "Install Go"
    inputs:
-     version: '1.22.2'
+     version: '1.21.9'

  - pwsh: |
      choco install -y make
@@ -17,7 +17,7 @@ import (
"github.com/go-openapi/strfmt"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
-"gopkg.in/yaml.v3"
+"gopkg.in/yaml.v2"

"github.com/crowdsecurity/go-cs-lib/version"


@@ -10,7 +10,7 @@ import (
"github.com/go-openapi/strfmt"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
-"gopkg.in/yaml.v3"
+"gopkg.in/yaml.v2"

"github.com/crowdsecurity/go-cs-lib/version"

@@ -85,6 +85,7 @@ func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error {
URL: apiurl,
VersionPrefix: CAPIURLPrefix,
}, nil)

if err != nil {
return fmt.Errorf("api client register ('%s'): %w", types.CAPIBaseURL, err)
}


@@ -14,7 +14,7 @@ import (
"github.com/fatih/color"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
-"gopkg.in/yaml.v3"
+"gopkg.in/yaml.v2"

"github.com/crowdsecurity/crowdsec/pkg/dumps"
"github.com/crowdsecurity/crowdsec/pkg/emoji"

@@ -136,7 +136,7 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios
nucleiFileName := fmt.Sprintf("%s.yaml", testName)
nucleiFilePath := filepath.Join(testPath, nucleiFileName)

-nucleiFile, err := os.OpenFile(nucleiFilePath, os.O_RDWR|os.O_CREATE, 0o755)
+nucleiFile, err := os.OpenFile(nucleiFilePath, os.O_RDWR|os.O_CREATE, 0755)
if err != nil {
return err
}


@@ -473,11 +473,22 @@ func (cli cliItem) itemDiff(item *cwhub.Item, reverse bool) (string, error) {
return "", fmt.Errorf("'%s' is not installed", item.FQName())
}

-latestContent, remoteURL, err := item.FetchLatest()
+dest, err := os.CreateTemp("", "cscli-diff-*")
if err != nil {
+return "", fmt.Errorf("while creating temporary file: %w", err)
+}
+defer os.Remove(dest.Name())
+
+_, remoteURL, err := item.FetchContentTo(dest.Name())
+if err != nil {
return "", err
}

+latestContent, err := os.ReadFile(dest.Name())
+if err != nil {
+return "", fmt.Errorf("while reading %s: %w", dest.Name(), err)
+}
+
localContent, err := os.ReadFile(item.State.LocalPath)
if err != nil {
return "", fmt.Errorf("while reading %s: %w", item.State.LocalPath, err)
@@ -13,7 +13,7 @@ import (
"github.com/go-openapi/strfmt"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
-"gopkg.in/yaml.v3"
+"gopkg.in/yaml.v2"

"github.com/crowdsecurity/go-cs-lib/version"


@@ -8,7 +8,7 @@ import (

log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
-"gopkg.in/yaml.v3"
+"gopkg.in/yaml.v2"

"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require"
"github.com/crowdsecurity/crowdsec/pkg/cwhub"

@@ -74,7 +74,7 @@ func (cli *cliSimulation) NewEnableCmd() *cobra.Command {

if len(args) > 0 {
for _, scenario := range args {
-item := hub.GetItem(cwhub.SCENARIOS, scenario)
+var item = hub.GetItem(cwhub.SCENARIOS, scenario)
if item == nil {
log.Errorf("'%s' doesn't exist or is not a scenario", scenario)
continue


@@ -319,7 +319,7 @@ cscli support dump -f /tmp/crowdsec-support.zip
`,
Args: cobra.NoArgs,
DisableAutoGenTag: true,
-RunE: func(_ *cobra.Command, _ []string) error {
+Run: func(_ *cobra.Command, _ []string) {
var err error
var skipHub, skipDB, skipCAPI, skipLAPI, skipAgent bool
infos := map[string][]byte{

@@ -473,19 +473,15 @@ cscli support dump -f /tmp/crowdsec-support.zip

err = zipWriter.Close()
if err != nil {
-return fmt.Errorf("could not finalize zip file: %s", err)
+log.Fatalf("could not finalize zip file: %s", err)
}

-if outFile == "-" {
-_, err = os.Stdout.Write(w.Bytes())
-return err
-}
err = os.WriteFile(outFile, w.Bytes(), 0o600)
if err != nil {
-return fmt.Errorf("could not write zip file to %s: %s", outFile, err)
+log.Fatalf("could not write zip file to %s: %s", outFile, err)
}

log.Infof("Written zip file to %s", outFile)
-return nil
},
}
@@ -9,7 +9,7 @@ import (
"time"

log "github.com/sirupsen/logrus"
-"gopkg.in/yaml.v3"
+"gopkg.in/yaml.v2"

"github.com/crowdsecurity/go-cs-lib/trace"


@@ -207,7 +207,7 @@ func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub
}

func dumpBucketsPour() {
-fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucketpour-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666)
+fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucketpour-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
if err != nil {
log.Fatalf("open: %s", err)
}

@@ -230,7 +230,7 @@ func dumpBucketsPour() {
}

func dumpParserState() {
-fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "parser-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666)
+fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "parser-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
if err != nil {
log.Fatalf("open: %s", err)
}

@@ -253,7 +253,7 @@ func dumpParserState() {
}

func dumpOverflowState() {
-fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucket-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666)
+fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucket-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
if err != nil {
log.Fatalf("open: %s", err)
}
@@ -5,11 +5,10 @@ import (
"fmt"
"os"

+"github.com/crowdsecurity/crowdsec/pkg/protobufs"
"github.com/hashicorp/go-hclog"
plugin "github.com/hashicorp/go-plugin"
-"gopkg.in/yaml.v3"
-
-"github.com/crowdsecurity/crowdsec/pkg/protobufs"
+"gopkg.in/yaml.v2"
)

type PluginConfig struct {

@@ -33,7 +32,6 @@ func (s *DummyPlugin) Notify(ctx context.Context, notification *protobufs.Notifi
if _, ok := s.PluginConfigByName[notification.Name]; !ok {
return nil, fmt.Errorf("invalid plugin config name %s", notification.Name)
}

cfg := s.PluginConfigByName[notification.Name]

if cfg.LogLevel != nil && *cfg.LogLevel != "" {

@@ -44,22 +42,19 @@ func (s *DummyPlugin) Notify(ctx context.Context, notification *protobufs.Notifi
logger.Debug(notification.Text)

if cfg.OutputFile != nil && *cfg.OutputFile != "" {
-f, err := os.OpenFile(*cfg.OutputFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
+f, err := os.OpenFile(*cfg.OutputFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
logger.Error(fmt.Sprintf("Cannot open notification file: %s", err))
}

if _, err := f.WriteString(notification.Text + "\n"); err != nil {
f.Close()
logger.Error(fmt.Sprintf("Cannot write notification to file: %s", err))
}

err = f.Close()
if err != nil {
logger.Error(fmt.Sprintf("Cannot close notification file: %s", err))
}
}

fmt.Println(notification.Text)

return &protobufs.Empty{}, nil

@@ -69,12 +64,11 @@ func (s *DummyPlugin) Configure(ctx context.Context, config *protobufs.Config) (
d := PluginConfig{}
err := yaml.Unmarshal(config.Config, &d)
s.PluginConfigByName[d.Name] = d

return &protobufs.Empty{}, err
}

func main() {
-handshake := plugin.HandshakeConfig{
+var handshake = plugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: "CROWDSEC_PLUGIN_KEY",
MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"),
@@ -2,17 +2,15 @@ package main

import (
"context"
-"errors"
"fmt"
"os"
"time"

+"github.com/crowdsecurity/crowdsec/pkg/protobufs"
"github.com/hashicorp/go-hclog"
plugin "github.com/hashicorp/go-plugin"
mail "github.com/xhit/go-simple-mail/v2"
-"gopkg.in/yaml.v3"
-
-"github.com/crowdsecurity/crowdsec/pkg/protobufs"
+"gopkg.in/yaml.v2"
)

var baseLogger hclog.Logger = hclog.New(&hclog.LoggerOptions{

@@ -74,20 +72,19 @@ func (n *EmailPlugin) Configure(ctx context.Context, config *protobufs.Config) (
}

if d.Name == "" {
-return nil, errors.New("name is required")
+return nil, fmt.Errorf("name is required")
}

if d.SMTPHost == "" {
-return nil, errors.New("SMTP host is not set")
+return nil, fmt.Errorf("SMTP host is not set")
}

if d.ReceiverEmails == nil || len(d.ReceiverEmails) == 0 {
-return nil, errors.New("receiver emails are not set")
+return nil, fmt.Errorf("receiver emails are not set")
}

n.ConfigByName[d.Name] = d
baseLogger.Debug(fmt.Sprintf("Email plugin '%s' use SMTP host '%s:%d'", d.Name, d.SMTPHost, d.SMTPPort))

return &protobufs.Empty{}, nil
}


@@ -95,7 +92,6 @@ func (n *EmailPlugin) Notify(ctx context.Context, notification *protobufs.Notifi
if _, ok := n.ConfigByName[notification.Name]; !ok {
return nil, fmt.Errorf("invalid plugin config name %s", notification.Name)
}

cfg := n.ConfigByName[notification.Name]

logger := baseLogger.Named(cfg.Name)

@@ -121,7 +117,6 @@ func (n *EmailPlugin) Notify(ctx context.Context, notification *protobufs.Notifi
server.ConnectTimeout, err = time.ParseDuration(cfg.ConnectTimeout)
if err != nil {
logger.Warn(fmt.Sprintf("invalid connect timeout '%s', using default '10s'", cfg.ConnectTimeout))

server.ConnectTimeout = 10 * time.Second
}
}

@@ -130,18 +125,15 @@ func (n *EmailPlugin) Notify(ctx context.Context, notification *protobufs.Notifi
server.SendTimeout, err = time.ParseDuration(cfg.SendTimeout)
if err != nil {
logger.Warn(fmt.Sprintf("invalid send timeout '%s', using default '10s'", cfg.SendTimeout))

server.SendTimeout = 10 * time.Second
}
}

logger.Debug("making smtp connection")

smtpClient, err := server.Connect()
if err != nil {
return &protobufs.Empty{}, err
}

logger.Debug("smtp connection done")

email := mail.NewMSG()

@@ -154,14 +146,12 @@ func (n *EmailPlugin) Notify(ctx context.Context, notification *protobufs.Notifi
if err != nil {
return &protobufs.Empty{}, err
}

logger.Info(fmt.Sprintf("sent email to %v", cfg.ReceiverEmails))

return &protobufs.Empty{}, nil
}

func main() {
-handshake := plugin.HandshakeConfig{
+var handshake = plugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: "CROWDSEC_PLUGIN_KEY",
MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"),
@@ -12,11 +12,10 @@ import (
"os"
"strings"

+"github.com/crowdsecurity/crowdsec/pkg/protobufs"
"github.com/hashicorp/go-hclog"
plugin "github.com/hashicorp/go-plugin"
-"gopkg.in/yaml.v3"
-
-"github.com/crowdsecurity/crowdsec/pkg/protobufs"
+"gopkg.in/yaml.v2"
)

type PluginConfig struct {

@@ -91,23 +90,18 @@ func getTLSClient(c *PluginConfig) error {

tlsConfig.Certificates = []tls.Certificate{cert}
}

transport := &http.Transport{
TLSClientConfig: tlsConfig,
}

if c.UnixSocket != "" {
logger.Info(fmt.Sprintf("Using socket '%s'", c.UnixSocket))

transport.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
return net.Dial("unix", strings.TrimSuffix(c.UnixSocket, "/"))
}
}

c.Client = &http.Client{
Transport: transport,
}

return nil
}


@@ -115,7 +109,6 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific
if _, ok := s.PluginConfigByName[notification.Name]; !ok {
return nil, fmt.Errorf("invalid plugin config name %s", notification.Name)
}

cfg := s.PluginConfigByName[notification.Name]

if cfg.LogLevel != nil && *cfg.LogLevel != "" {

@@ -128,14 +121,11 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific
if err != nil {
return nil, err
}

for headerName, headerValue := range cfg.Headers {
logger.Debug(fmt.Sprintf("adding header %s: %s", headerName, headerValue))
request.Header.Add(headerName, headerValue)
}

logger.Debug(fmt.Sprintf("making HTTP %s call to %s with body %s", cfg.Method, cfg.URL, notification.Text))

resp, err := cfg.Client.Do(request.WithContext(ctx))
if err != nil {
logger.Error(fmt.Sprintf("Failed to make HTTP request : %s", err))

@@ -145,7 +135,7 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific

respData, err := io.ReadAll(resp.Body)
if err != nil {
-return nil, fmt.Errorf("failed to read response body got error %w", err)
+return nil, fmt.Errorf("failed to read response body got error %s", err)
}

logger.Debug(fmt.Sprintf("got response %s", string(respData)))

@@ -153,7 +143,6 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
logger.Warn(fmt.Sprintf("HTTP server returned non 200 status code: %d", resp.StatusCode))
logger.Debug(fmt.Sprintf("HTTP server returned body: %s", string(respData)))

return &protobufs.Empty{}, nil
}


@@ -162,25 +151,21 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific

func (s *HTTPPlugin) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) {
d := PluginConfig{}

err := yaml.Unmarshal(config.Config, &d)
if err != nil {
return nil, err
}

err = getTLSClient(&d)
if err != nil {
return nil, err
}

s.PluginConfigByName[d.Name] = d
logger.Debug(fmt.Sprintf("HTTP plugin '%s' use URL '%s'", d.Name, d.URL))

return &protobufs.Empty{}, err
}

func main() {
-handshake := plugin.HandshakeConfig{
+var handshake = plugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: "CROWDSEC_PLUGIN_KEY",
MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"),
@@ -5,12 +5,12 @@ import (
"fmt"
"os"

+"github.com/crowdsecurity/crowdsec/pkg/protobufs"
"github.com/hashicorp/go-hclog"
plugin "github.com/hashicorp/go-plugin"
-"github.com/slack-go/slack"
-"gopkg.in/yaml.v3"
-
-"github.com/crowdsecurity/crowdsec/pkg/protobufs"
+"github.com/slack-go/slack"
+"gopkg.in/yaml.v2"
)

type PluginConfig struct {

@@ -33,16 +33,13 @@ func (n *Notify) Notify(ctx context.Context, notification *protobufs.Notificatio
if _, ok := n.ConfigByName[notification.Name]; !ok {
return nil, fmt.Errorf("invalid plugin config name %s", notification.Name)
}

cfg := n.ConfigByName[notification.Name]

if cfg.LogLevel != nil && *cfg.LogLevel != "" {
logger.SetLevel(hclog.LevelFromString(*cfg.LogLevel))
}

logger.Info(fmt.Sprintf("found notify signal for %s config", notification.Name))
logger.Debug(fmt.Sprintf("posting to %s webhook, message %s", cfg.Webhook, notification.Text))

err := slack.PostWebhookContext(ctx, n.ConfigByName[notification.Name].Webhook, &slack.WebhookMessage{
Text: notification.Text,
})

@@ -55,19 +52,16 @@ func (n *Notify) Notify(ctx context.Context, notification *protobufs.Notificatio

func (n *Notify) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) {
d := PluginConfig{}

if err := yaml.Unmarshal(config.Config, &d); err != nil {
return nil, err
}

n.ConfigByName[d.Name] = d
logger.Debug(fmt.Sprintf("Slack plugin '%s' use URL '%s'", d.Name, d.Webhook))

return &protobufs.Empty{}, nil
}

func main() {
-handshake := plugin.HandshakeConfig{
+var handshake = plugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: "CROWDSEC_PLUGIN_KEY",
MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"),
@@ -10,11 +10,11 @@ import (
"os"
"strings"

+"github.com/crowdsecurity/crowdsec/pkg/protobufs"
"github.com/hashicorp/go-hclog"
plugin "github.com/hashicorp/go-plugin"
-"gopkg.in/yaml.v3"
-
-"github.com/crowdsecurity/crowdsec/pkg/protobufs"
+"gopkg.in/yaml.v2"
)

var logger hclog.Logger = hclog.New(&hclog.LoggerOptions{

@@ -44,7 +44,6 @@ func (s *Splunk) Notify(ctx context.Context, notification *protobufs.Notificatio
if _, ok := s.PluginConfigByName[notification.Name]; !ok {
return &protobufs.Empty{}, fmt.Errorf("splunk invalid config name %s", notification.Name)
}

cfg := s.PluginConfigByName[notification.Name]

if cfg.LogLevel != nil && *cfg.LogLevel != "" {

@@ -54,7 +53,6 @@ func (s *Splunk) Notify(ctx context.Context, notification *protobufs.Notificatio
logger.Info(fmt.Sprintf("received notify signal for %s config", notification.Name))

p := Payload{Event: notification.Text}

data, err := json.Marshal(p)
if err != nil {
return &protobufs.Empty{}, err

@@ -67,7 +65,6 @@ func (s *Splunk) Notify(ctx context.Context, notification *protobufs.Notificatio

req.Header.Add("Authorization", fmt.Sprintf("Splunk %s", cfg.Token))
logger.Debug(fmt.Sprintf("posting event %s to %s", string(data), req.URL))

resp, err := s.Client.Do(req.WithContext(ctx))
if err != nil {
return &protobufs.Empty{}, err

@@ -76,19 +73,15 @@ func (s *Splunk) Notify(ctx context.Context, notification *protobufs.Notificatio
if resp.StatusCode != http.StatusOK {
content, err := io.ReadAll(resp.Body)
if err != nil {
-return &protobufs.Empty{}, fmt.Errorf("got non 200 response and failed to read error %w", err)
+return &protobufs.Empty{}, fmt.Errorf("got non 200 response and failed to read error %s", err)
}

return &protobufs.Empty{}, fmt.Errorf("got non 200 response %s", string(content))
}

respData, err := io.ReadAll(resp.Body)
if err != nil {
-return &protobufs.Empty{}, fmt.Errorf("failed to read response body got error %w", err)
+return &protobufs.Empty{}, fmt.Errorf("failed to read response body got error %s", err)
}

logger.Debug(fmt.Sprintf("got response %s", string(respData)))

return &protobufs.Empty{}, nil
}


@@ -97,12 +90,11 @@ func (s *Splunk) Configure(ctx context.Context, config *protobufs.Config) (*prot
err := yaml.Unmarshal(config.Config, &d)
s.PluginConfigByName[d.Name] = d
logger.Debug(fmt.Sprintf("Splunk plugin '%s' use URL '%s'", d.Name, d.URL))

return &protobufs.Empty{}, err
}

func main() {
-handshake := plugin.HandshakeConfig{
+var handshake = plugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: "CROWDSEC_PLUGIN_KEY",
MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"),
go.mod (4 changes)

@@ -1,6 +1,6 @@
module github.com/crowdsecurity/crowdsec

-go 1.22
+go 1.21

// Don't use the toolchain directive to avoid uncontrolled downloads during
// a build, especially in sandboxed environments (freebsd, gentoo...).

@@ -27,7 +27,7 @@ require (
github.com/corazawaf/libinjection-go v0.1.2
github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607
github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26
-github.com/crowdsecurity/go-cs-lib v0.0.10
+github.com/crowdsecurity/go-cs-lib v0.0.11-0.20240422215546-8104b9078bfd
github.com/crowdsecurity/grokky v0.2.1
github.com/crowdsecurity/machineid v1.0.2
github.com/davecgh/go-spew v1.1.1

go.sum (4 changes)

@@ -102,8 +102,8 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h
github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA=
github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU=
github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk=
-github.com/crowdsecurity/go-cs-lib v0.0.10 h1:Twt/y/rYCUspGY1zxDnGurL2svRSREAz+2+puLepd9c=
-github.com/crowdsecurity/go-cs-lib v0.0.10/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k=
+github.com/crowdsecurity/go-cs-lib v0.0.11-0.20240422215546-8104b9078bfd h1:/fZYw5NQWI7OjRgoLdCMksnz7GbqHB07ykG8+9RtJNE=
+github.com/crowdsecurity/go-cs-lib v0.0.11-0.20240422215546-8104b9078bfd/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k=
github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4=
github.com/crowdsecurity/grokky v0.2.1/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM=
github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc=
@@ -76,24 +76,26 @@ func (c *Config) LoadDBConfig(inCli bool) error {
if c.DbConfig.UseWal == nil {
dbDir := filepath.Dir(c.DbConfig.DbPath)
isNetwork, fsType, err := types.IsNetworkFS(dbDir)
-switch {
-case err != nil:
+if err != nil {
log.Warnf("unable to determine if database is on network filesystem: %s", err)
log.Warning("You are using sqlite without WAL, this can have a performance impact. If you do not store the database in a network share, set db_config.use_wal to true. Set explicitly to false to disable this warning.")
-case isNetwork:
+return nil
+}
+if isNetwork {
log.Debugf("database is on network filesystem (%s), setting useWal to false", fsType)
c.DbConfig.UseWal = ptr.Of(false)
-default:
+} else {
log.Debugf("database is on local filesystem (%s), setting useWal to true", fsType)
c.DbConfig.UseWal = ptr.Of(true)
}
} else if *c.DbConfig.UseWal {
dbDir := filepath.Dir(c.DbConfig.DbPath)
isNetwork, fsType, err := types.IsNetworkFS(dbDir)
-switch {
-case err != nil:
+if err != nil {
log.Warnf("unable to determine if database is on network filesystem: %s", err)
-case isNetwork:
+return nil
+}
+if isNetwork {
log.Warnf("database seems to be stored on a network share (%s), but useWal is set to true. Proceed at your own risk.", fsType)
}
}


@@ -23,7 +23,7 @@ func (t *hubTransport) RoundTrip(req *http.Request) (*http.Response, error) {

// hubClient is the HTTP client used to communicate with the CrowdSec Hub.
var hubClient = &http.Client{
-Timeout: 120 * time.Second,
+Timeout: 120 * time.Second,
Transport: &hubTransport{http.DefaultTransport},
}
@@ -1,19 +1,17 @@
package cwhub

import (
+"context"
-"errors"
"fmt"
-"io"
-"io/fs"
-"net/http"
"os"
"path/filepath"
-"runtime"
"time"

"github.com/sirupsen/logrus"
"gopkg.in/yaml.v3"

+"github.com/crowdsecurity/go-cs-lib/downloader"
+
"github.com/crowdsecurity/crowdsec/pkg/types"
)


@@ -22,128 +20,6 @@ type DataSet struct {
Data []types.DataSource `yaml:"data,omitempty"`
}

// downloadFile downloads a file and writes it to disk, with no hash verification.
func downloadFile(url string, destPath string) error {
resp, err := hubClient.Get(url)
if err != nil {
return fmt.Errorf("while downloading %s: %w", url, err)
}
defer resp.Body.Close()

if resp.StatusCode != http.StatusOK {
return fmt.Errorf("bad http code %d for %s", resp.StatusCode, url)
}

// Download to a temporary location to avoid corrupting files
// that are currently in use or memory mapped.

tmpFile, err := os.CreateTemp(filepath.Dir(destPath), filepath.Base(destPath)+".*.tmp")
if err != nil {
return err
}

tmpFileName := tmpFile.Name()
defer func() {
tmpFile.Close()
os.Remove(tmpFileName)
}()

// avoid reading the whole file in memory
_, err = io.Copy(tmpFile, resp.Body)
if err != nil {
return err
}

if err = tmpFile.Sync(); err != nil {
return err
}

if err = tmpFile.Close(); err != nil {
return err
}

// a check on stdout is used while scripting to know if the hub has been upgraded
// and a configuration reload is required
// TODO: use a better way to communicate this
fmt.Printf("updated %s\n", filepath.Base(destPath))

if runtime.GOOS == "windows" {
// On Windows, rename will fail if the destination file already exists
// so we remove it first.
err = os.Remove(destPath)
switch {
case errors.Is(err, fs.ErrNotExist):
break
case err != nil:
return err
}
}

if err = os.Rename(tmpFileName, destPath); err != nil {
return err
}

return nil
}

// needsUpdate checks if a data file has to be downloaded (or updated).
// if the local file doesn't exist, update.
// if the remote is newer than the local file, update.
// if the remote has no modification date, but local file has been modified > a week ago, update.
func needsUpdate(destPath string, url string, logger *logrus.Logger) bool {
fileInfo, err := os.Stat(destPath)

switch {
case os.IsNotExist(err):
return true
case err != nil:
logger.Errorf("while getting %s: %s", destPath, err)
return true
}

resp, err := hubClient.Head(url)
if err != nil {
logger.Errorf("while getting %s: %s", url, err)
// Head failed, Get would likely fail too -> no update
return false
}
defer resp.Body.Close()

if resp.StatusCode != http.StatusOK {
logger.Errorf("bad http code %d for %s", resp.StatusCode, url)
return false
}

// update if local file is older than this
shelfLife := 7 * 24 * time.Hour

lastModify := fileInfo.ModTime()

localIsOld := lastModify.Add(shelfLife).Before(time.Now())

remoteLastModified := resp.Header.Get("Last-Modified")
if remoteLastModified == "" {
if localIsOld {
logger.Infof("no last modified date for %s, but local file is older than %s", url, shelfLife)
}

return localIsOld
}

lastAvailable, err := time.Parse(time.RFC1123, remoteLastModified)
if err != nil {
logger.Warningf("while parsing last modified date for %s: %s", url, err)
return localIsOld
}

if lastModify.Before(lastAvailable) {
logger.Infof("new version available, updating %s", destPath)
return true
}

return false
}

// downloadDataSet downloads all the data files for an item.
func downloadDataSet(dataFolder string, force bool, reader io.Reader, logger *logrus.Logger) error {
dec := yaml.NewDecoder(reader)

@@ -165,12 +41,31 @@ func downloadDataSet(dataFolder string, force bool, reader io.Reader, logger *lo
return err
}

-if force || needsUpdate(destPath, dataS.SourceURL, logger) {
logger.Debugf("downloading %s in %s", dataS.SourceURL, destPath)
+d := downloader.
+New().
+WithHTTPClient(hubClient).
+ToFile(destPath).
+CompareContent().
+WithLogger(logrus.WithFields(logrus.Fields{"url": dataS.SourceURL}))

-if err := downloadFile(dataS.SourceURL, destPath); err != nil {
return fmt.Errorf("while getting data: %w", err)
}
+if !force {
+d = d.WithLastModified().
+WithShelfLife(7 * 24 * time.Hour)
+}

+ctx := context.TODO()

+downloaded, err := d.Download(ctx, dataS.SourceURL)
+if err != nil {
+return fmt.Errorf("while getting data: %w", err)
+}

+if downloaded {
+logger.Infof("Downloaded %s", destPath)
+// a check on stdout is used while scripting to know if the hub has been upgraded
+// and a configuration reload is required
+// TODO: use a better way to communicate this
+fmt.Printf("updated %s\n", destPath)
+}
}
}
@@ -1,56 +0,0 @@
package cwhub

import (
"io"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/crowdsecurity/go-cs-lib/cstest"
)

func TestDownloadFile(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/xx":
w.WriteHeader(http.StatusOK)
_, _ = io.WriteString(w, "example content oneoneone")
default:
w.WriteHeader(http.StatusNotFound)
_, _ = io.WriteString(w, "not found")
}
}))
defer ts.Close()

dest := filepath.Join(t.TempDir(), "example.txt")
defer os.Remove(dest)

err := downloadFile(ts.URL+"/xx", dest)
require.NoError(t, err)

content, err := os.ReadFile(dest)
assert.Equal(t, "example content oneoneone", string(content))
require.NoError(t, err)

// bad uri
err = downloadFile("https://zz.com", dest)
cstest.RequireErrorContains(t, err, "lookup zz.com")
cstest.RequireErrorContains(t, err, "no such host")

// 404
err = downloadFile(ts.URL+"/x", dest)
cstest.RequireErrorContains(t, err, "bad http code 404")

// bad target
err = downloadFile(ts.URL+"/xx", "")
cstest.RequireErrorContains(t, err, cstest.PathNotFoundMessage)

// destination directory does not exist
err = downloadFile(ts.URL+"/xx", filepath.Join(t.TempDir(), "missing/example.txt"))
cstest.RequireErrorContains(t, err, cstest.PathNotFoundMessage)
}
@@ -1,7 +1,6 @@
package cwhub

import (
-"bytes"
"encoding/json"
"errors"
"fmt"

@@ -21,8 +20,8 @@ type Hub struct {
items HubItems // Items read from HubDir and InstallDir
local *csconfig.LocalHubCfg
remote *RemoteHubCfg
-Warnings []string // Warnings encountered during sync
logger *logrus.Logger
+Warnings []string // Warnings encountered during sync
}

// GetDataDir returns the data directory, where data sets are installed.

@@ -150,27 +149,17 @@ func (h *Hub) ItemStats() []string {

// updateIndex downloads the latest version of the index and writes it to disk if it changed.
func (h *Hub) updateIndex() error {
-body, err := h.remote.fetchIndex()
+downloaded, err := h.remote.fetchIndex(h.local.HubIndexFile)
if err != nil {
return err
}

-oldContent, err := os.ReadFile(h.local.HubIndexFile)
-if err != nil {
-if !os.IsNotExist(err) {
-h.logger.Warningf("failed to read hub index: %s", err)
-}
-} else if bytes.Equal(body, oldContent) {
+if downloaded {
+h.logger.Infof("Wrote index to %s", h.local.HubIndexFile)
+} else {
h.logger.Info("hub index is up to date")
-return nil
}

-if err = os.WriteFile(h.local.HubIndexFile, body, 0o644); err != nil {
-return fmt.Errorf("failed to write hub index: %w", err)
-}
-
-h.logger.Infof("Wrote index to %s, %d bytes", h.local.HubIndexFile, len(body))
-
return nil
}


@@ -29,6 +29,10 @@ func TestUpdateIndex(t *testing.T) {
tmpIndex, err := os.CreateTemp("", "index.json")
require.NoError(t, err)

+// close the file to avoid preventing the rename on windows
+err = tmpIndex.Close()
+require.NoError(t, err)
+
t.Cleanup(func() {
os.Remove(tmpIndex.Name())
})

@@ -72,5 +76,5 @@ func TestUpdateIndex(t *testing.T) {
hub.local.HubIndexFile = "/does/not/exist/index.json"

err = hub.updateIndex()
-cstest.RequireErrorContains(t, err, "failed to write hub index: open /does/not/exist/index.json:")
+cstest.RequireErrorContains(t, err, "failed to create temporary download file for /does/not/exist/index.json:")
}
@@ -29,10 +29,8 @@ const (
versionFuture // local version is higher latest, but is included in the index: should not happen
)

-var (
-// The order is important, as it is used to range over sub-items in collections.
-ItemTypes = []string{PARSERS, POSTOVERFLOWS, SCENARIOS, CONTEXTS, APPSEC_CONFIGS, APPSEC_RULES, COLLECTIONS}
-)
+// The order is important, as it is used to range over sub-items in collections.
+var ItemTypes = []string{PARSERS, POSTOVERFLOWS, SCENARIOS, CONTEXTS, APPSEC_CONFIGS, APPSEC_RULES, COLLECTIONS}

type HubItems map[string]map[string]*Item


@@ -48,13 +48,13 @@ func (i *Item) Install(force bool, downloadOnly bool) error {
}
}

-filePath, err := i.downloadLatest(force, true)
+downloaded, err := i.downloadLatest(force, true)
if err != nil {
return err
}

-if downloadOnly {
-i.hub.logger.Infof("Downloaded %s to %s", i.Name, filePath)
+if downloadOnly && downloaded {
+i.hub.logger.Infof("Downloaded %s", i.Name)
return nil
}

@@ -62,6 +62,11 @@ func (i *Item) Install(force bool, downloadOnly bool) error {
return fmt.Errorf("while enabling %s: %w", i.Name, err)
}

+// a check on stdout is used while scripting to know if the hub has been upgraded
+// and a configuration reload is required
+// TODO: use a better way to communicate this
+fmt.Printf("installed %s\n", i.Name)
+
i.hub.logger.Infof("Enabled %s", i.Name)

return nil


@@ -35,7 +35,8 @@ func testTaint(hub *Hub, t *testing.T, item *Item) {
// truncate the file
f, err := os.Create(item.State.LocalPath)
require.NoError(t, err)
-f.Close()
+err = f.Close()
+require.NoError(t, err)

// Local sync and check status
err = hub.localSync()
@@ -3,23 +3,20 @@ package cwhub
// Install, upgrade and remove items from the hub to the local configuration

import (
-"bytes"
-"crypto/sha256"
-"encoding/hex"
+"context"
"errors"
"fmt"
-"io"
-"net/http"
"os"
"path/filepath"

"github.com/sirupsen/logrus"

+"github.com/crowdsecurity/go-cs-lib/downloader"
+
"github.com/crowdsecurity/crowdsec/pkg/emoji"
)

// Upgrade downloads and applies the last version of the item from the hub.
func (i *Item) Upgrade(force bool) (bool, error) {
-updated := false
-
if i.State.IsLocal() {
i.hub.logger.Infof("not upgrading %s: local item", i.Name)
return false, nil

@@ -54,21 +51,21 @@ func (i *Item) Upgrade(force bool) (bool, error) {
if i.State.Tainted {
i.hub.logger.Warningf("%v %s is tainted, --force to overwrite", emoji.Warning, i.Name)
}
-} else {
// a check on stdout is used while scripting to know if the hub has been upgraded
// and a configuration reload is required
// TODO: use a better way to communicate this
fmt.Printf("updated %s\n", i.Name)
i.hub.logger.Infof("%v %s: updated", emoji.Package, i.Name)

-updated = true
+return false, nil
}

-return updated, nil
// a check on stdout is used while scripting to know if the hub has been upgraded
// and a configuration reload is required
// TODO: use a better way to communicate this
fmt.Printf("updated %s\n", i.Name)
i.hub.logger.Infof("%v %s: updated", emoji.Package, i.Name)

+return true, nil
}

// downloadLatest downloads the latest version of the item to the hub directory.
-func (i *Item) downloadLatest(overwrite bool, updateOnly bool) (string, error) {
+func (i *Item) downloadLatest(overwrite bool, updateOnly bool) (bool, error) {
i.hub.logger.Debugf("Downloading %s %s", i.Type, i.Name)

for _, sub := range i.SubItems() {

@@ -84,98 +81,84 @@ func (i *Item) downloadLatest(overwrite bool, updateOnly bool) (string, error) {
i.hub.logger.Tracef("collection, recurse")

if _, err := sub.downloadLatest(overwrite, updateOnly); err != nil {
-return "", err
+return false, err
}
}

downloaded := sub.State.Downloaded

if _, err := sub.download(overwrite); err != nil {
-return "", err
+return false, err
}

// We need to enable an item when it has been added to a collection since latest release of the collection.
// We check if sub.Downloaded is false because maybe the item has been disabled by the user.
if !sub.State.Installed && !downloaded {
if err := sub.enable(); err != nil {
-return "", fmt.Errorf("enabling '%s': %w", sub.Name, err)
+return false, fmt.Errorf("enabling '%s': %w", sub.Name, err)
}
}
}

if !i.State.Installed && updateOnly && i.State.Downloaded && !overwrite {
i.hub.logger.Debugf("skipping upgrade of %s: not installed", i.Name)
-return "", nil
+return false, nil
}

-ret, err := i.download(overwrite)
-if err != nil {
-return "", err
-}
-
-return ret, nil
+return i.download(overwrite)
}

-// FetchLatest downloads the latest item from the hub, verifies the hash and returns the content and the used url.
-func (i *Item) FetchLatest() ([]byte, string, error) {
-if i.latestHash() == "" {
-return nil, "", errors.New("latest hash missing from index")
-}
-
+// FetchContentTo downloads the last version of the item's YAML file to the specified path.
+func (i *Item) FetchContentTo(destPath string) (bool, string, error) {
url, err := i.hub.remote.urlTo(i.RemotePath)
if err != nil {
-return nil, "", fmt.Errorf("failed to build request: %w", err)
+return false, "", fmt.Errorf("failed to build request: %w", err)
}

-resp, err := hubClient.Get(url)
+wantHash := i.latestHash()
+if wantHash == "" {
+return false, "", errors.New("latest hash missing from index")
+}
+
+d := downloader.
+New().
+WithHTTPClient(hubClient).
+ToFile(destPath).
+WithMakeDirs(true).
+WithLogger(logrus.WithFields(logrus.Fields{"url": url})).
+CompareContent().
+VerifyHash("sha256", wantHash)
+
+// TODO: recommend hub update if hash does not match
+
+ctx := context.TODO()
+
+downloaded, err := d.Download(ctx, url)
if err != nil {
-return nil, "", err
-}
-defer resp.Body.Close()
-
-if resp.StatusCode != http.StatusOK {
-return nil, "", fmt.Errorf("bad http code %d", resp.StatusCode)
+return false, "", fmt.Errorf("while downloading %s to %s: %w", i.Name, url, err)
}

-body, err := io.ReadAll(resp.Body)
-if err != nil {
-return nil, "", err
-}
-
-hash := sha256.New()
-if _, err = hash.Write(body); err != nil {
-return nil, "", fmt.Errorf("while hashing %s: %w", i.Name, err)
-}
-
-meow := hex.EncodeToString(hash.Sum(nil))
-if meow != i.Versions[i.Version].Digest {
-i.hub.logger.Errorf("Downloaded version doesn't match index, please 'hub update'")
-i.hub.logger.Debugf("got %s, expected %s", meow, i.Versions[i.Version].Digest)
-
-return nil, "", errors.New("invalid download hash")
-}
-
-return body, url, nil
+return downloaded, url, nil
}

// download downloads the item from the hub and writes it to the hub directory.
-func (i *Item) download(overwrite bool) (string, error) {
+func (i *Item) download(overwrite bool) (bool, error) {
// ensure that target file is within target dir
finalPath, err := i.downloadPath()
if err != nil {
-return "", err
+return false, err
}

if i.State.IsLocal() {
i.hub.logger.Warningf("%s is local, can't download", i.Name)
-return finalPath, nil
+return false, nil
}

// if user didn't --force, don't overwrite local, tainted, up-to-date files
if !overwrite {
if i.State.Tainted {
i.hub.logger.Debugf("%s: tainted, not updated", i.Name)
-return "", nil
+return false, nil
}

if i.State.UpToDate {

@@ -184,45 +167,32 @@ func (i *Item) download(overwrite bool) (string, error) {
}
}

-body, url, err := i.FetchLatest()
+downloaded, _, err := i.FetchContentTo(finalPath)
if err != nil {
-what := i.Name
-if url != "" {
-what += " from " + url
-}
-
-return "", fmt.Errorf("while downloading %s: %w", what, err)
+return false, fmt.Errorf("while downloading %s: %w", i.Name, err)
}

-// all good, install
-
-parentDir := filepath.Dir(finalPath)
-
-if err = os.MkdirAll(parentDir, os.ModePerm); err != nil {
-return "", fmt.Errorf("while creating %s: %w", parentDir, err)
-}
-
-// check actual file
-if _, err = os.Stat(finalPath); !os.IsNotExist(err) {
-i.hub.logger.Warningf("%s: overwrite", i.Name)
-i.hub.logger.Debugf("target: %s", finalPath)
-} else {
-i.hub.logger.Infof("%s: OK", i.Name)
-}
-
-if err = os.WriteFile(finalPath, body, 0o644); err != nil {
-return "", fmt.Errorf("while writing %s: %w", finalPath, err)
+if downloaded {
+i.hub.logger.Infof("Downloaded %s", i.Name)
}

i.State.Downloaded = true
i.State.Tainted = false
i.State.UpToDate = true

-if err = downloadDataSet(i.hub.local.InstallDataDir, overwrite, bytes.NewReader(body), i.hub.logger); err != nil {
-return "", fmt.Errorf("while downloading data for %s: %w", i.FileName, err)
+// read content to get the list of data files
+reader, err := os.Open(finalPath)
+if err != nil {
+return false, fmt.Errorf("while opening %s: %w", finalPath, err)
}

-return finalPath, nil
+defer reader.Close()
+
+if err = downloadDataSet(i.hub.local.InstallDataDir, overwrite, reader, i.hub.logger); err != nil {
+return false, fmt.Errorf("while downloading data for %s: %w", i.FileName, err)
}

+return true, nil
}

// DownloadDataIfNeeded downloads the data set for the item.
@@ -1,9 +1,12 @@
package cwhub

import (
+"context"
"fmt"
-"io"
-"net/http"

+"github.com/sirupsen/logrus"
+
+"github.com/crowdsecurity/go-cs-lib/downloader"
)

// RemoteHubCfg is used to retrieve index and items from the remote hub.

@@ -28,34 +31,28 @@ func (r *RemoteHubCfg) urlTo(remotePath string) (string, error) {
}

// fetchIndex downloads the index from the hub and returns the content.
-func (r *RemoteHubCfg) fetchIndex() ([]byte, error) {
+func (r *RemoteHubCfg) fetchIndex(destPath string) (bool, error) {
if r == nil {
-return nil, ErrNilRemoteHub
+return false, ErrNilRemoteHub
}

url, err := r.urlTo(r.IndexPath)
if err != nil {
-return nil, fmt.Errorf("failed to build hub index request: %w", err)
+return false, fmt.Errorf("failed to build hub index request: %w", err)
}

-resp, err := hubClient.Get(url)
+ctx := context.TODO()
+
+downloaded, err := downloader.
+New().
+WithHTTPClient(hubClient).
+ToFile(destPath).
+CompareContent().
+WithLogger(logrus.WithFields(logrus.Fields{"url": url})).
+Download(ctx, url)
if err != nil {
-return nil, fmt.Errorf("failed http request for hub index: %w", err)
-}
-defer resp.Body.Close()
-
-if resp.StatusCode != http.StatusOK {
-if resp.StatusCode == http.StatusNotFound {
-return nil, IndexNotFoundError{url, r.Branch}
-}
-
-return nil, fmt.Errorf("bad http code %d for %s", resp.StatusCode, url)
+return false, err
}

-body, err := io.ReadAll(resp.Body)
-if err != nil {
-return nil, fmt.Errorf("failed to read request answer for hub index: %w", err)
-}
-
-return body, nil
+return downloaded, nil
}
@@ -1,10 +1,7 @@
package cwhub

import (
-"crypto/sha256"
-"encoding/hex"
"fmt"
-"io"
"os"
"path/filepath"
"slices"

@@ -12,6 +9,7 @@ import (
"strings"

"github.com/Masterminds/semver/v3"
+"github.com/crowdsecurity/go-cs-lib/downloader"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v3"
)

@@ -38,29 +36,13 @@ func linkTarget(path string, logger *logrus.Logger) (string, error) {
return hubpath, nil
}

-func getSHA256(filepath string) (string, error) {
-f, err := os.Open(filepath)
-if err != nil {
-return "", fmt.Errorf("unable to open '%s': %w", filepath, err)
-}
-
-defer f.Close()
-
-h := sha256.New()
-if _, err := io.Copy(h, f); err != nil {
-return "", fmt.Errorf("unable to calculate sha256 of '%s': %w", filepath, err)
-}
-
-return hex.EncodeToString(h.Sum(nil)), nil
-}
-
// information used to create a new Item, from a file path.
type itemFileInfo struct {
-inhub bool
fname string
stage string
ftype string
fauthor string
+inhub bool
}

func (h *Hub) getItemFileInfo(path string, logger *logrus.Logger) (*itemFileInfo, error) {

@@ -466,7 +448,7 @@ func (h *Hub) localSync() error {
func (i *Item) setVersionState(path string, inhub bool) error {
var err error

-i.State.LocalHash, err = getSHA256(path)
+i.State.LocalHash, err = downloader.SHA256(path)
if err != nil {
return fmt.Errorf("failed to get sha256 of %s: %w", path, err)
}
@@ -636,24 +636,14 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [
if len(alertItem.Meta) > 0 {
metaBulk := make([]*ent.MetaCreate, len(alertItem.Meta))
for i, metaItem := range alertItem.Meta {
-key := metaItem.Key
-value := metaItem.Value
-if len(metaItem.Value) > 4095 {
-c.Log.Warningf("truncated meta %s : value too long", metaItem.Key)
-value = value[:4095]
-}
-if len(metaItem.Key) > 255 {
-c.Log.Warningf("truncated meta %s : key too long", metaItem.Key)
-key = key[:255]
-}
metaBulk[i] = c.Ent.Meta.Create().
-SetKey(key).
-SetValue(value)
+SetKey(metaItem.Key).
+SetValue(metaItem.Value)
}

metas, err = c.Ent.Meta.CreateBulk(metaBulk...).Save(c.CTX)
if err != nil {
-c.Log.Warningf("error creating alert meta: %s", err)
return nil, errors.Wrapf(BulkError, "creating alert meta: %s", err)
}
}
@@ -22,70 +22,69 @@ import (
type Node struct {
FormatVersion string `yaml:"format"`
// Enable config + runtime debug of node via config o/
//Enable config + runtime debug of node via config o/
Debug bool `yaml:"debug,omitempty"`
// If enabled, the node (and its child) will report their own statistics
//If enabled, the node (and its child) will report their own statistics
Profiling bool `yaml:"profiling,omitempty"`
// Name, author, description and reference(s) for parser pattern
//Name, author, description and reference(s) for parser pattern
Name string `yaml:"name,omitempty"`
Author string `yaml:"author,omitempty"`
Description string `yaml:"description,omitempty"`
References []string `yaml:"references,omitempty"`
// if debug is present in the node, keep its specific Logger in runtime structure
//if debug is present in the node, keep its specific Logger in runtime structure
Logger *log.Entry `yaml:"-"`
// This is mostly a hack to make writing less repetitive.
// relying on stage, we know which field to parse, and we
// can also promote log to next stage on success
//This is mostly a hack to make writing less repetitive.
//relying on stage, we know which field to parse, and we
//can also promote log to next stage on success
Stage string `yaml:"stage,omitempty"`
// OnSuccess allows to tag a node to be able to move log to next stage on success
//OnSuccess allows to tag a node to be able to move log to next stage on success
OnSuccess string `yaml:"onsuccess,omitempty"`
rn string // this is only for us in debug, a random generated name for each node
// Filter is executed at runtime (with current log line as context)
// and must succeed or node is exited
rn string //this is only for us in debug, a random generated name for each node
//Filter is executed at runtime (with current log line as context)
//and must succeed or node is exited
Filter string `yaml:"filter,omitempty"`
RunTimeFilter *vm.Program `yaml:"-" json:"-"` // the actual compiled filter
// If node has leafs, execute all of them until one asks for a 'break'
RunTimeFilter *vm.Program `yaml:"-" json:"-"` //the actual compiled filter
//If node has leafs, execute all of them until one asks for a 'break'
LeavesNodes []Node `yaml:"nodes,omitempty"`
// Flag used to describe when to 'break' or return an 'error'
//Flag used to describe when to 'break' or return an 'error'
EnrichFunctions EnricherCtx

/* If the node is actually a leaf, it can have : grok, enrich, statics */
// pattern_syntax are named grok patterns that are re-utilized over several grok patterns
//pattern_syntax are named grok patterns that are re-utilized over several grok patterns
SubGroks yaml.MapSlice `yaml:"pattern_syntax,omitempty"`

// Holds a grok pattern
//Holds a grok pattern
Grok GrokPattern `yaml:"grok,omitempty"`
// Statics can be present in any type of node and is executed last
//Statics can be present in any type of node and is executed last
Statics []ExtraField `yaml:"statics,omitempty"`
// Stash allows to capture data from the log line and store it in an accessible cache
//Stash allows to capture data from the log line and store it in an accessible cache
Stash []DataCapture `yaml:"stash,omitempty"`
// Whitelists
//Whitelists
Whitelist Whitelist `yaml:"whitelist,omitempty"`
Data []*types.DataSource `yaml:"data,omitempty"`
}

func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error {
// stage is being set automagically

//stage is being set automagically
if n.Stage == "" {
return errors.New("stage needs to be an existing stage")
return fmt.Errorf("stage needs to be an existing stage")
}

/* "" behaves like continue */
if n.OnSuccess != "continue" && n.OnSuccess != "next_stage" && n.OnSuccess != "" {
return fmt.Errorf("onsuccess '%s' not continue,next_stage", n.OnSuccess)
}

if n.Filter != "" && n.RunTimeFilter == nil {
return fmt.Errorf("non-empty filter '%s' was not compiled", n.Filter)
}

if n.Grok.RunTimeRegexp != nil || n.Grok.TargetField != "" {
if n.Grok.TargetField == "" && n.Grok.ExpValue == "" {
return errors.New("grok requires 'expression' or 'apply_on'")
return fmt.Errorf("grok requires 'expression' or 'apply_on'")
}

if n.Grok.RegexpName == "" && n.Grok.RegexpValue == "" {
return errors.New("grok needs 'pattern' or 'name'")
return fmt.Errorf("grok needs 'pattern' or 'name'")
}
}

@@ -94,7 +93,6 @@ func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error {
if static.ExpValue == "" {
return fmt.Errorf("static %d : when method is set, expression must be present", idx)
}

if _, ok := ectx.Registered[static.Method]; !ok {
log.Warningf("the method '%s' doesn't exist or the plugin has not been initialized", static.Method)
}

@@ -102,7 +100,6 @@ func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error {
if static.Meta == "" && static.Parsed == "" && static.TargetByName == "" {
return fmt.Errorf("static %d : at least one of meta/event/target must be set", idx)
}

if static.Value == "" && static.RunTimeValue == nil {
return fmt.Errorf("static %d value or expression must be set", idx)
}

@@ -113,76 +110,72 @@ func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error {
if stash.Name == "" {
return fmt.Errorf("stash %d : name must be set", idx)
}

if stash.Value == "" {
return fmt.Errorf("stash %s : value expression must be set", stash.Name)
}

if stash.Key == "" {
return fmt.Errorf("stash %s : key expression must be set", stash.Name)
}

if stash.TTL == "" {
return fmt.Errorf("stash %s : ttl must be set", stash.Name)
}

if stash.Strategy == "" {
stash.Strategy = "LRU"
}
// should be configurable
//should be configurable
if stash.MaxMapSize == 0 {
stash.MaxMapSize = 100
}
}

return nil
}

func (n *Node) processFilter(cachedExprEnv map[string]interface{}) (bool, error) {
func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[string]interface{}) (bool, error) {
var NodeState bool
var NodeHasOKGrok bool
clog := n.Logger
if n.RunTimeFilter == nil {
clog.Tracef("Node has not filter, enter")
return true, nil
}

// Evaluate node's filter
output, err := exprhelpers.Run(n.RunTimeFilter, cachedExprEnv, clog, n.Debug)
if err != nil {
clog.Warningf("failed to run filter : %v", err)
clog.Debugf("Event leaving node : ko")
cachedExprEnv := expressionEnv

return false, nil
}

switch out := output.(type) {
case bool:
if !out {
clog.Debugf("Event leaving node : ko (failed filter)")
clog.Tracef("Event entering node")
if n.RunTimeFilter != nil {
//Evaluate node's filter
output, err := exprhelpers.Run(n.RunTimeFilter, cachedExprEnv, clog, n.Debug)
if err != nil {
clog.Warningf("failed to run filter : %v", err)
clog.Debugf("Event leaving node : ko")
return false, nil
}
default:
clog.Warningf("Expr '%s' returned non-bool, abort : %T", n.Filter, output)
clog.Debugf("Event leaving node : ko")

return false, nil
switch out := output.(type) {
case bool:
if !out {
clog.Debugf("Event leaving node : ko (failed filter)")
return false, nil
}
default:
clog.Warningf("Expr '%s' returned non-bool, abort : %T", n.Filter, output)
clog.Debugf("Event leaving node : ko")
return false, nil
}
NodeState = true
} else {
clog.Tracef("Node has not filter, enter")
NodeState = true
}

return true, nil
}

func (n *Node) processWhitelist(cachedExprEnv map[string]interface{}, p *types.Event) (bool, error) {
var exprErr error

if n.Name != "" {
NodesHits.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc()
}
exprErr := error(nil)
isWhitelisted := n.CheckIPsWL(p)
if !isWhitelisted {
isWhitelisted, exprErr = n.CheckExprWL(cachedExprEnv, p)
}

if exprErr != nil {
// Previous code returned nil if there was an error, so we keep this behavior
return false, nil //nolint:nilerr
}

if isWhitelisted && !p.Whitelisted {
p.Whitelisted = true
p.WhitelistReason = n.Whitelist.Reason

@@ -192,51 +185,18 @@ func (n *Node) processWhitelist(cachedExprEnv map[string]interface{}, p *types.E
for k := range p.Overflow.Sources {
ips = append(ips, k)
}

n.Logger.Infof("Ban for %s whitelisted, reason [%s]", strings.Join(ips, ","), n.Whitelist.Reason)

clog.Infof("Ban for %s whitelisted, reason [%s]", strings.Join(ips, ","), n.Whitelist.Reason)
p.Overflow.Whitelisted = true
}
}

return isWhitelisted, nil
}

func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[string]interface{}) (bool, error) {
var NodeHasOKGrok bool

clog := n.Logger

cachedExprEnv := expressionEnv

clog.Tracef("Event entering node")

NodeState, err := n.processFilter(cachedExprEnv)
if err != nil {
return false, err
}

if !NodeState {
return false, nil
}

if n.Name != "" {
NodesHits.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc()
}

isWhitelisted, err := n.processWhitelist(cachedExprEnv, p)
if err != nil {
return false, err
}

// Process grok if present, should be exclusive with nodes :)
//Process grok if present, should be exclusive with nodes :)
gstr := ""

if n.Grok.RunTimeRegexp != nil {
clog.Tracef("Processing grok pattern : %s : %p", n.Grok.RegexpName, n.Grok.RunTimeRegexp)
// for unparsed, parsed etc. set sensible defaults to reduce user hassle
//for unparsed, parsed etc. set sensible defaults to reduce user hassle
if n.Grok.TargetField != "" {
// it's a hack to avoid using real reflect
//it's a hack to avoid using real reflect
if n.Grok.TargetField == "Line.Raw" {
gstr = p.Line.Raw
} else if val, ok := p.Parsed[n.Grok.TargetField]; ok {

@@ -251,7 +211,6 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
clog.Warningf("failed to run RunTimeValue : %v", err)
NodeState = false
}

switch out := output.(type) {
case string:
gstr = out

@@ -270,14 +229,12 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
} else {
groklabel = n.Grok.RegexpName
}

grok := n.Grok.RunTimeRegexp.Parse(gstr)
if len(grok) > 0 {
/*tag explicitly that the *current* node had a successful grok pattern. it's important to know success state*/
NodeHasOKGrok = true

clog.Debugf("+ Grok '%s' returned %d entries to merge in Parsed", groklabel, len(grok))
// We managed to grok stuff, merged into parse
//We managed to grok stuff, merged into parse
for k, v := range grok {
clog.Debugf("\t.Parsed['%s'] = '%s'", k, v)
p.Parsed[k] = v

@@ -289,37 +246,34 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
return false, err
}
} else {
// grok failed, node failed
//grok failed, node failed
clog.Debugf("+ Grok '%s' didn't return data on '%s'", groklabel, gstr)
NodeState = false
}

} else {
clog.Tracef("! No grok pattern : %p", n.Grok.RunTimeRegexp)
}

// Process the stash (data collection) if : a grok was present and succeeded, or if there is no grok
//Process the stash (data collection) if : a grok was present and succeeded, or if there is no grok
if NodeHasOKGrok || n.Grok.RunTimeRegexp == nil {
for idx, stash := range n.Stash {
var (
key string
value string
)

var value string
var key string
if stash.ValueExpression == nil {
clog.Warningf("Stash %d has no value expression, skipping", idx)
continue
}

if stash.KeyExpression == nil {
clog.Warningf("Stash %d has no key expression, skipping", idx)
continue
}
// collect the data
//collect the data
output, err := exprhelpers.Run(stash.ValueExpression, cachedExprEnv, clog, n.Debug)
if err != nil {
clog.Warningf("Error while running stash val expression : %v", err)
}
// can we expect anything else than a string ?
//can we expect anything else than a string ?
switch output := output.(type) {
case string:
value = output

@@ -328,12 +282,12 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
continue
}

// collect the key
//collect the key
output, err = exprhelpers.Run(stash.KeyExpression, cachedExprEnv, clog, n.Debug)
if err != nil {
clog.Warningf("Error while running stash key expression : %v", err)
}
// can we expect anything else than a string ?
//can we expect anything else than a string ?
switch output := output.(type) {
case string:
key = output

@@ -345,7 +299,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
}
}

// Iterate on leafs
//Iterate on leafs
for _, leaf := range n.LeavesNodes {
ret, err := leaf.process(p, ctx, cachedExprEnv)
if err != nil {

@@ -353,9 +307,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
clog.Debugf("Event leaving node : ko")
return false, err
}

clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaf.rn, ret, n.OnSuccess)

if ret {
NodeState = true
/* if child is successful, stop processing */

@@ -376,14 +328,12 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri

clog.Tracef("State after nodes : %v", NodeState)

// grok or leafs failed, don't process statics
//grok or leafs failed, don't process statics
if !NodeState {
if n.Name != "" {
NodesHitsKo.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc()
}

clog.Debugf("Event leaving node : ko")

return NodeState, nil
}

@@ -410,10 +360,9 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
if NodeState {
clog.Debugf("Event leaving node : ok")
log.Tracef("node is successful, check strategy")

if n.OnSuccess == "next_stage" {
idx := stageidx(p.Stage, ctx.Stages)
// we're at the last stage
//we're at the last stage
if idx+1 == len(ctx.Stages) {
clog.Debugf("node reached the last stage : %s", p.Stage)
} else {

@@ -426,16 +375,15 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
} else {
clog.Debugf("Event leaving node : ko")
}

clog.Tracef("Node successful, continue")

return NodeState, nil
}

func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
var err error
var valid bool

valid := false
valid = false

dumpr := spew.ConfigState{MaxDepth: 1, DisablePointerAddresses: true}
n.rn = seed.Generate()

@@ -445,11 +393,10 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
/* if the node has debugging enabled, create a specific logger with debug
that will be used only for processing this node ;) */
if n.Debug {
clog := log.New()
var clog = log.New()
if err = types.ConfigureLogger(clog); err != nil {
log.Fatalf("While creating bucket-specific logger : %s", err)
}

clog.SetLevel(log.DebugLevel)
n.Logger = clog.WithFields(log.Fields{
"id": n.rn,

@@ -467,7 +414,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {

n.Logger.Tracef("Compiling : %s", dumpr.Sdump(n))

// compile filter if present
//compile filter if present
if n.Filter != "" {
n.RunTimeFilter, err = expr.Compile(n.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
if err != nil {

@@ -478,15 +425,12 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
/* handle pattern_syntax and groks */
for _, pattern := range n.SubGroks {
n.Logger.Tracef("Adding subpattern '%s' : '%s'", pattern.Key, pattern.Value)

if err = pctx.Grok.Add(pattern.Key.(string), pattern.Value.(string)); err != nil {
if errors.Is(err, grokky.ErrAlreadyExist) {
n.Logger.Warningf("grok '%s' already registred", pattern.Key)
continue
}

n.Logger.Errorf("Unable to compile subpattern %s : %v", pattern.Key, err)

return err
}
}

@@ -494,36 +438,28 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
/* load grok by name or compile in-place */
if n.Grok.RegexpName != "" {
n.Logger.Tracef("+ Regexp Compilation '%s'", n.Grok.RegexpName)

n.Grok.RunTimeRegexp, err = pctx.Grok.Get(n.Grok.RegexpName)
if err != nil {
return fmt.Errorf("unable to find grok '%s' : %v", n.Grok.RegexpName, err)
}

if n.Grok.RunTimeRegexp == nil {
return fmt.Errorf("empty grok '%s'", n.Grok.RegexpName)
}

n.Logger.Tracef("%s regexp: %s", n.Grok.RegexpName, n.Grok.RunTimeRegexp.String())

valid = true
} else if n.Grok.RegexpValue != "" {
if strings.HasSuffix(n.Grok.RegexpValue, "\n") {
n.Logger.Debugf("Beware, pattern ends with \\n : '%s'", n.Grok.RegexpValue)
}

n.Grok.RunTimeRegexp, err = pctx.Grok.Compile(n.Grok.RegexpValue)
if err != nil {
return fmt.Errorf("failed to compile grok '%s': %v", n.Grok.RegexpValue, err)
}

if n.Grok.RunTimeRegexp == nil {
// We shouldn't be here because compilation succeeded, so regexp shouldn't be nil
return fmt.Errorf("grok compilation failure: %s", n.Grok.RegexpValue)
}

n.Logger.Tracef("%s regexp : %s", n.Grok.RegexpValue, n.Grok.RunTimeRegexp.String())

valid = true
}

@@ -537,7 +473,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
}

/* load grok statics */
// compile expr statics if present
//compile expr statics if present
for idx := range n.Grok.Statics {
if n.Grok.Statics[idx].ExpValue != "" {
n.Grok.Statics[idx].RunTimeValue, err = expr.Compile(n.Grok.Statics[idx].ExpValue,

@@ -546,7 +482,6 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
return err
}
}

valid = true
}

@@ -570,7 +505,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
}

logLvl := n.Logger.Logger.GetLevel()
// init the cache, does it make sense to create it here just to be sure everything is fine ?
//init the cache, does it make sense to create it here just to be sure everything is fine ?
if err = cache.CacheInit(cache.CacheCfg{
Size: n.Stash[i].MaxMapSize,
TTL: n.Stash[i].TTLVal,

@@ -591,18 +526,14 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
if !n.LeavesNodes[idx].Debug && n.Debug {
n.LeavesNodes[idx].Debug = true
}

if !n.LeavesNodes[idx].Profiling && n.Profiling {
n.LeavesNodes[idx].Profiling = true
}

n.LeavesNodes[idx].Stage = n.Stage

err = n.LeavesNodes[idx].compile(pctx, ectx)
if err != nil {
return err
}

valid = true
}

@@ -615,7 +546,6 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
return err
}
}

valid = true
}

@@ -624,15 +554,13 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
if err != nil {
return err
}

valid = valid || whitelistValid

if !valid {
/* node is empty, error force return */
n.Logger.Error("Node is empty or invalid, abort")
n.Stage = ""

return errors.New("Node is empty")
return fmt.Errorf("Node is empty")
}

if err := n.validate(pctx, ectx); err != nil {

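For context: the filter handling reshuffled in the parser node changes above rests on one contract — a node filter is a compiled expr program that must evaluate to a bool, and errors or non-bool results make the node give up on the event. A standalone sketch of that contract using the expr-lang library that crowdsec wraps in exprhelpers; the event map and expression are illustrative only.

package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

func main() {
	// illustrative event environment; the real parser passes a cached expr env built from types.Event
	env := map[string]interface{}{
		"evt": map[string]interface{}{"program": "sshd"},
	}

	// compile the node filter once (done in (*Node).compile in the real code)
	program, err := expr.Compile(`evt.program == "sshd"`, expr.Env(env))
	if err != nil {
		panic(err)
	}

	// run it per event; anything other than a bool means the node is skipped
	output, err := expr.Run(program, env)
	if err != nil {
		fmt.Println("failed to run filter:", err)
		return
	}

	switch out := output.(type) {
	case bool:
		fmt.Println("filter matched:", out)
	default:
		fmt.Printf("filter returned non-bool (%T), node would be skipped\n", output)
	}
}
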
@@ -1,10 +1,9 @@
//go:build !windows && !freebsd
//go:build !windows

package types

import (
"fmt"

"golang.org/x/sys/unix"
)

@@ -93,7 +92,6 @@ var fsTypeMapping map[int64]string = map[int64]string{
0xabba1974: "xenfs",
0x012ff7b4: "xenix",
0x58465342: "xfs",
0x2fc12fc1: "zfs",
}

func GetFSType(path string) (string, error) {

@@ -1,25 +0,0 @@
//go:build freebsd

package types

import (
"fmt"
"syscall"
)

func GetFSType(path string) (string, error) {
var fsStat syscall.Statfs_t

if err := syscall.Statfs(path, &fsStat); err != nil {
return "", fmt.Errorf("failed to get filesystem type: %w", err)
}

bs := fsStat.Fstypename

b := make([]byte, len(bs))
for i, v := range bs {
b[i] = byte(v)
}

return string(b), nil
}

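For context: the FreeBSD variant above converts syscall.Statfs_t.Fstypename, a fixed-size array of int8, into a Go string byte by byte. A portable sketch of the same conversion follows; it additionally stops at the first NUL byte so the array padding does not end up in the string. The array size and sample value are assumptions for illustration only.

package main

import "fmt"

// int8ArrayToString mimics the Fstypename conversion from the FreeBSD helper,
// but stops at the first NUL byte instead of keeping the padding.
func int8ArrayToString(bs [16]int8) string {
	b := make([]byte, 0, len(bs))
	for _, v := range bs {
		if v == 0 {
			break
		}
		b = append(b, byte(v))
	}
	return string(b)
}

func main() {
	var name [16]int8
	for i, c := range "zfs" {
		name[i] = int8(c)
	}
	fmt.Println(int8ArrayToString(name)) // prints: zfs
}
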
@@ -125,13 +125,19 @@ teardown() {
assert_stderr --partial "Upgraded 0 contexts"
assert_stderr --partial "Upgrading collections"
assert_stderr --partial "Upgraded 0 collections"
assert_stderr --partial "Upgrading appsec-configs"
assert_stderr --partial "Upgraded 0 appsec-configs"
assert_stderr --partial "Upgrading appsec-rules"
assert_stderr --partial "Upgraded 0 appsec-rules"
assert_stderr --partial "Upgrading collections"
assert_stderr --partial "Upgraded 0 collections"

rune -0 cscli parsers install crowdsecurity/syslog-logs
rune -0 cscli hub upgrade
assert_stderr --partial "crowdsecurity/syslog-logs: up-to-date"

rune -0 cscli hub upgrade --force
assert_stderr --partial "crowdsecurity/syslog-logs: overwrite"
assert_stderr --partial "crowdsecurity/syslog-logs: up-to-date"
assert_stderr --partial "crowdsecurity/syslog-logs: updated"
assert_stderr --partial "Upgraded 1 parsers"
# this is used by the cron script to know if the hub was updated

@@ -180,7 +180,6 @@ teardown() {
assert_stderr --partial "error while installing 'crowdsecurity/sshd': while enabling crowdsecurity/sshd: crowdsecurity/sshd is tainted, won't enable unless --force"

rune -0 cscli collections install crowdsecurity/sshd --force
assert_stderr --partial "crowdsecurity/sshd: overwrite"
assert_stderr --partial "Enabled crowdsecurity/sshd"
}

@@ -180,7 +180,6 @@ teardown() {
assert_stderr --partial "error while installing 'crowdsecurity/whitelists': while enabling crowdsecurity/whitelists: crowdsecurity/whitelists is tainted, won't enable unless --force"

rune -0 cscli parsers install crowdsecurity/whitelists --force
assert_stderr --partial "crowdsecurity/whitelists: overwrite"
assert_stderr --partial "Enabled crowdsecurity/whitelists"
}

@@ -180,7 +180,6 @@ teardown() {
assert_stderr --partial "error while installing 'crowdsecurity/rdns': while enabling crowdsecurity/rdns: crowdsecurity/rdns is tainted, won't enable unless --force"

rune -0 cscli postoverflows install crowdsecurity/rdns --force
assert_stderr --partial "crowdsecurity/rdns: overwrite"
assert_stderr --partial "Enabled crowdsecurity/rdns"
}

@@ -181,7 +181,6 @@ teardown() {
assert_stderr --partial "error while installing 'crowdsecurity/ssh-bf': while enabling crowdsecurity/ssh-bf: crowdsecurity/ssh-bf is tainted, won't enable unless --force"

rune -0 cscli scenarios install crowdsecurity/ssh-bf --force
assert_stderr --partial "crowdsecurity/ssh-bf: overwrite"
assert_stderr --partial "Enabled crowdsecurity/ssh-bf"
}