diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 000000000..cf30b3840
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,73 @@
+############################
+# STEP 1 build executable binary
+############################
+
+FROM golang:alpine AS builder
+
+RUN apk update && apk add make gettext gcc g++
+
+
+WORKDIR $GOPATH/src/JohnDoeCrowdSec/granola
+
+# COPY the source
+COPY ./ .
+
+RUN make build
+
+RUN make install
+
+RUN cp ./docker/docker.yaml /etc/crowdsec/docker.yaml
+RUN cp ./docker/acquis.yaml /etc/crowdsec/crowdsec/
+
+############################
+# STEP 2 build runtime image
+############################
+
+FROM alpine:latest
+
+COPY --from=builder /usr/local/bin/crowdsec /usr/local/bin/crowdsec
+COPY --from=builder /usr/local/bin/cscli /usr/local/bin/cscli
+
+
+COPY --from=builder /etc/crowdsec /etc/crowdsec
+COPY --from=builder /var/run/crowdsec /var/run/crowdsec
+
+RUN apk add --update bash rsyslog && rm -rf /var/cache/apk/*
+
+###########################
+##### Prepare rsyslog #####
+###########################
+
+RUN mkdir -p /etc/rsyslog.d/
+RUN mkdir -p /var/spool/rsyslog/
+RUN mkdir -p /var/log/rsyslog
+RUN touch /var/log/syslog
+
+EXPOSE 514/tcp 514/udp
+
+COPY ./docker/rsyslog.conf /etc/rsyslog.conf
+
+###########################################
+###### Configure crowdsec ###########
+###########################################
+
+RUN cscli config token "6ba94afde0fbf41310f7191934bc1d920245c9f1"
+RUN cscli config installdir "/etc/crowdsec/crowdsec/"
+RUN cscli config dbpath "/var/run/crowdsec/crowdsec.db"
+
+RUN cscli update
+
+RUN cscli install collection crowdsec/base-http-scenarios
+RUN cscli install collection crowdsec/linux
+RUN cscli install collection crowdsec/nginx
+RUN cscli install collection crowdsec/sshd
+
+######################################
+## Wrapper to launch multi services ##
+######################################
+
+COPY ./docker/wrapper.sh .
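+# wrapper.sh is expected to launch rsyslog and the crowdsec daemon together (single container, multiple services)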
+RUN chmod +x ./wrapper.sh
+
+ENTRYPOINT ["./wrapper.sh"]
+
diff --git a/Makefile b/Makefile
new file mode 100644
index 000000000..39c72e6b8
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,118 @@
+PREFIX?="/tmp/crowdsec/"
+CFG_PREFIX = $(PREFIX)"/etc/crowdsec/"
+BIN_PREFIX = $(PREFIX)"/usr/local/bin/"
+DATA_PREFIX = $(PREFIX)"/var/run/crowdsec/"
+
+PLUGIN_FOLDER="./plugins"
+PID_DIR = $(PREFIX)"/var/run/"
+CROWDSEC_FOLDER = "./cmd/crowdsec"
+CSCLI_FOLDER = "./cmd/crowdsec-cli/"
+CROWDSEC_BIN = "crowdsec"
+CSCLI_BIN = "cscli"
+BUILD_CMD="build"
+
+GOARCH=amd64
+GOOS=linux
+REQUIRE_GOVERSION="1.13"
+
+#computed at the top level so the ifeq version checks below can actually see them
+CURRENT_GOVERSION = "$(shell go version | cut -d " " -f3 | sed -r 's/[go]+//g')"
+RESPECT_VERSION = "$(shell echo "$(CURRENT_GOVERSION),$(REQUIRE_GOVERSION)" | tr ',' '\n' | sort -V)"
+
+#Current versioning information from env
+export BUILD_VERSION=$(shell cat RELEASE.json | jq -r .Version)
+export BUILD_GOVERSION="$(shell go version | cut -d " " -f3 | sed -r 's/[go]+//g')"
+export BUILD_CODENAME=$(shell cat RELEASE.json | jq -r .CodeName)
+export BUILD_TIMESTAMP=$(shell date +%F"_"%T)
+export BUILD_TAG="$(shell git rev-parse HEAD)"
+export LD_OPTS=-ldflags "-X github.com/crowdsecurity/crowdsec/pkg/cwversion.Version=$(BUILD_VERSION) \
+-X github.com/crowdsecurity/crowdsec/pkg/cwversion.BuildDate=$(BUILD_TIMESTAMP) \
+-X github.com/crowdsecurity/crowdsec/pkg/cwversion.Codename=$(BUILD_CODENAME) \
+-X github.com/crowdsecurity/crowdsec/pkg/cwversion.Tag=$(BUILD_TAG) \
+-X github.com/crowdsecurity/crowdsec/pkg/cwversion.GoVersion=$(BUILD_GOVERSION)"
+RELDIR = crowdsec-$(BUILD_VERSION)
+
+all: clean test build
+
+build: clean goversion crowdsec cscli
+
+static: goversion crowdsec_static cscli_static
+
+goversion:
+
+clean:
+	@make -C $(CROWDSEC_FOLDER) clean --no-print-directory
+	@make -C $(CSCLI_FOLDER) clean --no-print-directory
+	@rm -f $(CROWDSEC_BIN)
+	@rm -f $(CSCLI_BIN)
+	@rm -f *.log
+
+cscli:
+ifeq ($(lastword $(RESPECT_VERSION)), $(CURRENT_GOVERSION))
+	@make -C $(CSCLI_FOLDER) build --no-print-directory
+else
+	@echo "Required golang version is $(REQUIRE_GOVERSION). The current one is $(CURRENT_GOVERSION). Exiting."
+	@exit 1;
+endif
+
+
+crowdsec:
+ifeq ($(lastword $(RESPECT_VERSION)), $(CURRENT_GOVERSION))
+	@make -C $(CROWDSEC_FOLDER) build --no-print-directory
+else
+	@echo "Required golang version is $(REQUIRE_GOVERSION). The current one is $(CURRENT_GOVERSION). Exiting."
+	@exit 1;
+endif
+
+
+cscli_static:
+ifeq ($(lastword $(RESPECT_VERSION)), $(CURRENT_GOVERSION))
+	@make -C $(CSCLI_FOLDER) static --no-print-directory
+else
+	@echo "Required golang version is $(REQUIRE_GOVERSION). The current one is $(CURRENT_GOVERSION). Exiting."
+	@exit 1;
+endif
+
+
+crowdsec_static:
+ifeq ($(lastword $(RESPECT_VERSION)), $(CURRENT_GOVERSION))
+	@make -C $(CROWDSEC_FOLDER) static --no-print-directory
+else
+	@echo "Required golang version is $(REQUIRE_GOVERSION). The current one is $(CURRENT_GOVERSION). Exiting."
+	@exit 1;
+endif
+
+
+#.PHONY: test
+test:
+ifeq ($(lastword $(RESPECT_VERSION)), $(CURRENT_GOVERSION))
+	@make -C $(CROWDSEC_FOLDER) test --no-print-directory
+else
+	@echo "Required golang version is $(REQUIRE_GOVERSION). The current one is $(CURRENT_GOVERSION). Exiting."
+	@exit 1;
+endif
+
+.PHONY: uninstall
+uninstall:
+	@rm -rf "$(CFG_PREFIX)" || exit
+	@rm -rf "$(DATA_PREFIX)" || exit
+	@rm -rf "$(SYSTEMD_PATH_FILE)" || exit
+
+.PHONY: check_release
+check_release:
+	@if [ -d $(RELDIR) ]; then echo "$(RELDIR) already exists, abort" ; exit 1 ; fi
+
+.PHONY: release
+release: check_release build
+	@echo Building Release to dir $(RELDIR)
+	@mkdir -p $(RELDIR)/cmd/crowdsec
+	@mkdir -p $(RELDIR)/cmd/crowdsec-cli
+	@cp $(CROWDSEC_FOLDER)/$(CROWDSEC_BIN) $(RELDIR)/cmd/crowdsec
+	@cp $(CSCLI_FOLDER)/$(CSCLI_BIN) $(RELDIR)/cmd/crowdsec-cli
+	@cp -R ./config/ $(RELDIR)
+	@cp -R ./data/ $(RELDIR)
+	@cp wizard.sh $(RELDIR)
+	@cp scripts/test_env.sh $(RELDIR)
+	@bash ./scripts/build_plugins.sh
+	@mkdir -p "$(RELDIR)/plugins/backend"
+	@find ./plugins -type f -name "*.so" -exec install -Dm 644 {} "$(RELDIR)/{}" \; || exit 1
+	@tar cvzf crowdsec-release.tgz $(RELDIR)
diff --git a/README.md b/README.md
new file mode 100644
index 000000000..07c033711
--- /dev/null
+++ b/README.md
@@ -0,0 +1,153 @@
+![Go](https://github.com/crowdsecurity/crowdsec/workflows/Go/badge.svg)
+![build-binary-package](https://github.com/crowdsecurity/crowdsec/workflows/build-binary-package/badge.svg)
+
+# CrowdSec project
+
+Please see [terminology](#terminology) if you're new to the project.
+
+## Foreword
+
+This repository contains the code for the two main components of crowdsec :
+ - `crowdsec` : the daemon a-la-fail2ban that can read, parse, enrich and apply heuristics to logs. This is the component in charge of "detecting" the attacks
+ - `cscli` : the cli tool mainly used to interact with crowdsec : ban/unban/view current bans, enable/disable parsers and scenarios.
+
+## Plugins
+
+The plugins are in charge of blocking the IPs/ranges that have been tagged as malevolent.
+They do so by querying an SQLite database when they see a new IP. This SQLite database is fed by crowdsec.
+The following plugins are available :
+ - `netfilter-plugin` : an iptables/ipset service that can be deployed by the wizard. It allows banning IPs/ranges as you would with iptables.
+ - `nginx-plugin` : a LUA connector for nginx that can be deployed by the wizard. It allows banning IPs/ranges at the application level (i.e. more suitable than iptables if your website is behind a CDN).
+ - `wordpress-plugin` : a WordPress/PHP module that can be deployed in WordPress to block requests at the application level (it comes as a library for easy reuse).
+
+
+
+
+# Software architecture
+
+
+![global crowdsec architecture](./doc/img/crowdsec-global.png)
+
+
+**NOTE** the API part isn't enabled by default.
+
+# Build
+
+**To build crowdsec you need golang >= 1.13.**
+To build binaries :
+
+```
+$ make build
+```
+
+# Installation
+
+## With the wizard
+
+ The wizard is here to significantly improve the user experience, and aims at providing a _next-next-next-finish_ installer that should work out of the box on most Linux-flavored systems.
+
+ The wizard will help you in the following steps :
+ - detect running services
+ - detect their log files
+ - suggest collections/scenarios according to the detected services
+ - deploy the crowdsec service
+ - deploy plugins
+
+ ```bash
+ $ make build
+ $ sudo ./wizard.sh -i
+ ```
+
+ and you're done!
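+
+Under the hood, the wizard's acquisition step boils down to filling `acquis.yaml`, which maps log files to the type of service that produces them. A minimal sketch of such an entry — the path and label here are illustrative, adapt them to your services:
+
+```yaml
+filenames:
+  - /var/log/nginx/access.log
+labels:
+  type: nginx
+```
+
+If you skip the wizard (next section), writing such entries yourself is the main extra step.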
+
+
+## Without the wizard
+
+> You man of little faith
+
+You can install crowdsec and its cli without the wizard :
+
+```bash
+$ make build
+$ make systemd
+```
+
+**NOTE** if you install without the wizard, it will be your responsibility to configure the acquisition (which file to read for which service) and to deploy scenarios and parsers (how to parse logs, and which scenarios should be applied to which services).
+
+## After the installation
+
+Services are deployed as systemd units :
+ - `crowdsec` is the detection component
+ - `crowdsec-netfilter` is the netfilter plugin
+ - the nginx, wordpress etc. plugins usually run inside the service itself (i.e. the nginx plugin is a LUA script, the wordpress plugin is a php module)
+ - `cscli` is deployed in a standard path.
+
+```bash
+$ sudo systemctl status crowdsec
+# stop the netfilter plugin. If you didn't install other plugins, decisions won't be 'applied' anymore unless you start it again.
+$ sudo systemctl stop crowdsec-netfilter
+```
+
+# Configuration
+
+crowdsec relies on the following configuration files (by default in `/etc/crowdsec/`) :
+
+ - default.yaml : The main configuration of crowdsec; you will find here information about logging, the path to the SQLite DB, etc.
+ - acquis.yaml : Describes the files that will be read (a-la `tail -f`) and which type of logs to expect from them
+ - api.yaml : URL & token for API pushes and pulls (pushes **signal occurrences** and fetches **crowd-sourced reputation**)
+ - profiles.yaml : (you shouldn't modify this one) Describes in which conditions crowdsec should insert a ban decision in the database. It's usually because a scenario has a `remediation: true` flag in its tags.
+
+However, the "real" configuration of crowdsec relies on the collections of scenarios and parsers that you have deployed.
+Those are deployed / upgraded / removed (ideally) with `cscli`, see [its dedicated documentation](./cmd/crowdsec-cli/doc/cscli.md)
+
+If you used the wizard, chances are that you don't have anything specific to configure.
+
+# Usage / FAQ
+
+[See `cscli` dedicated documentation](./cmd/crowdsec-cli/doc/cscli.md)
+
+## stop the netfilter plugin
+
+**note** when the netfilter plugin is disabled, no bans will be applied unless other plugins are enabled.
+
+```
+#view netfilter logs
+$ journalctl -u crowdsec-netfilter -f
+#stop service
+$ systemctl stop crowdsec-netfilter
+```
+
+## view/add/remove bans
+
+```
+# cscli ban list
+INFO[0000] 38 non-expired ban applications
++-----------------+---------------+--------------------+--------+---------+--------------------------------+--------------+--------------------+
+| SOURCE | SCENARIO | CURRENT BANS COUNT | ACTION | COUNTRY | AS | EVENTS COUNT | EXPIRATION |
++-----------------+---------------+--------------------+--------+---------+--------------------------------+--------------+--------------------+
+| 37.195.50.41 | ssh_user_enum | 1 | ban | RU | 31200 Novotelecom Ltd | 4 | 3h59m56.337435741s |
+| 159.203.143.58 | ssh_user_enum | 1 | ban | US | 14061 DigitalOcean, LLC | 4 | 3h59m55.585257629s |
+...
+# cscli ban add range 37.139.4.0/24 10m spam
+# cscli ban add ip 37.139.4.123 10m spam
+```
+
+# Terminology
+
+ - **crowdsec** : the daemon that reads log files, parses logs and triggers scenarios, alerts and bans.
+ - **crowdsec database** : a local file that contains, at any given time, the list of banned IPs/ranges.
+ - **plugin** : a software component that can interact with the crowdsec database to block/delay attackers.
+ - **parser** : a configuration file that allows crowdsec to 'understand' a specific log file format. Each service will generally require its own parser (nginx, apache, sshd, mysql etc.). Parsers are usually found on the **hub** and downloaded via the **cli**.
+ - **scenario** : a leakybucket description that allows detection of a specific attack : _more than 5 failed ssh authentication attempts from the same IP within less than 20 seconds is an ssh bruteforce and should be punished_
+ - **signal** : the information resulting from a scenario being triggered; this information is shared amongst participants and will lead to consensus : _users A, B, C, D all reported that IP 1.2.3.4 targeted them with an ssh bruteforce_
+ - **bucket**, **bucket overflow** : a more technical term referring to a scenario being triggered.
+ - **hub** : the portal on which users can find, share and publish parsers and scenarios.
+ - **cli** : the `cscli` tool.
+
+# Making a release
+
+ - release-drafter maintains a draft release up-to-date with MRs
+ - when you publish the release with the "pre-release" flag, it launches an action that adds the built release package to the release.
+ - once extra manual steps are done, you can remove the "pre-release" flag from the published release "and voila"
+
+
diff --git a/RELEASE.json b/RELEASE.json
new file mode 100644
index 000000000..eb439dbb8
--- /dev/null
+++ b/RELEASE.json
@@ -0,0 +1,4 @@
+{
+    "Version": "v0.0.21",
+    "CodeName": "road2beta"
+}
diff --git a/cmd/crowdsec-cli/Makefile b/cmd/crowdsec-cli/Makefile
new file mode 100644
index 000000000..442aa2a42
--- /dev/null
+++ b/cmd/crowdsec-cli/Makefile
@@ -0,0 +1,31 @@
+# Go parameters
+GOCMD=go
+GOBUILD=$(GOCMD) build
+GOCLEAN=$(GOCMD) clean
+GOTEST=$(GOCMD) test
+GOGET=$(GOCMD) get
+BINARY_NAME=cscli
+PREFIX?="/"
+BIN_PREFIX = $(PREFIX)"/usr/local/bin/"
+
+all: clean build
+
+build: clean
+	@$(GOBUILD) $(LD_OPTS) -o $(BINARY_NAME) -v
+
+static: clean
+	@$(GOBUILD) -o $(BINARY_NAME) -v -a -tags netgo -ldflags '-w -extldflags "-static"'
+
+install: install-conf install-bin
+
+install-conf:
+
+install-bin:
+	@install -v -m 755 -D "$(BINARY_NAME)" "$(BIN_PREFIX)/$(BINARY_NAME)" || exit
+
+uninstall:
+	@rm -rf $(CSCLI_CONFIG)
+	@rm -rf $(BIN_PREFIX)$(BINARY_NAME)
+
+clean:
+	@rm -f $(BINARY_NAME)
diff --git a/cmd/crowdsec-cli/README.md b/cmd/crowdsec-cli/README.md
new file mode 100644
index 000000000..a9218b8c4
--- /dev/null
+++ b/cmd/crowdsec-cli/README.md
@@ -0,0 +1 @@
+see doc in `doc/`
diff --git a/cmd/crowdsec-cli/api.go b/cmd/crowdsec-cli/api.go
new file mode 100644
index 000000000..952b8b12d
--- /dev/null
+++ b/cmd/crowdsec-cli/api.go
@@ -0,0 +1,271 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"math/rand"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
+	"github.com/crowdsecurity/crowdsec/pkg/outputs"
+	"github.com/crowdsecurity/crowdsec/pkg/sqlite"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+
+	"github.com/denisbrodbeck/machineid"
+	log "github.com/sirupsen/logrus"
+	"github.com/spf13/cobra"
+	"gopkg.in/yaml.v2"
+)
+
+var (
+	passwordLength = 64
+	//keep the full alphabet : a missing letter would silently bias generated passwords
+	upper  = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+	lower  = "abcdefghijklmnopqrstuvwxyz"
+	digits = "0123456789"
+)
+
+var (
+	apiConfigFile = "api.yaml"
+)
+
+var userID string // for flag parsing
+var dbctx *sqlite.Context
+
+var outputCTX *outputs.Output
+
+func dumpCredentials() error {
+	if config.output == "json" {
+		credsJSON, err := json.Marshal(&outputCTX.API.Creds)
+		if err != nil {
+			log.Fatalf("Can't marshal credentials : %v", err)
+		}
+		fmt.Printf("%s\n", string(credsJSON))
+	} else {
+		credsYaml, err := yaml.Marshal(&outputCTX.API.Creds)
+		if err != nil {
+			log.Fatalf("Can't marshal credentials : %v", err)
+		}
+		fmt.Printf("%s\n", string(credsYaml))
+	}
+	return nil
+}
+
+func generatePassword() string {
+	rand.Seed(time.Now().UnixNano())
+	charset := upper + lower + digits
+
+	buf := make([]byte, passwordLength)
+	buf[0] = digits[rand.Intn(len(digits))]
+	buf[1] = upper[rand.Intn(len(upper))]
+	buf[2] = lower[rand.Intn(len(lower))]
+
+	for i := 3; i < passwordLength; i++ {
+		buf[i] = charset[rand.Intn(len(charset))]
+	}
+	rand.Shuffle(len(buf), func(i, j int) {
+		buf[i], buf[j] = buf[j], buf[i]
+	})
+
+	return string(buf)
+}
+
+func pullTOP() error {
+	/*profile from cwhub*/
+	var profiles []string
+	if _, ok := cwhub.HubIdx[cwhub.SCENARIOS]; !ok || len(cwhub.HubIdx[cwhub.SCENARIOS]) == 0 {
+		log.Errorf("no loaded scenarios, can't fill profiles")
+		return fmt.Errorf("no profiles")
+	}
+	for _, item := range cwhub.HubIdx[cwhub.SCENARIOS] {
+		if item.Tainted || !item.Installed {
+			continue
+		}
+		profiles = append(profiles, item.Name)
+	}
+	outputCTX.API.Creds.Profile = strings.Join(profiles[:], ",")
+	if err := outputCTX.API.Signin(); err != nil {
+		log.Fatalf(err.Error())
+	}
+
+	ret, err := outputCTX.API.PullTop()
+	if err != nil {
+		log.Fatalf(err.Error())
+	}
+	log.Warningf("api pull returned %d entries", len(ret))
+	for _, item := range ret {
+		if _, ok := item["range_ip"]; !ok {
+			continue
+		}
+		if _, ok := item["scenario"]; !ok {
+			continue
+		}
+		item["scenario"] = fmt.Sprintf("api: %s", item["scenario"])
+
+		if _, ok := item["action"]; !ok {
+			continue
+		}
+		if _, ok := item["expiration"]; !ok {
+			continue
+		}
+		if _, ok := item["country"]; !ok {
+			item["country"] = ""
+		}
+		if _, ok := item["as_org"]; !ok {
+			item["as_org"] = ""
+		}
+		if _, ok := item["as_num"]; !ok {
+			item["as_num"] = ""
+		}
+		var signalOcc types.SignalOccurence
+		signalOcc, err = simpleBanToSignal(item["range_ip"], item["scenario"], item["expiration"], item["action"], item["as_name"], item["as_num"], item["country"], "api")
+		if err != nil {
+			log.Fatalf("unable to convert pulled ban to signal : %s", err)
+		}
+		if err := outputCTX.Insert(signalOcc); err != nil {
+			log.Fatalf("Unable to write pull to sqliteDB : %s", err.Error())
+		}
+	}
+	outputCTX.Flush()
+	log.Infof("Wrote %d bans from api to database.", len(ret))
+	return nil
+}
+
+func NewAPICmd() *cobra.Command {
+
+	var cmdAPI = &cobra.Command{
+		Use:   "api [action]",
+		Short: "Crowdsec API interaction",
+		Long: `
Register your machine with the crowdsec API to send and receive signals.
+ `, + Example: ` +cscli api register # Register to Crowdsec API +cscli api pull # Pull malevolant IPs from Crowdsec API +cscli api reset # Reset your machines credentials +cscli api enroll # Enroll your machine to the user account you created on Crowdsec backend +cscli api credentials # Display your API credentials +`, + Args: cobra.MinimumNArgs(1), + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + var err error + if !config.configured { + return fmt.Errorf("you must configure cli before interacting with hub") + } + + outputConfig := outputs.OutputFactory{ + BackendFolder: config.BackendPluginFolder, + } + outputCTX, err = outputs.NewOutput(&outputConfig, false) + if err != nil { + return err + } + + err = outputCTX.LoadAPIConfig(path.Join(config.InstallFolder, apiConfigFile)) + if err != nil { + return err + } + return nil + }, + } + + var cmdAPIRegister = &cobra.Command{ + Use: "register", + Short: "Register on Crowdsec API", + Long: `This command will register your machine to crowdsec API to allow you to receive list of malveolent IPs. + The printed machine_id and password should be added to your api.yaml file.`, + Example: `cscli api register`, + Args: cobra.MinimumNArgs(0), + Run: func(cmd *cobra.Command, args []string) { + id, err := machineid.ID() + if err != nil { + log.Fatalf("failed to get machine id: %s", err) + } + password := generatePassword() + + if err := outputCTX.API.RegisterMachine(id, password); err != nil { + log.Fatalf(err.Error()) + } + fmt.Printf("machine_id: %s\n", outputCTX.API.Creds.User) + fmt.Printf("password: %s\n", outputCTX.API.Creds.Password) + return + }, + } + + var cmdAPIEnroll = &cobra.Command{ + Use: "enroll", + Short: "Associate your machine to an existing crowdsec user", + Long: `Enrolling your machine into your user account will allow for more accurate lists and threat detection. 
See website to create user account.`,
		Example: `cscli api enroll -u 1234567890ffff`,
		Args:    cobra.MinimumNArgs(0),
		Run: func(cmd *cobra.Command, args []string) {
			if err := outputCTX.API.Signin(); err != nil {
				log.Fatalf("unable to signin : %s", err)
			}
			if err := outputCTX.API.Enroll(userID); err != nil {
				log.Fatalf(err.Error())
			}
			return
		},
	}

	var cmdAPIResetPassword = &cobra.Command{
		Use:     "reset",
		Short:   "Reset password on CrowdSec API",
		Long:    `Attempts to reset your credentials to the API.`,
		Example: `cscli api reset`,
		Args:    cobra.MinimumNArgs(0),
		Run: func(cmd *cobra.Command, args []string) {
			id, err := machineid.ID()
			if err != nil {
				log.Fatalf("failed to get machine id: %s", err)
			}
			password := generatePassword()
			if err := outputCTX.API.ResetPassword(id, password); err != nil {
				log.Fatalf(err.Error())
			}
			fmt.Printf("machine_id: %s\n", outputCTX.API.Creds.User)
			fmt.Printf("password: %s\n", outputCTX.API.Creds.Password)
			return
		},
	}

	var cmdAPIPull = &cobra.Command{
		Use:     "pull",
		Short:   "Pull crowdsec API TopX",
		Long:    `Pulls a list of malevolent IPs relevant to your situation and adds them to the local ban database.`,
		Example: `cscli api pull`,
		Args:    cobra.MinimumNArgs(0),
		Run: func(cmd *cobra.Command, args []string) {
			if err := cwhub.GetHubIdx(); err != nil {
				log.Fatalf(err.Error())
			}
			err := pullTOP()
			if err != nil {
				log.Fatalf(err.Error())
			}
			return
		},
	}

	var cmdAPICreds = &cobra.Command{
		Use:     "credentials",
		Short:   "Display api credentials",
		Long:    ``,
		Example: `cscli api credentials`,
		Args:    cobra.MinimumNArgs(0),
		Run: func(cmd *cobra.Command, args []string) {
			if err := dumpCredentials(); err != nil {
				log.Fatalf(err.Error())
			}
			return
		},
	}

	cmdAPI.AddCommand(cmdAPICreds)
	cmdAPIEnroll.Flags().StringVarP(&userID, "user", "u", "", "User ID (required)")
	cmdAPIEnroll.MarkFlagRequired("user")
	cmdAPI.AddCommand(cmdAPIEnroll)
	cmdAPI.AddCommand(cmdAPIResetPassword)
	cmdAPI.AddCommand(cmdAPIRegister)
	cmdAPI.AddCommand(cmdAPIPull)
	return cmdAPI
}
diff --git a/cmd/crowdsec-cli/backup-restore.go b/cmd/crowdsec-cli/backup-restore.go
new file mode 100644
index 000000000..d46629ea5
--- /dev/null
+++ b/cmd/crowdsec-cli/backup-restore.go
@@ -0,0 +1,473 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/crowdsecurity/crowdsec/pkg/cwapi"
+	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
+	"github.com/crowdsecurity/crowdsec/pkg/outputs"
+	log "github.com/sirupsen/logrus"
+	"github.com/spf13/cobra"
+)
+
+//it's a rip of the cli install command, but in silent mode
+func silenceInstallItem(name string, obtype string) (string, error) {
+	for _, it := range cwhub.HubIdx[obtype] {
+		if it.Name == name {
+			if download_only && it.Downloaded && it.UpToDate {
+				return fmt.Sprintf("%s is already downloaded and up-to-date", it.Name), nil
+			}
+			it, err := cwhub.DownloadLatest(it, cwhub.Hubdir, force_install)
+			if err != nil {
+				return "", fmt.Errorf("error while downloading %s : %v", it.Name, err)
+			}
+			cwhub.HubIdx[obtype][it.Name] = it
+			if download_only {
+				return fmt.Sprintf("Downloaded %s to %s", it.Name, cwhub.Hubdir+"/"+it.RemotePath), nil
+			}
+			it, err = cwhub.EnableItem(it, cwhub.Installdir, cwhub.Hubdir)
+			if err != nil {
+				return "", fmt.Errorf("error while enabling %s : %v", it.Name, err)
+			}
+			cwhub.HubIdx[obtype][it.Name] = it
+
+			return fmt.Sprintf("Enabled %s", it.Name), nil
+		}
+	}
+	return "",
fmt.Errorf("%s not found in hub index", name) +} + +/*help to copy the file, ioutil doesn't offer the feature*/ + +func copyFileContents(src, dst string) (err error) { + in, err := os.Open(src) + if err != nil { + return + } + defer in.Close() + out, err := os.Create(dst) + if err != nil { + return + } + defer func() { + cerr := out.Close() + if err == nil { + err = cerr + } + }() + if _, err = io.Copy(out, in); err != nil { + return + } + err = out.Sync() + return +} + +/*copy the file, ioutile doesn't offer the feature*/ +func copyFile(sourceSymLink, destinationFile string) (err error) { + + sourceFile, err := filepath.EvalSymlinks(sourceSymLink) + if err != nil { + log.Infof("Not a symlink : %s", err) + sourceFile = sourceSymLink + } + + sourceFileStat, err := os.Stat(sourceFile) + if err != nil { + return + } + if !sourceFileStat.Mode().IsRegular() { + // cannot copy non-regular files (e.g., directories, + // symlinks, devices, etc.) + return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sourceFileStat.Name(), sourceFileStat.Mode().String()) + } + destinationFileStat, err := os.Stat(destinationFile) + if err != nil { + if !os.IsNotExist(err) { + return + } + } else { + if !(destinationFileStat.Mode().IsRegular()) { + return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", destinationFileStat.Name(), destinationFileStat.Mode().String()) + } + if os.SameFile(sourceFileStat, destinationFileStat) { + return + } + } + if err = os.Link(sourceFile, destinationFile); err == nil { + return + } + err = copyFileContents(sourceFile, destinationFile) + return +} + +/*given a backup directory, restore configs (parser,collections..) both tainted and untainted. +as well attempts to restore api credentials after verifying the existing ones aren't good +finally restores the acquis.yaml file*/ +func restoreFromDirectory(source string) error { + var err error + /*backup scenarios etc.*/ + for _, itype := range cwhub.ItemTypes { + itemDirectory := fmt.Sprintf("%s/%s/", source, itype) + if _, err = os.Stat(itemDirectory); err != nil { + log.Infof("no %s in backup", itype) + continue + } + /*restore the upstream items*/ + upstreamListFN := fmt.Sprintf("%s/upstream-%s.json", itemDirectory, itype) + file, err := ioutil.ReadFile(upstreamListFN) + if err != nil { + return fmt.Errorf("error while opening %s : %s", upstreamListFN, err) + } + var upstreamList []string + err = json.Unmarshal([]byte(file), &upstreamList) + if err != nil { + return fmt.Errorf("error unmarshaling %s : %s", upstreamListFN, err) + } + for _, toinstall := range upstreamList { + label, err := silenceInstallItem(toinstall, itype) + if err != nil { + log.Errorf("Error while installing %s : %s", toinstall, err) + } else if label != "" { + log.Infof("Installed %s : %s", toinstall, label) + } else { + log.Printf("Installed %s : ok", toinstall) + } + } + /*restore the local and tainted items*/ + files, err := ioutil.ReadDir(itemDirectory) + if err != nil { + return fmt.Errorf("Failed enumerating files of %s : %s", itemDirectory, err) + } + for _, file := range files { + //dir are stages, keep track + if !file.IsDir() { + continue + } + stage := file.Name() + stagedir := fmt.Sprintf("%s/%s/%s/", config.InstallFolder, itype, stage) + log.Debugf("Found stage %s in %s, target directory : %s", stage, itype, stagedir) + if err = os.MkdirAll(stagedir, os.ModePerm); err != nil { + return fmt.Errorf("Error while creating stage directory %s : %s", stagedir, err) + } + /*find items*/ + ifiles, err := ioutil.ReadDir(itemDirectory + "/" 
+ stage + "/")
+			if err != nil {
+				return fmt.Errorf("Failed enumerating files of %s : %s", itemDirectory+"/"+stage, err)
+			}
+			//finally copy items
+			for _, tfile := range ifiles {
+				log.Infof("Going to restore local/tainted [%s]", tfile.Name())
+				sourceFile := fmt.Sprintf("%s/%s/%s", itemDirectory, stage, tfile.Name())
+				destinationFile := fmt.Sprintf("%s%s", stagedir, tfile.Name())
+				if err = copyFile(sourceFile, destinationFile); err != nil {
+					return fmt.Errorf("failed copy %s %s to %s : %s", itype, sourceFile, destinationFile, err)
+				} else {
+					log.Infof("restored %s to %s", sourceFile, destinationFile)
+				}
+
+			}
+		}
+	}
+	/*restore api credentials*/
+	//check if credentials exist :
+	// - if no, restore
+	// - if yes, try them :
+	//   - if they work, leave them untouched
+	//   - if not, restore
+	// -> try login
+	if err := restoreAPICreds(source); err != nil {
+		return fmt.Errorf("Failed to restore api credentials : %s", err)
+	}
+	/*
+		Restore acquis
+	*/
+	yamlAcquisFile := fmt.Sprintf("%s/acquis.yaml", config.InstallFolder)
+	bac := fmt.Sprintf("%s/acquis.yaml", source)
+	if err = copyFile(bac, yamlAcquisFile); err != nil {
+		return fmt.Errorf("failed copy %s to %s : %s", bac, yamlAcquisFile, err)
+	}
+	log.Infof("Restored acquis to %s", yamlAcquisFile)
+
+	return nil
+}
+
+func restoreAPICreds(source string) error {
+	var err error
+
+	/*check existing configuration*/
+	apiyaml := path.Join(config.InstallFolder, apiConfigFile)
+
+	api := &cwapi.ApiCtx{}
+	if err = api.LoadConfig(apiyaml); err != nil {
+		return fmt.Errorf("Unable to load api config %s : %s", apiyaml, err)
+	}
+	if api.Creds.User != "" {
+		log.Infof("Credentials present in existing configuration, trying them before overriding")
+		err := api.Signin()
+		if err == nil {
+			log.Infof("Credentials present allow authentication, don't override !")
+			return nil
+		} else {
+			log.Infof("Credentials aren't valid : %s", err)
+		}
+	}
+	/*existing config isn't good, override it !*/
+	ret, err := ioutil.ReadFile(path.Join(source, "api_creds.json"))
+	if err != nil {
+		return fmt.Errorf("Failed to read api creds from save : %s", err)
+	}
+	if err := json.Unmarshal(ret, &api.Creds); err != nil {
+		return fmt.Errorf("Failed unmarshaling saved credentials : %s", err)
+	}
+	api.CfgUser = api.Creds.User
+	api.CfgPassword = api.Creds.Password
+	/*override the existing yaml file*/
+	if err := api.WriteConfig(apiyaml); err != nil {
+		return fmt.Errorf("Failed writing to %s : %s", apiyaml, err)
+	} else {
+		log.Infof("Overwriting %s with backup info", apiyaml)
+	}
+
+	/*reload to check everything is safe*/
+	if err = api.LoadConfig(apiyaml); err != nil {
+		return fmt.Errorf("Unable to load api config %s : %s", apiyaml, err)
+	}
+
+	if err := api.Signin(); err != nil {
+		log.Errorf("Failed to authenticate after credentials restoration : %v", err)
+	} else {
+		log.Infof("Successfully authenticated to API after credentials restoration")
+	}
+
+	return nil
+}
+
+func backupToDirectory(target string) error {
+	var itemDirectory string
+	var upstreamParsers []string
+	var err error
+	if target == "" {
+		return fmt.Errorf("target directory can't be empty")
+	}
+	log.Warningf("Starting configuration backup")
+	_, err = os.Stat(target)
+	if err == nil {
+		return fmt.Errorf("%s already exists", target)
+	}
+	if err = os.MkdirAll(target, os.ModePerm); err != nil {
+		return fmt.Errorf("Error while creating %s : %s", target, err)
+	}
+	/*
+		backup configurations :
+		- parsers, scenarios, collections, postoverflows
+	*/
+
+	for _, itemType := range cwhub.ItemTypes {
+		clog :=
log.WithFields(log.Fields{
+			"type": itemType,
+		})
+		if _, ok := cwhub.HubIdx[itemType]; ok {
+			itemDirectory = fmt.Sprintf("%s/%s/", target, itemType)
+			if err := os.MkdirAll(itemDirectory, os.ModePerm); err != nil {
+				return fmt.Errorf("Error while creating %s : %s", itemDirectory, err)
+			}
+			upstreamParsers = []string{}
+			stage := ""
+			for k, v := range cwhub.HubIdx[itemType] {
+				clog = clog.WithFields(log.Fields{
+					"file": v.Name,
+				})
+				if !v.Installed { //only backup installed ones
+					clog.Debugf("[%s] : not installed", k)
+					continue
+				}
+
+				//for the local/tainted ones, we backup the full file
+				if v.Tainted || v.Local || !v.UpToDate {
+					//we need to backup stages for parsers
+					if itemType == cwhub.PARSERS || itemType == cwhub.PARSERS_OVFLW {
+						tmp := strings.Split(v.LocalPath, "/")
+						stage = "/" + tmp[len(tmp)-2] + "/"
+						fstagedir := fmt.Sprintf("%s%s", itemDirectory, stage)
+						if err := os.MkdirAll(fstagedir, os.ModePerm); err != nil {
+							return fmt.Errorf("Error while creating stage dir %s : %s", fstagedir, err)
+						}
+					}
+					clog.Debugf("[%s] : backing up file (tainted:%t local:%t up-to-date:%t)", k, v.Tainted, v.Local, v.UpToDate)
+					tfile := fmt.Sprintf("%s%s%s", itemDirectory, stage, v.FileName)
+					//clog.Infof("item : %s", spew.Sdump(v))
+					if err = copyFile(v.LocalPath, tfile); err != nil {
+						return fmt.Errorf("failed copy %s %s to %s : %s", itemType, v.LocalPath, tfile, err)
+					}
+					clog.Infof("local/tainted saved %s to %s", v.LocalPath, tfile)
+					continue
+				}
+				clog.Debugf("[%s] : from hub, just backup name (up-to-date:%t)", k, v.UpToDate)
+				clog.Infof("saving, version:%s, up-to-date:%t", v.Version, v.UpToDate)
+				upstreamParsers = append(upstreamParsers, v.Name)
+			}
+			//write the upstream items
+			upstreamParsersFname := fmt.Sprintf("%s/upstream-%s.json", itemDirectory, itemType)
+			upstreamParsersContent, err := json.MarshalIndent(upstreamParsers, "", " ")
+			if err != nil {
+				return fmt.Errorf("failed marshaling upstream parsers : %s", err)
+			}
+			err = ioutil.WriteFile(upstreamParsersFname, upstreamParsersContent, 0644)
+			if err != nil {
+				return fmt.Errorf("unable to write to %s %s : %s", itemType, upstreamParsersFname, err)
+			}
+			clog.Infof("Wrote %d entries for %s to %s", len(upstreamParsers), itemType, upstreamParsersFname)
+
+		} else {
+			clog.Infof("No %s to backup.", itemType)
+		}
+	}
+	/*
+		Backup acquis
+	*/
+	yamlAcquisFile := fmt.Sprintf("%s/acquis.yaml", config.InstallFolder)
+	bac := fmt.Sprintf("%s/acquis.yaml", target)
+	if err = copyFile(yamlAcquisFile, bac); err != nil {
+		return fmt.Errorf("failed copy %s to %s : %s", yamlAcquisFile, bac, err)
+	}
+	log.Infof("Saved acquis to %s", bac)
+	/*
+		Backup default.yaml
+	*/
+	defyaml := fmt.Sprintf("%s/default.yaml", config.InstallFolder)
+	bac = fmt.Sprintf("%s/default.yaml", target)
+	if err = copyFile(defyaml, bac); err != nil {
+		return fmt.Errorf("failed copy %s to %s : %s", defyaml, bac, err)
+	}
+	log.Infof("Saved default yaml to %s", bac)
+	/*
+		Backup API info
+	*/
+	if outputCTX == nil {
+		log.Fatalf("no API output context, won't save api credentials")
+	}
+	outputCTX.API = &cwapi.ApiCtx{}
+	if err = outputCTX.API.LoadConfig(path.Join(config.InstallFolder, apiConfigFile)); err != nil {
+		return fmt.Errorf("unable to load api config %s : %s", path.Join(config.InstallFolder, apiConfigFile), err)
+	}
+	credsJSON, err := json.Marshal(&outputCTX.API.Creds)
+	if err != nil {
+		log.Fatalf("can't marshal credentials : %v", err)
+	}
+	apiCredsDumped := fmt.Sprintf("%s/api_creds.json", target)
+	err =
ioutil.WriteFile(apiCredsDumped, credsJSON, 0600)
+	if err != nil {
+		return fmt.Errorf("unable to write credentials to %s : %s", apiCredsDumped, err)
+	}
+	log.Infof("Saved configuration to %s", target)
+	return nil
+}
+
+func NewBackupCmd() *cobra.Command {
+	var cmdBackup = &cobra.Command{
+		Use:   "backup [save|restore] <directory>",
+		Short: "Backup or restore configuration (api, parsers, scenarios etc.) to/from directory",
+		Long:  `This command is here to help you save and/or restore crowdsec configurations for simple replication`,
+		Example: `cscli backup save ./my-backup
+cscli backup restore ./my-backup`,
+		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
+			if !config.configured {
+				return fmt.Errorf("you must configure cli before interacting with hub")
+			}
+			return nil
+		},
+	}
+
+	var cmdBackupSave = &cobra.Command{
+		Use:   "save <directory>",
+		Short: "Backup configuration (api, parsers, scenarios etc.) to directory",
+		Long: `backup command will try to save all relevant information of your crowdsec config, including :
+
+- List of scenarios, parsers, postoverflows and collections that are up-to-date
+
+- Actual backup of tainted/local/out-of-date scenarios, parsers, postoverflows and collections
+
+- Backup of API credentials
+
+- Backup of acquisition configuration
+
+ `,
+		Example: `cscli backup save ./my-backup`,
+		Args:    cobra.ExactArgs(1),
+		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
+			if !config.configured {
+				return fmt.Errorf("you must configure cli before interacting with hub")
+			}
+			return nil
+		},
+		Run: func(cmd *cobra.Command, args []string) {
+			var err error
+
+			outputConfig := outputs.OutputFactory{
+				BackendFolder: config.BackendPluginFolder,
+			}
+			outputCTX, err = outputs.NewOutput(&outputConfig, false)
+			if err != nil {
+				log.Fatalf("Failed to load output plugins")
+			}
+			if err := cwhub.GetHubIdx(); err != nil {
+				log.Fatalf("Failed to get Hub index : %v", err)
+			}
+			if err := backupToDirectory(args[0]); err != nil {
+				log.Fatalf("Failed backing up to %s : %s", args[0], err)
+			}
+		},
+	}
+	cmdBackup.AddCommand(cmdBackupSave)
+
+	var cmdBackupRestore = &cobra.Command{
+		Use:   "restore <directory>",
+		Short: "Restore configuration (api, parsers, scenarios etc.) from directory",
+		Long: `restore command will try to restore all saved information from <directory> to your local setup, including :
+
+- Installation of up-to-date scenarios/parsers/... via cscli
+
+- Restoration of tainted/local/out-of-date scenarios/parsers/... files
+
+- Restoration of API credentials (if the existing ones aren't working)
+
+- Restoration of acquisition configuration
+`,
+		Example: `cscli backup restore ./my-backup`,
+		Args:    cobra.ExactArgs(1),
+		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
+			if !config.configured {
+				return fmt.Errorf("you must configure cli before interacting with hub")
+			}
+			return nil
+		},
+		Run: func(cmd *cobra.Command, args []string) {
+			var err error
+
+			outputConfig := outputs.OutputFactory{
+				BackendFolder: config.BackendPluginFolder,
+			}
+			outputCTX, err = outputs.NewOutput(&outputConfig, false)
+			if err != nil {
+				log.Fatalf("Failed to load output plugins")
+			}
+
+			if err := cwhub.GetHubIdx(); err != nil {
+				log.Fatalf("failed to get Hub index : %v", err)
+			}
+			if err := restoreFromDirectory(args[0]); err != nil {
+				log.Fatalf("failed restoring from %s : %s", args[0], err)
+			}
+		},
+	}
+	cmdBackup.AddCommand(cmdBackupRestore)
+
+	return cmdBackup
+}
diff --git a/cmd/crowdsec-cli/ban.go b/cmd/crowdsec-cli/ban.go
new file mode 100644
index 000000000..7eaa4f324
--- /dev/null
+++ b/cmd/crowdsec-cli/ban.go
@@ -0,0 +1,326 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/crowdsecurity/crowdsec/pkg/outputs"
+	"github.com/crowdsecurity/crowdsec/pkg/parser"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+
+	"github.com/olekukonko/tablewriter"
+	log "github.com/sirupsen/logrus"
+	"github.com/spf13/cobra"
+)
+
+var remediationType string
+var atTime string
+var all bool
+
+func simpleBanToSignal(targetIP string, reason string, expirationStr string, action string, asName string, asNum string, country string, banSource string) (types.SignalOccurence, error) {
+	var signalOcc types.SignalOccurence
+
+	expiration, err := time.ParseDuration(expirationStr)
+	if err != nil {
+		return signalOcc, err
+	}
+
+	asOrgInt := 0
+	if asNum != "" {
+		asOrgInt, err = strconv.Atoi(asNum)
+		if err != nil {
+			log.Infof("Invalid as value %s : %s", asNum, err)
+		}
+	}
+
+	banApp := types.BanApplication{
+		MeasureSource: banSource,
+		MeasureType:   action,
+		Until:         time.Now().Add(expiration),
+		IpText:        targetIP,
+		TargetCN:      country,
+		TargetAS:      asOrgInt,
+		TargetASName:  asName,
+		Reason:        reason,
+	}
+	var parsedIP net.IP
+	var parsedRange *net.IPNet
+	if strings.Contains(targetIP, "/") {
+		if parsedIP, parsedRange, err = net.ParseCIDR(targetIP); err != nil {
+			return signalOcc, fmt.Errorf("'%s' is not a valid CIDR", targetIP)
+		}
+		if parsedRange == nil {
+			return signalOcc, fmt.Errorf("Unable to parse network : %s", err)
+		}
+		banApp.StartIp = types.IP2Int(parsedRange.IP)
+		banApp.EndIp = types.IP2Int(types.LastAddress(parsedRange))
+	} else {
+		parsedIP = net.ParseIP(targetIP)
+		if parsedIP == nil {
+			return signalOcc, fmt.Errorf("'%s' is not a valid IP", targetIP)
+		}
+	}
+
+	//start empty : appending to a pre-sized slice would leave a zero-valued entry in the list
+	var banApps = make([]types.BanApplication, 0)
+	banApps = append(banApps, banApp)
+	signalOcc = types.SignalOccurence{
+		Scenario:                            reason,
+		Events_count:                        1,
+		Start_at:                            time.Now(),
+		Stop_at:                             time.Now(),
+		BanApplications:                     banApps,
+		Source_ip:                           targetIP,
+		Source_AutonomousSystemNumber:       asNum,
+		Source_AutonomousSystemOrganization: asName,
+		Source_Country:                      country,
+	}
+	return signalOcc, nil
+}
+
+func BanList() error {
+	at := time.Now()
+	if atTime != "" {
+		_, at = parser.GenDateParse(atTime)
+		if at.IsZero() {
+			return fmt.Errorf("Unable to parse date '%s'", atTime)
+		}
+	}
+	ret, err := outputCTX.ReadAT(at)
+	if err != nil {
+		return fmt.Errorf("unable to get
records from sqlite : %v", err) + } + if config.output == "json" { + x, _ := json.MarshalIndent(ret, "", " ") + fmt.Printf("%s", string(x)) + } else if config.output == "human" { + + uniqAS := map[string]bool{} + uniqCN := map[string]bool{} + + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Source", "Ip", "Reason", "Bans", "Action", "Country", "AS", "Events", "Expiration"}) + + dispcount := 0 + totcount := 0 + apicount := 0 + for _, rm := range ret { + if !all && rm["source"] == "api" { + apicount++ + if _, ok := uniqAS[rm["as"]]; !ok { + uniqAS[rm["as"]] = true + } + if _, ok := uniqCN[rm["cn"]]; !ok { + uniqCN[rm["cn"]] = true + } + continue + } + if dispcount < 20 { + table.Append([]string{rm["source"], rm["iptext"], rm["reason"], rm["bancount"], rm["action"], rm["cn"], rm["as"], rm["events_count"], rm["until"]}) + } + totcount++ + dispcount++ + + } + if dispcount > 0 { + if !all { + fmt.Printf("%d local decisions:\n", totcount) + } + table.Render() // Send output + if dispcount > 20 { + fmt.Printf("Additional records stripped.\n") + } + } else { + fmt.Printf("No local decisions.\n") + } + if !all { + fmt.Printf("And %d records from API, %d distinct AS, %d distinct countries\n", apicount, len(uniqAS), len(uniqCN)) + } + } + return nil +} + +func BanAdd(target string, duration string, reason string, action string) error { + var signalOcc types.SignalOccurence + var err error + + signalOcc, err = simpleBanToSignal(target, reason, duration, action, "", "", "", "cli") + if err != nil { + return fmt.Errorf("Unable to insert ban : %v", err) + } + err = outputCTX.Insert(signalOcc) + if err != nil { + return err + } + err = outputCTX.Flush() + if err != nil { + return err + } + log.Infof("Wrote ban to database.") + return nil +} + +func banFlush() error { + allBa := types.BanApplication{} + records := dbctx.Db.Delete(&allBa) + if records.Error != nil { + return records.Error + } + return nil +} + +func NewBanCmds() *cobra.Command { + /*TODO : add a remediation type*/ + var cmdBan = &cobra.Command{ + Use: "ban [command] ", + Short: "Manage bans/mitigations", + Long: `This is the main interaction point with local ban database for humans. + +You can add/delete/list or flush current bans in your local ban DB.`, + Args: cobra.MinimumNArgs(1), + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + var err error + if !config.configured { + return fmt.Errorf("you must configure cli before using bans") + } + + outputConfig := outputs.OutputFactory{ + BackendFolder: config.BackendPluginFolder, + } + + outputCTX, err = outputs.NewOutput(&outputConfig, false) + if err != nil { + return fmt.Errorf(err.Error()) + } + return nil + }, + } + cmdBan.PersistentFlags().StringVar(&config.dbPath, "db", "", "Set path to SQLite DB.") + cmdBan.PersistentFlags().StringVar(&remediationType, "remediation", "ban", "Set specific remediation type : ban|slow|captcha") + cmdBan.Flags().SortFlags = false + cmdBan.PersistentFlags().SortFlags = false + + var cmdBanAdd = &cobra.Command{ + Use: "add [ip|range] ", + Short: "Adds a ban against a given ip/range for the provided duration", + Long: ` +Allows to add a ban against a specific ip or range target for a specific duration. + +The duration argument can be expressed in seconds(s), minutes(m) or hours (h). 
+
+See [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) for more information.`,
		Example: `cscli ban add ip 1.2.3.4 24h "scan"
cscli ban add range 1.2.3.0/24 24h "the whole range"`,
		Args: cobra.MinimumNArgs(4),
	}
	cmdBan.AddCommand(cmdBanAdd)
	var cmdBanAddIp = &cobra.Command{
		Use:     "ip <target> <duration> <reason>",
		Short:   "Adds the specific ip to the ban db",
		Long:    `Duration must be [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration), expressed in s/m/h.`,
		Example: `cscli ban add ip 1.2.3.4 12h "the scan"`,
		Args:    cobra.ExactArgs(3),
		Run: func(cmd *cobra.Command, args []string) {
			if err := BanAdd(args[0], args[1], args[2], remediationType); err != nil {
				log.Fatalf("failed to add ban to sqlite : %v", err)
			}
		},
	}
	cmdBanAdd.AddCommand(cmdBanAddIp)
	var cmdBanAddRange = &cobra.Command{
		Use:     "range <target> <duration> <reason>",
		Short:   "Adds the specific range to the ban db",
		Long:    `Duration must be [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) compatible, expressed in s/m/h.`,
		Example: `cscli ban add range 1.2.3.0/24 12h "the whole range"`,
		Args:    cobra.ExactArgs(3),
		Run: func(cmd *cobra.Command, args []string) {
			if err := BanAdd(args[0], args[1], args[2], remediationType); err != nil {
				log.Fatalf("failed to add ban to sqlite : %v", err)
			}
		},
	}
	cmdBanAdd.AddCommand(cmdBanAddRange)
	var cmdBanDel = &cobra.Command{
		Use:   "del [command] <target>",
		Short: "Delete bans from db",
		Long:  "The removal of the bans can be applied on a single IP address or directly on an IP range.",
		Example: `cscli ban del ip 1.2.3.4
cscli ban del range 1.2.3.0/24`,
		Args: cobra.MinimumNArgs(2),
	}
	cmdBan.AddCommand(cmdBanDel)

	var cmdBanFlush = &cobra.Command{
		Use:     "flush",
		Short:   "Flush ban DB",
		Example: `cscli ban flush`,
		Args:    cobra.NoArgs,
		Run: func(cmd *cobra.Command, args []string) {
			if err := outputCTX.DeleteAll(); err != nil {
				log.Fatalf(err.Error())
			}
			log.Printf("Ban DB flushed")
		},
	}
	cmdBan.AddCommand(cmdBanFlush)
	var cmdBanDelIp = &cobra.Command{
		Use:     "ip <target>",
		Short:   "Delete bans for given ip from db",
		Example: `cscli ban del ip 1.2.3.4`,
		Args:    cobra.ExactArgs(1),
		Run: func(cmd *cobra.Command, args []string) {
			count, err := outputCTX.Delete(args[0])
			if err != nil {
				log.Fatalf("failed to delete %s : %v", args[0], err)
			}
			log.Infof("Deleted %d entries", count)
		},
	}
	cmdBanDel.AddCommand(cmdBanDelIp)
	var cmdBanDelRange = &cobra.Command{
		Use:     "range <target>",
		Short:   "Delete bans for given range from db",
		Example: `cscli ban del range 1.2.3.0/24`,
		Args:    cobra.ExactArgs(1),
		Run: func(cmd *cobra.Command, args []string) {
			count, err := outputCTX.Delete(args[0])
			if err != nil {
				log.Fatalf("failed to delete %s : %v", args[0], err)
			}
			log.Infof("Deleted %d entries", count)
		},
	}
	cmdBanDel.AddCommand(cmdBanDelRange)

	var cmdBanList = &cobra.Command{
		Use:   "list",
		Short: "List local or api bans/remediations",
		Long: `List the bans, by default only local decisions.

If --all/-a is specified, api-provided bans will be displayed too.
+
+Time can be specified with --at and supports a variety of date formats:
+ - Jan 2 15:04:05
+ - Mon Jan 02 15:04:05.000000 2006
+ - 2006-01-02T15:04:05Z07:00
+ - 2006/01/02
+ - 2006/01/02 15:04
+ - 2006-01-02
+ - 2006-01-02 15:04
+`,
		Args: cobra.ExactArgs(0),
		Run: func(cmd *cobra.Command, args []string) {
			if err := BanList(); err != nil {
				log.Fatalf("failed to list bans : %v", err)
			}
		},
	}
	cmdBanList.PersistentFlags().StringVar(&atTime, "at", "", "List bans at given time")
	cmdBanList.PersistentFlags().BoolVarP(&all, "all", "a", false, "Also list bans received from API")
	cmdBan.AddCommand(cmdBanList)
	return cmdBan
}
diff --git a/cmd/crowdsec-cli/config.go b/cmd/crowdsec-cli/config.go
new file mode 100644
index 000000000..f5ce82c5f
--- /dev/null
+++ b/cmd/crowdsec-cli/config.go
@@ -0,0 +1,160 @@
+package main
+
+import (
+	"bufio"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"strings"
+
+	log "github.com/sirupsen/logrus"
+	"github.com/spf13/cobra"
+	"gopkg.in/yaml.v2"
+)
+
+/*CliCfg is the cli configuration structure, might be unexported*/
+type cliConfig struct {
+	configured          bool
+	simulation          bool      /*are we in simulation mode*/
+	configFolder        string    `yaml:"cliconfig,omitempty"` /*overload ~/.cscli/*/
+	output              string    /*output is human, json*/
+	logLevel            log.Level /*debug,info,warning,error*/
+	hubFolder           string
+	InstallFolder       string `yaml:"installdir"` /*/etc/crowdsec/*/
+	BackendPluginFolder string `yaml:"backend"`
+	dbPath              string
+}
+
+func interactiveCfg() error {
+	var err error
+	reader := bufio.NewReader(os.Stdin)
+	fmt.Print("crowdsec installation directory (default: /etc/crowdsec/crowdsec/): ")
+	config.InstallFolder, err = reader.ReadString('\n')
+	config.InstallFolder = strings.Replace(config.InstallFolder, "\n", "", -1) //strip the trailing newline
+	if config.InstallFolder == "" {
+		config.InstallFolder = "/etc/crowdsec/crowdsec/"
+	}
+	if err != nil {
+		log.Fatalf("failed to read input : %v", err.Error())
+	}
+
+	fmt.Print("crowdsec backend plugin directory (default: /etc/crowdsec/plugin/backend): ")
+	config.BackendPluginFolder, err = reader.ReadString('\n')
+	config.BackendPluginFolder = strings.Replace(config.BackendPluginFolder, "\n", "", -1) //strip the trailing newline
+	if config.BackendPluginFolder == "" {
+		config.BackendPluginFolder = "/etc/crowdsec/plugin/backend"
+	}
+	if err != nil {
+		log.Fatalf("failed to read input : %v", err.Error())
+	}
+	if err := writeCfg(); err != nil {
+		log.Fatalf("failed writing configuration file : %s", err)
+	}
+	return nil
+}
+
+func writeCfg() error {
+
+	if config.configFolder == "" {
+		return fmt.Errorf("config dir is unset")
+	}
+
+	config.hubFolder = config.configFolder + "/hub/"
+	if _, err := os.Stat(config.hubFolder); os.IsNotExist(err) {
+
+		log.Warningf("creating skeleton!")
+		if err := os.MkdirAll(config.hubFolder, os.ModePerm); err != nil {
+			return fmt.Errorf("failed to create missing directory : '%s'", config.hubFolder)
+		}
+	}
+	out := path.Join(config.configFolder, "/config")
+	configYaml, err := yaml.Marshal(&config)
+	if err != nil {
+		return fmt.Errorf("failed marshaling config: %s", err)
+	}
+	err = ioutil.WriteFile(out, configYaml, 0644)
+	if err != nil {
+		return fmt.Errorf("failed to write to %s : %s", out, err)
+	}
+	log.Infof("wrote config to %s ", out)
+	return nil
+}
+
+func NewConfigCmd() *cobra.Command {
+
+	var cmdConfig = &cobra.Command{
+		Use:   "config [command]",
+		Short: "Allows to view/edit cscli config",
+		Long: `Allows to configure the sqlite path and the installation directory.
+If no commands are specified, config is in interactive mode.`,
		Example: ` - cscli config show
- cscli config prompt`,
		Args: cobra.ExactArgs(1),
	}
	var cmdConfigShow = &cobra.Command{
		Use:   "show",
		Short: "Displays current config",
		Long:  `Displays the current cli configuration.`,
		Args:  cobra.ExactArgs(0),
		Run: func(cmd *cobra.Command, args []string) {
			if config.output == "json" {
				log.WithFields(log.Fields{
					"installdir": config.InstallFolder,
					"cliconfig":  path.Join(config.configFolder, "/config"),
				}).Warning("Current config")
			} else {
				x, err := yaml.Marshal(config)
				if err != nil {
					log.Fatalf("failed to marshal current configuration : %v", err)
				}
				fmt.Printf("%s", x)
				fmt.Printf("#cliconfig: %s", path.Join(config.configFolder, "/config"))
			}
		},
	}
	cmdConfig.AddCommand(cmdConfigShow)
	var cmdConfigInteractive = &cobra.Command{
		Use:   "prompt",
		Short: "Prompt for configuration values in an interactive fashion",
		Long:  `Start interactive configuration of cli. It will successively ask for the install dir and the db path.`,
		Args:  cobra.ExactArgs(0),
		Run: func(cmd *cobra.Command, args []string) {
			err := interactiveCfg()
			if err != nil {
				log.Fatalf("Failed to run interactive config : %s", err)
			}
			log.Warningf("Configured, please run update.")
		},
	}
	cmdConfig.AddCommand(cmdConfigInteractive)
	var cmdConfigInstalldir = &cobra.Command{
		Use:   "installdir [value]",
		Short: `Configure installation directory`,
		Long:  `Configure the installation directory of crowdsec, such as /etc/crowdsec/crowdsec/`,
		Args:  cobra.ExactArgs(1),
		Run: func(cmd *cobra.Command, args []string) {
			config.InstallFolder = args[0]
			if err := writeCfg(); err != nil {
				log.Fatalf("failed writing configuration: %s", err)
			}
		},
	}
	cmdConfig.AddCommand(cmdConfigInstalldir)

	var cmdConfigBackendFolder = &cobra.Command{
		Use:   "backend [value]",
		Short: `Configure backend plugin directory`,
		Long:  `Configure the backend plugin directory of crowdsec, such as /etc/crowdsec/plugins/backend`,
		Args:  cobra.ExactArgs(1),
		Run: func(cmd *cobra.Command, args []string) {
			config.BackendPluginFolder = args[0]
			if err := writeCfg(); err != nil {
				log.Fatalf("failed writing configuration: %s", err)
			}
		},
	}
	cmdConfig.AddCommand(cmdConfigBackendFolder)

	return cmdConfig
}
diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go
new file mode 100644
index 000000000..899fd94ee
--- /dev/null
+++ b/cmd/crowdsec-cli/dashboard.go
@@ -0,0 +1,371 @@
+package main
+
+import (
+	"archive/zip"
+	"bufio"
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"path"
+	"time"
+
+	"github.com/crowdsecurity/crowdsec/pkg/cwversion"
+	"github.com/dghubble/sling"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/mount"
+	"github.com/docker/docker/client"
+	"github.com/docker/go-connections/nat"
+	log "github.com/sirupsen/logrus"
+	"github.com/spf13/cobra"
+)
+
+var (
+	metabaseImage  = "metabase/metabase"
+	metabaseDbURI  = "https://crowdsec-statics-assets.s3-eu-west-1.amazonaws.com/metabase.db.zip"
+	metabaseDbPath = "/var/lib/crowdsec/data"
+	/**/
+	metabaseListenAddress = "127.0.0.1"
+	metabaseListenPort    = "3000"
+	metabaseContainerID   = "/crowdsec-metabase"
+	/*information needed to set up a random password on the user's behalf*/
+	metabaseURI        = "http://localhost:3000/api/"
+	metabaseURISession = "session"
+	metabaseURIRescan  = "database/2/rescan_values"
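+	// NB: the object ids in these routes (database/2, user/1) are assumed to match the objects shipped in the pre-built metabase.db archive fetched by downloadMetabaseDB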
+	metabaseURIUpdatepwd = "user/1/password"
+	defaultPassword      = "c6cmetabase"
+	defaultEmail         = "metabase@crowdsec.net"
+)
+
+func NewDashboardCmd() *cobra.Command {
+	/* ---- UPDATE COMMAND */
+	var cmdDashboard = &cobra.Command{
+		Use:   "dashboard",
+		Short: "Start a dashboard (metabase) container.",
+		Long:  `Start a metabase container exposing dashboards and metrics.`,
+		Args:  cobra.ExactArgs(1),
+		Example: `cscli dashboard setup
cscli dashboard start
cscli dashboard stop
cscli dashboard setup --force`,
+	}
+
+	var force bool
+	var cmdDashSetup = &cobra.Command{
+		Use:   "setup",
+		Short: "Setup a metabase container.",
+		Long:  `Perform a metabase docker setup, download standard dashboards, create a fresh user and start the container`,
+		Args:  cobra.ExactArgs(0),
+		Example: `cscli dashboard setup
cscli dashboard setup --force
cscli dashboard setup -l 0.0.0.0 -p 443
 `,
+		Run: func(cmd *cobra.Command, args []string) {
+			if err := downloadMetabaseDB(force); err != nil {
+				log.Fatalf("Failed to download metabase DB : %s", err)
+			}
+			log.Infof("Downloaded metabase DB")
+			if err := createMetabase(); err != nil {
+				log.Fatalf("Failed to start metabase container : %s", err)
+			}
+			log.Infof("Started metabase")
+			newpassword := generatePassword()
+			if err := resetMetabasePassword(newpassword); err != nil {
+				log.Fatalf("Failed to reset password : %s", err)
+			}
+			log.Infof("Setup finished")
+			log.Infof("url : http://%s:%s", metabaseListenAddress, metabaseListenPort)
+			log.Infof("username: %s", defaultEmail)
+			log.Infof("password: %s", newpassword)
+		},
+	}
+	cmdDashSetup.Flags().BoolVarP(&force, "force", "f", false, "Force setup : override existing files.")
+	cmdDashSetup.Flags().StringVarP(&metabaseDbPath, "dir", "d", metabaseDbPath, "Shared directory with metabase container.")
+	cmdDashSetup.Flags().StringVarP(&metabaseListenAddress, "listen", "l", metabaseListenAddress, "Listen address of container")
+	cmdDashSetup.Flags().StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container")
+	cmdDashboard.AddCommand(cmdDashSetup)
+
+	var cmdDashStart = &cobra.Command{
+		Use:   "start",
+		Short: "Start the metabase container.",
+		Long:  `Starts the metabase container using docker.`,
+		Args:  cobra.ExactArgs(0),
+		Run: func(cmd *cobra.Command, args []string) {
+			if err := startMetabase(); err != nil {
+				log.Fatalf("Failed to start metabase container : %s", err)
+			}
+			log.Infof("Started metabase")
+			log.Infof("url : http://%s:%s", metabaseListenAddress, metabaseListenPort)
+		},
+	}
+	cmdDashboard.AddCommand(cmdDashStart)
+
+	var remove bool
+	var cmdDashStop = &cobra.Command{
+		Use:   "stop",
+		Short: "Stops the metabase container.",
+		Long:  `Stops the metabase container using docker.`,
+		Args:  cobra.ExactArgs(0),
+		Run: func(cmd *cobra.Command, args []string) {
+			if err := stopMetabase(remove); err != nil {
+				log.Fatalf("Failed to stop metabase container : %s", err)
+			}
+		},
+	}
+	cmdDashStop.Flags().BoolVarP(&remove, "remove", "r", false, "remove (docker rm) container as well.")
+	cmdDashboard.AddCommand(cmdDashStop)
+	return cmdDashboard
+}
+
+func downloadMetabaseDB(force bool) error {
+
+	metabaseDBSubpath := path.Join(metabaseDbPath, "metabase.db")
+
+	_, err := os.Stat(metabaseDBSubpath)
+	if err == nil && !force {
+		log.Printf("%s exists, skip.", metabaseDBSubpath)
+		return nil
+	}
+
+	if err := os.MkdirAll(metabaseDBSubpath, 0755); err != nil {
+		return fmt.Errorf("failed to create %s : %s", metabaseDBSubpath, err)
+	}
+
+	req, err := http.NewRequest("GET",
+	if err != nil {
+		return fmt.Errorf("failed to build request to fetch metabase db : %s", err)
+	}
+	//This needs to be removed once we move the zip out of github
+	req.Header.Add("Accept", `application/vnd.github.v3.raw`)
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return fmt.Errorf("failed request to fetch metabase db : %s", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != 200 {
+		return fmt.Errorf("got http %d while requesting metabase db %s, stop", resp.StatusCode, metabaseDbURI)
+	}
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return fmt.Errorf("failed to read metabase db response : %s", err)
+	}
+
+	log.Printf("Got %d bytes archive", len(body))
+	if err := extractMetabaseDB(bytes.NewReader(body)); err != nil {
+		return fmt.Errorf("while extracting zip : %s", err)
+	}
+	return nil
+}
+
+func extractMetabaseDB(buf *bytes.Reader) error {
+	r, err := zip.NewReader(buf, int64(buf.Len()))
+	if err != nil {
+		return fmt.Errorf("failed to open zip archive : %s", err)
+	}
+	for _, f := range r.File {
+		tfname := fmt.Sprintf("%s/%s", metabaseDbPath, f.Name)
+		log.Debugf("%s -> %d", f.Name, f.UncompressedSize64)
+		if f.UncompressedSize64 == 0 {
+			continue
+		}
+		tfd, err := os.OpenFile(tfname, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0644)
+		if err != nil {
+			return fmt.Errorf("Failed opening target file '%s' : %s", tfname, err)
+		}
+		rc, err := f.Open()
+		if err != nil {
+			return fmt.Errorf("While opening zip content %s : %s", f.Name, err)
+		}
+		// io.Copy returns nil on success, never io.EOF
+		written, err := io.Copy(tfd, rc)
+		if err != nil {
+			return fmt.Errorf("While copying content to %s : %s", tfname, err)
+		}
+		log.Infof("written %d bytes to %s", written, tfname)
+		rc.Close()
+		tfd.Close()
+	}
+	return nil
+}
+
+func resetMetabasePassword(newpassword string) error {
+
+	httpctx := sling.New().Base(metabaseURI).Set("User-Agent", fmt.Sprintf("CrowdWatch/%s", cwversion.VersionStr()))
+
+	log.Printf("Waiting for metabase API to be up (can take up to a minute)")
+	for {
+		sessionreq, err := httpctx.New().Post(metabaseURISession).BodyJSON(map[string]string{"username": defaultEmail, "password": defaultPassword}).Request()
+		if err != nil {
+			return fmt.Errorf("api signin: HTTP request creation failed: %s", err)
+		}
+		httpClient := http.Client{Timeout: 20 * time.Second}
+		resp, err := httpClient.Do(sessionreq)
+		if err != nil {
+			fmt.Printf(".")
+			log.Debugf("While waiting for metabase to be up : %s", err)
+			time.Sleep(1 * time.Second)
+			continue
+		}
+		defer resp.Body.Close()
+		fmt.Printf("\n")
+		log.Printf("Metabase API is up")
+		body, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return fmt.Errorf("metabase session unable to read API response body: '%s'", err)
+		}
+		if resp.StatusCode != 200 {
+			return fmt.Errorf("metabase session http error (%d): %s", resp.StatusCode, string(body))
+		}
+		log.Printf("Successfully authenticated")
+		jsonResp := make(map[string]string)
+		err = json.Unmarshal(body, &jsonResp)
+		if err != nil {
+			return fmt.Errorf("failed to unmarshal metabase api response '%s': %s", string(body), err.Error())
+		}
+		log.Debugf("unmarshaled response : %v", jsonResp)
+		httpctx = httpctx.Set("Cookie", fmt.Sprintf("metabase.SESSION=%s", jsonResp["id"]))
+		break
+	}
+
+	/*rescan values*/
+	sessionreq, err := httpctx.New().Post(metabaseURIRescan).Request()
+	if err != nil {
+		return fmt.Errorf("metabase rescan_values http error : %s", err)
+	}
+	httpClient := http.Client{Timeout: 20 * time.Second}
+	resp, err := httpClient.Do(sessionreq)
+	if err != nil {
+		return fmt.Errorf("while calling the metabase rescan api : %s", err)
+	}
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return fmt.Errorf("while reading the rescan api response : %s", err)
+	}
+	if resp.StatusCode != 200 {
+		return fmt.Errorf("Got '%s' (http:%d) while trying to rescan metabase", string(body), resp.StatusCode)
+	}
+	/*update password*/
+	sessionreq, err = httpctx.New().Put(metabaseURIUpdatepwd).BodyJSON(map[string]string{
+		"id":           "1",
+		"password":     newpassword,
+		"old_password": defaultPassword}).Request()
+	if err != nil {
+		return fmt.Errorf("metabase password change http error : %s", err)
+	}
+	httpClient = http.Client{Timeout: 20 * time.Second}
+	resp, err = httpClient.Do(sessionreq)
+	if err != nil {
+		return fmt.Errorf("While trying to reset metabase password : %s", err)
+	}
+	defer resp.Body.Close()
+	body, err = ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return fmt.Errorf("while reading from %s: '%s'", metabaseURIUpdatepwd, err)
+	}
+	if resp.StatusCode != 200 {
+		log.Printf("Got %s (http:%d) while trying to reset password.", string(body), resp.StatusCode)
+		log.Printf("Password has probably already been changed.")
+		log.Printf("Use the dashboard install command to reset existing setup.")
+		return fmt.Errorf("got http error %d on %s : %s", resp.StatusCode, metabaseURIUpdatepwd, string(body))
+	}
+	log.Printf("Changed password!")
+	return nil
+}
+
+func startMetabase() error {
+	ctx := context.Background()
+	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	if err != nil {
+		return fmt.Errorf("Failed to create docker client : %s", err)
+	}
+
+	if err := cli.ContainerStart(ctx, metabaseContainerID, types.ContainerStartOptions{}); err != nil {
+		return fmt.Errorf("Failed while starting %s : %s", metabaseContainerID, err)
+	}
+
+	return nil
+}
+
+func stopMetabase(remove bool) error {
+	log.Printf("Stop docker metabase %s", metabaseContainerID)
+	ctx := context.Background()
+	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	if err != nil {
+		return fmt.Errorf("Failed to create docker client : %s", err)
+	}
+	var to time.Duration = 20 * time.Second
+	if err := cli.ContainerStop(ctx, metabaseContainerID, &to); err != nil {
+		return fmt.Errorf("Failed while stopping %s : %s", metabaseContainerID, err)
+	}
+
+	if remove {
+		log.Printf("Removing docker metabase %s", metabaseContainerID)
+		if err := cli.ContainerRemove(ctx, metabaseContainerID, types.ContainerRemoveOptions{}); err != nil {
+			return fmt.Errorf("Failed to remove container %s : %s", metabaseContainerID, err)
+		}
+	}
+	return nil
+}
+
+func createMetabase() error {
+	ctx := context.Background()
+	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	if err != nil {
+		return fmt.Errorf("Failed to start docker client : %s", err)
+	}
+
+	log.Printf("Pulling docker image %s", metabaseImage)
+	reader, err := cli.ImagePull(ctx, metabaseImage, types.ImagePullOptions{})
+	if err != nil {
+		return fmt.Errorf("Failed to pull docker image : %s", err)
+	}
+	defer reader.Close()
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		fmt.Print(".")
+	}
+	if err := scanner.Err(); err != nil {
+		return fmt.Errorf("failed to read imagepull reader: %s", err)
+	}
+	fmt.Print("\n")
+
+	hostConfig := &container.HostConfig{
+		PortBindings: nat.PortMap{
+			"3000/tcp": []nat.PortBinding{
+				{
+					HostIP:   metabaseListenAddress,
+					HostPort: metabaseListenPort,
+				},
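+				// container port 3000/tcp is published on the host address/port
+				// chosen via the --listen/--port flags of `cscli dashboard setup`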
+			},
+		},
+		Mounts: []mount.Mount{
+			{
+				Type:   mount.TypeBind,
+				Source: metabaseDbPath,
+				Target: "/metabase-data",
+			},
+		},
+	}
+	dockerConfig := &container.Config{
+		Image: metabaseImage,
+		Tty:   true,
+		Env:   []string{"MB_DB_FILE=/metabase-data/metabase.db"},
+	}
+
+	log.Printf("Creating container")
+	resp, err := cli.ContainerCreate(ctx, dockerConfig, hostConfig, nil, metabaseContainerID)
+	if err != nil {
+		return fmt.Errorf("Failed to create container : %s", err)
+	}
+	log.Printf("Starting container")
+	if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
+		return fmt.Errorf("Failed to start docker container : %s", err)
+	}
+	return nil
+}
diff --git a/cmd/crowdsec-cli/doc/cwcli.md b/cmd/crowdsec-cli/doc/cwcli.md
new file mode 100644
index 000000000..8f25881df
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli.md
@@ -0,0 +1,57 @@
+## cscli
+
+cscli allows you to manage crowdsec
+
+### Synopsis
+
+cscli is the main command to interact with your crowdsec service, scenarios & db.
+It is meant to allow you to manage bans, parsers/scenarios/etc., the api, and generally manage your crowdsec setup.
+
+### Examples
+
+```
+View/Add/Remove bans:
+ - cscli ban list
+ - cscli ban add ip 1.2.3.4 24h 'go away'
+ - cscli ban del 1.2.3.4
+
+View/Add/Upgrade/Remove scenarios and parsers:
+ - cscli list
+ - cscli install collection crowdsec/linux-web
+ - cscli remove scenario crowdsec/ssh_enum
+ - cscli upgrade --all
+
+API interaction:
+ - cscli api pull
+ - cscli api register
+
+```
+
+### Options
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --debug               Set logging to debug.
+      --info                Set logging to info.
+      --warning             Set logging to warning.
+      --error               Set logging to error.
+  -h, --help                help for cscli
+```
+
+### SEE ALSO
+
+* [cscli api](cscli_api.md) - Crowdsec API interaction
+* [cscli backup](cscli_backup.md) - Backup or restore configuration (api, parsers, scenarios etc.) to/from directory
+* [cscli ban](cscli_ban.md) - Manage bans/mitigations
+* [cscli config](cscli_config.md) - Allows to view/edit cscli config
+* [cscli dashboard](cscli_dashboard.md) - Start a dashboard (metabase) container.
+* [cscli inspect](cscli_inspect.md) - Inspect configuration(s)
+* [cscli install](cscli_install.md) - Install configuration(s) from hub
+* [cscli list](cscli_list.md) - List enabled configs
+* [cscli metrics](cscli_metrics.md) - Display crowdsec prometheus metrics.
+* [cscli remove](cscli_remove.md) - Remove/disable configuration(s)
+* [cscli update](cscli_update.md) - Fetch available configs from hub
+* [cscli upgrade](cscli_upgrade.md) - Upgrade configuration(s)
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_api.md b/cmd/crowdsec-cli/doc/cwcli_api.md
new file mode 100644
index 000000000..b31a06d32
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_api.md
@@ -0,0 +1,49 @@
+## cscli api
+
+Crowdsec API interaction
+
+### Synopsis
+
+
+Allows you to register your machine with the crowdsec API to send and receive signals.
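+
+For readers scripting against the same backend outside of cscli, here is a minimal
+sketch of what a registration round-trip could look like. The endpoint path and the
+request/response JSON shape are illustrative assumptions, not the documented crowdsec
+API; only the machine_id/password pair, which `cscli api register` prints for
+api.yaml, is taken from these docs.
+
+```go
+package apiclient
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// Credentials mirrors the machine_id/password pair that `cscli api register`
+// prints for api.yaml; the JSON field names are assumptions.
+type Credentials struct {
+	MachineID string `json:"machine_id"`
+	Password  string `json:"password"`
+}
+
+// Register posts to a hypothetical /register endpoint on baseURL and decodes
+// the credentials returned by the server.
+func Register(baseURL string) (*Credentials, error) {
+	client := http.Client{Timeout: 10 * time.Second}
+	resp, err := client.Post(baseURL+"/register", "application/json", bytes.NewBufferString("{}"))
+	if err != nil {
+		return nil, fmt.Errorf("register call failed: %s", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("register: unexpected status %d", resp.StatusCode)
+	}
+	creds := &Credentials{}
+	if err := json.NewDecoder(resp.Body).Decode(creds); err != nil {
+		return nil, fmt.Errorf("register: decoding response: %s", err)
+	}
+	return creds, nil
+}
+```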
+
+
+### Examples
+
+```
+
+cscli api register      # Register to Crowdsec API
+cscli api pull          # Pull malevolent IPs from Crowdsec API
+cscli api reset         # Reset your machine's credentials
+cscli api enroll        # Enroll your machine with the user account you created on the Crowdsec backend
+cscli api credentials   # Display your API credentials
+
+```
+
+### Options
+
+```
+  -h, --help   help for api
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+      --error               Set logging to error.
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli](cscli.md) - cscli allows you to manage crowdsec
+* [cscli api credentials](cscli_api_credentials.md) - Display api credentials
+* [cscli api enroll](cscli_api_enroll.md) - Associate your machine to an existing crowdsec user
+* [cscli api pull](cscli_api_pull.md) - Pull crowdsec API TopX
+* [cscli api register](cscli_api_register.md) - Register on Crowdsec API
+* [cscli api reset](cscli_api_reset.md) - Reset password on CrowdSec API
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_api_credentials.md b/cmd/crowdsec-cli/doc/cwcli_api_credentials.md
new file mode 100644
index 000000000..f88711a49
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_api_credentials.md
@@ -0,0 +1,40 @@
+## cscli api credentials
+
+Display api credentials
+
+### Synopsis
+
+Display api credentials
+
+```
+cscli api credentials [flags]
+```
+
+### Examples
+
+```
+cscli api credentials
+```
+
+### Options
+
+```
+  -h, --help   help for credentials
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+      --error               Set logging to error.
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli api](cscli_api.md) - Crowdsec API interaction
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_api_enroll.md b/cmd/crowdsec-cli/doc/cwcli_api_enroll.md
new file mode 100644
index 000000000..360c01b9c
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_api_enroll.md
@@ -0,0 +1,41 @@
+## cscli api enroll
+
+Associate your machine to an existing crowdsec user
+
+### Synopsis
+
+Enrolling your machine into your user account will allow for more accurate lists and threat detection. See the website to create a user account.
+
+```
+cscli api enroll [flags]
+```
+
+### Examples
+
+```
+cscli api enroll -u 1234567890ffff
+```
+
+### Options
+
+```
+  -h, --help          help for enroll
+  -u, --user string   User ID (required)
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+      --error               Set logging to error.
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli api](cscli_api.md) - Crowdsec API interaction
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_api_pull.md b/cmd/crowdsec-cli/doc/cwcli_api_pull.md
new file mode 100644
index 000000000..318913216
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_api_pull.md
@@ -0,0 +1,40 @@
+## cscli api pull
+
+Pull crowdsec API TopX
+
+### Synopsis
+
+Pulls a list of malevolent IPs relevant to your situation and adds them to the local ban database.
+
+```
+cscli api pull [flags]
+```
+
+### Examples
+
+```
+cscli api pull
+```
+
+### Options
+
+```
+  -h, --help   help for pull
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+      --error               Set logging to error.
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli api](cscli_api.md) - Crowdsec API interaction
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_api_register.md b/cmd/crowdsec-cli/doc/cwcli_api_register.md
new file mode 100644
index 000000000..633faf217
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_api_register.md
@@ -0,0 +1,41 @@
+## cscli api register
+
+Register on Crowdsec API
+
+### Synopsis
+
+This command will register your machine with the crowdsec API so that you can receive lists of malevolent IPs.
+ The printed machine_id and password should be added to your api.yaml file.
+
+```
+cscli api register [flags]
+```
+
+### Examples
+
+```
+cscli api register
+```
+
+### Options
+
+```
+  -h, --help   help for register
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+      --error               Set logging to error.
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli api](cscli_api.md) - Crowdsec API interaction
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_api_reset.md b/cmd/crowdsec-cli/doc/cwcli_api_reset.md
new file mode 100644
index 000000000..3bd76fc41
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_api_reset.md
@@ -0,0 +1,40 @@
+## cscli api reset
+
+Reset password on CrowdSec API
+
+### Synopsis
+
+Attempts to reset your API credentials.
+
+```
+cscli api reset [flags]
+```
+
+### Examples
+
+```
+cscli api reset
+```
+
+### Options
+
+```
+  -h, --help   help for reset
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+      --error               Set logging to error.
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli api](cscli_api.md) - Crowdsec API interaction
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_backup.md b/cmd/crowdsec-cli/doc/cwcli_backup.md
new file mode 100644
index 000000000..022a259b2
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_backup.md
@@ -0,0 +1,40 @@
+## cscli backup
+
+Backup or restore configuration (api, parsers, scenarios etc.) to/from directory
+
+### Synopsis
+
+This command is here to help you save and/or restore crowdsec configurations for simple replication
+
+### Examples
+
+```
+cscli backup save ./my-backup
+cscli backup restore ./my-backup
+```
+
+### Options
+
+```
+      --cfgdir string   Configuration directory (default "/etc/crowdsec/")
+  -h, --help            help for backup
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+      --error               Set logging to error.
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli](cscli.md) - cscli allows you to manage crowdsec
+* [cscli backup restore](cscli_backup_restore.md) - Restore configuration (api, parsers, scenarios etc.) from directory
+* [cscli backup save](cscli_backup_save.md) - Backup configuration (api, parsers, scenarios etc.) to directory
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_backup_restore.md b/cmd/crowdsec-cli/doc/cwcli_backup_restore.md
new file mode 100644
index 000000000..c1830a1ef
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_backup_restore.md
@@ -0,0 +1,50 @@
+## cscli backup restore
+
+Restore configuration (api, parsers, scenarios etc.) from directory
+
+### Synopsis
+
+restore command will try to restore all saved information to your local setup, including :
+
+- Installation of up-to-date scenarios/parsers/... via cscli
+
+- Restoration of tainted/local/out-of-date scenarios/parsers/... files
+
+- Restoration of API credentials (if the existing ones aren't working)
+
+- Restoration of acquisition configuration
+
+
+```
+cscli backup restore [flags]
+```
+
+### Examples
+
+```
+cscli backup restore ./my-backup
+```
+
+### Options
+
+```
+  -h, --help   help for restore
+```
+
+### Options inherited from parent commands
+
+```
+      --cfgdir string       Configuration directory (default "/etc/crowdsec/")
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+      --error               Set logging to error.
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli backup](cscli_backup.md) - Backup or restore configuration (api, parsers, scenarios etc.) to/from directory
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_backup_save.md b/cmd/crowdsec-cli/doc/cwcli_backup_save.md
new file mode 100644
index 000000000..af1688eff
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_backup_save.md
@@ -0,0 +1,51 @@
+## cscli backup save
+
+Backup configuration (api, parsers, scenarios etc.) to directory
+
+### Synopsis
+
+backup command will try to save all relevant information about your crowdsec config, including :
+
+- List of scenarios, parsers, postoverflows and collections that are up-to-date
+
+- Actual backup of tainted/local/out-of-date scenarios, parsers, postoverflows and collections
+
+- Backup of API credentials
+
+- Backup of acquisition configuration
+
+
+
+```
+cscli backup save [flags]
+```
+
+### Examples
+
+```
+cscli backup save ./my-backup
+```
+
+### Options
+
+```
+  -h, --help   help for save
+```
+
+### Options inherited from parent commands
+
+```
+      --cfgdir string       Configuration directory (default "/etc/crowdsec/")
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+      --error               Set logging to error.
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli backup](cscli_backup.md) - Backup or restore configuration (api, parsers, scenarios etc.) to/from directory
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_ban.md b/cmd/crowdsec-cli/doc/cwcli_ban.md
new file mode 100644
index 000000000..ce5b9d3f2
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_ban.md
@@ -0,0 +1,38 @@
+## cscli ban
+
+Manage bans/mitigations
+
+### Synopsis
+
+This is the main interaction point with the local ban database for humans.
+
+You can add/delete/list or flush current bans in your local ban DB.
+
+### Options
+
+```
+      --db string            Set path to SQLite DB.
+      --remediation string   Set specific remediation type : ban|slow|captcha (default "ban")
+  -h, --help                 help for ban
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+      --error               Set logging to error.
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli](cscli.md) - cscli allows you to manage crowdsec
+* [cscli ban add](cscli_ban_add.md) - Adds a ban against a given ip/range for the provided duration
+* [cscli ban del](cscli_ban_del.md) - Delete bans from db
+* [cscli ban flush](cscli_ban_flush.md) - Flush ban DB
+* [cscli ban list](cscli_ban_list.md) - List local or api bans/remediations
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_ban_add.md b/cmd/crowdsec-cli/doc/cwcli_ban_add.md
new file mode 100644
index 000000000..b6f57a3c5
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_ban_add.md
@@ -0,0 +1,46 @@
+## cscli ban add
+
+Adds a ban against a given ip/range for the provided duration
+
+### Synopsis
+
+
+Allows you to add a ban against a specific ip or range target for a specific duration.
+
+The duration argument can be expressed in seconds (s), minutes (m) or hours (h).
+
+See [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) for more information.
+
+### Examples
+
+```
+cscli ban add ip 1.2.3.4 24h "scan"
+cscli ban add range 1.2.3.0/24 24h "the whole range"
+```
+
+### Options
+
+```
+  -h, --help   help for add
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string    Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --db string            Set path to SQLite DB.
+      --debug                Set logging to debug.
+      --error                Set logging to error.
+      --info                 Set logging to info.
+  -o, --output string        Output format : human, json, raw. (default "human")
+      --remediation string   Set specific remediation type : ban|slow|captcha (default "ban")
+      --warning              Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli ban](cscli_ban.md) - Manage bans/mitigations
+* [cscli ban add ip](cscli_ban_add_ip.md) - Adds the specific ip to the ban db
+* [cscli ban add range](cscli_ban_add_range.md) - Adds the specific ip range to the ban db
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_ban_add_ip.md b/cmd/crowdsec-cli/doc/cwcli_ban_add_ip.md
new file mode 100644
index 000000000..4416a174c
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_ban_add_ip.md
@@ -0,0 +1,42 @@
+## cscli ban add ip
+
+Adds the specific ip to the ban db
+
+### Synopsis
+
+Duration must be [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) compatible, expressed in s/m/h.
+
+```
+cscli ban add ip [flags]
+```
+
+### Examples
+
+```
+cscli ban add ip 1.2.3.4 12h "the scan"
+```
+
+### Options
+
+```
+  -h, --help   help for ip
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string    Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --db string            Set path to SQLite DB.
+      --debug                Set logging to debug.
+      --error                Set logging to error.
+      --info                 Set logging to info.
+  -o, --output string        Output format : human, json, raw. (default "human")
+      --remediation string   Set specific remediation type : ban|slow|captcha (default "ban")
+      --warning              Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli ban add](cscli_ban_add.md) - Adds a ban against a given ip/range for the provided duration
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_ban_add_range.md b/cmd/crowdsec-cli/doc/cwcli_ban_add_range.md
new file mode 100644
index 000000000..cfe46ba09
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_ban_add_range.md
@@ -0,0 +1,42 @@
+## cscli ban add range
+
+Adds the specific ip range to the ban db
+
+### Synopsis
+
+Duration must be [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) compatible, expressed in s/m/h.
+
+```
+cscli ban add range [flags]
+```
+
+### Examples
+
+```
+cscli ban add range 1.2.3.0/24 12h "the whole range"
+```
+
+### Options
+
+```
+  -h, --help   help for range
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string    Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --db string            Set path to SQLite DB.
+      --debug                Set logging to debug.
+      --error                Set logging to error.
+      --info                 Set logging to info.
+  -o, --output string        Output format : human, json, raw. (default "human")
+      --remediation string   Set specific remediation type : ban|slow|captcha (default "ban")
+      --warning              Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli ban add](cscli_ban_add.md) - Adds a ban against a given ip/range for the provided duration
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_ban_del.md b/cmd/crowdsec-cli/doc/cwcli_ban_del.md
new file mode 100644
index 000000000..69b06dfb1
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_ban_del.md
@@ -0,0 +1,41 @@
+## cscli ban del
+
+Delete bans from db
+
+### Synopsis
+
+The removal of the bans can be applied on a single IP address or directly on an IP range.
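+
+As a side note for integrators: the ip/range split used by the ban commands and the
+duration format can both be validated with the Go standard library alone. A small
+illustrative sketch (not code from the crowdsec source):
+
+```go
+package main
+
+import (
+	"fmt"
+	"net"
+	"time"
+)
+
+// classifyTarget mirrors the ip/range distinction of `cscli ban add|del`:
+// a bare address is an "ip", a CIDR such as 1.2.3.0/24 is a "range".
+func classifyTarget(target string) string {
+	if net.ParseIP(target) != nil {
+		return "ip"
+	}
+	if _, _, err := net.ParseCIDR(target); err == nil {
+		return "range"
+	}
+	return "invalid"
+}
+
+func main() {
+	for _, t := range []string{"1.2.3.4", "1.2.3.0/24", "not-an-ip"} {
+		fmt.Printf("%-12s -> %s\n", t, classifyTarget(t))
+	}
+	// ban durations follow time.ParseDuration semantics (s/m/h)
+	d, err := time.ParseDuration("24h")
+	fmt.Println(d, err) // 24h0m0s <nil>
+}
+```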
+
+### Examples
+
+```
+cscli ban del ip 1.2.3.4
+cscli ban del range 1.2.3.0/24
+```
+
+### Options
+
+```
+  -h, --help   help for del
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string    Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --db string            Set path to SQLite DB.
+      --debug                Set logging to debug.
+      --error                Set logging to error.
+      --info                 Set logging to info.
+  -o, --output string        Output format : human, json, raw. (default "human")
+      --remediation string   Set specific remediation type : ban|slow|captcha (default "ban")
+      --warning              Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli ban](cscli_ban.md) - Manage bans/mitigations
+* [cscli ban del ip](cscli_ban_del_ip.md) - Delete bans for given ip from db
+* [cscli ban del range](cscli_ban_del_range.md) - Delete bans for given range from db
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_ban_del_ip.md b/cmd/crowdsec-cli/doc/cwcli_ban_del_ip.md
new file mode 100644
index 000000000..10027eba5
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_ban_del_ip.md
@@ -0,0 +1,42 @@
+## cscli ban del ip
+
+Delete bans for given ip from db
+
+### Synopsis
+
+Delete bans for given ip from db
+
+```
+cscli ban del ip [flags]
+```
+
+### Examples
+
+```
+cscli ban del ip 1.2.3.4
+```
+
+### Options
+
+```
+  -h, --help   help for ip
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string    Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --db string            Set path to SQLite DB.
+      --debug                Set logging to debug.
+      --error                Set logging to error.
+      --info                 Set logging to info.
+  -o, --output string        Output format : human, json, raw. (default "human")
+      --remediation string   Set specific remediation type : ban|slow|captcha (default "ban")
+      --warning              Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli ban del](cscli_ban_del.md) - Delete bans from db
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_ban_del_range.md b/cmd/crowdsec-cli/doc/cwcli_ban_del_range.md
new file mode 100644
index 000000000..11a46f057
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_ban_del_range.md
@@ -0,0 +1,42 @@
+## cscli ban del range
+
+Delete bans for given range from db
+
+### Synopsis
+
+Delete bans for given range from db
+
+```
+cscli ban del range [flags]
+```
+
+### Examples
+
+```
+cscli ban del range 1.2.3.0/24
+```
+
+### Options
+
+```
+  -h, --help   help for range
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string    Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --db string            Set path to SQLite DB.
+      --debug                Set logging to debug.
+      --error                Set logging to error.
+      --info                 Set logging to info.
+  -o, --output string        Output format : human, json, raw. (default "human")
+      --remediation string   Set specific remediation type : ban|slow|captcha (default "ban")
+      --warning              Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli ban del](cscli_ban_del.md) - Delete bans from db
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_ban_flush.md b/cmd/crowdsec-cli/doc/cwcli_ban_flush.md
new file mode 100644
index 000000000..538fe68f6
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_ban_flush.md
@@ -0,0 +1,42 @@
+## cscli ban flush
+
+Flush ban DB
+
+### Synopsis
+
+Flush ban DB
+
+```
+cscli ban flush [flags]
+```
+
+### Examples
+
+```
+cscli ban flush
+```
+
+### Options
+
+```
+  -h, --help   help for flush
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string    Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --db string            Set path to SQLite DB.
+      --debug                Set logging to debug.
+      --error                Set logging to error.
+      --info                 Set logging to info.
+  -o, --output string        Output format : human, json, raw. (default "human")
+      --remediation string   Set specific remediation type : ban|slow|captcha (default "ban")
+      --warning              Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli ban](cscli_ban.md) - Manage bans/mitigations
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_ban_list.md b/cmd/crowdsec-cli/doc/cwcli_ban_list.md
new file mode 100644
index 000000000..c179305a8
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_ban_list.md
@@ -0,0 +1,50 @@
+## cscli ban list
+
+List local or api bans/remediations
+
+### Synopsis
+
+List the bans, by default only local decisions.
+
+If --all/-a is specified, api-provided bans will be displayed too.
+
+Time can be specified with --at and supports a variety of date formats:
+ - Jan 2 15:04:05
+ - Mon Jan 02 15:04:05.000000 2006
+ - 2006-01-02T15:04:05Z07:00
+ - 2006/01/02
+ - 2006/01/02 15:04
+ - 2006-01-02
+ - 2006-01-02 15:04
+
+
+```
+cscli ban list [flags]
+```
+
+### Options
+
+```
+  -a, --all         List as well bans received from API
+      --at string   List bans at given time
+  -h, --help        help for list
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string    Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --db string            Set path to SQLite DB.
+      --debug                Set logging to debug.
+      --error                Set logging to error.
+      --info                 Set logging to info.
+  -o, --output string        Output format : human, json, raw. (default "human")
+      --remediation string   Set specific remediation type : ban|slow|captcha (default "ban")
+      --warning              Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli ban](cscli_ban.md) - Manage bans/mitigations
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_config.md b/cmd/crowdsec-cli/doc/cwcli_config.md
new file mode 100644
index 000000000..ff4d2924a
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_config.md
@@ -0,0 +1,42 @@
+## cscli config
+
+Allows to view/edit cscli config
+
+### Synopsis
+
+Allow to configure sqlite path and installation directory.
+If no commands are specified, config is in interactive mode.
+
+### Examples
+
+```
+ - cscli config show
+- cscli config prompt
+```
+
+### Options
+
+```
+  -h, --help   help for config
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+      --error               Set logging to error.
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli](cscli.md) - cscli allows you to manage crowdsec
+* [cscli config backend](cscli_config_backend.md) - Configure backend plugin directory
+* [cscli config installdir](cscli_config_installdir.md) - Configure installation directory
+* [cscli config prompt](cscli_config_prompt.md) - Prompt for configuration values in an interactive fashion
+* [cscli config show](cscli_config_show.md) - Displays current config
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_config_backend.md b/cmd/crowdsec-cli/doc/cwcli_config_backend.md
new file mode 100644
index 000000000..726e60feb
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_config_backend.md
@@ -0,0 +1,34 @@
+## cscli config backend
+
+Configure backend plugin directory
+
+### Synopsis
+
+Configure the backend plugin directory of crowdsec, such as /etc/crowdsec/plugins/backend
+
+```
+cscli config backend [value] [flags]
+```
+
+### Options
+
+```
+  -h, --help   help for backend
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+      --error               Set logging to error.
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli config](cscli_config.md) - Allows to view/edit cscli config
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_config_installdir.md b/cmd/crowdsec-cli/doc/cwcli_config_installdir.md
new file mode 100644
index 000000000..a29112a52
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_config_installdir.md
@@ -0,0 +1,34 @@
+## cscli config installdir
+
+Configure installation directory
+
+### Synopsis
+
+Configure the installation directory of crowdsec, such as /etc/crowdsec/crowdsec/
+
+```
+cscli config installdir [value] [flags]
+```
+
+### Options
+
+```
+  -h, --help   help for installdir
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+      --error               Set logging to error.
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli config](cscli_config.md) - Allows to view/edit cscli config
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_config_prompt.md b/cmd/crowdsec-cli/doc/cwcli_config_prompt.md
new file mode 100644
index 000000000..90e8c013f
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_config_prompt.md
@@ -0,0 +1,34 @@
+## cscli config prompt
+
+Prompt for configuration values in an interactive fashion
+
+### Synopsis
+
+Start interactive configuration of cli. It will successively ask for install dir, db path.
+
+```
+cscli config prompt [flags]
+```
+
+### Options
+
+```
+  -h, --help   help for prompt
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+      --error               Set logging to error.
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+``` + +### SEE ALSO + +* [cscli config](cscli_config.md) - Allows to view/edit cscli config + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_config_show.md b/cmd/crowdsec-cli/doc/cwcli_config_show.md new file mode 100644 index 000000000..415125fc5 --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_config_show.md @@ -0,0 +1,34 @@ +## cscli config show + +Displays current config + +### Synopsis + +Displays the current cli configuration. + +``` +cscli config show [flags] +``` + +### Options + +``` + -h, --help help for show +``` + +### Options inherited from parent commands + +``` + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli config](cscli_config.md) - Allows to view/edit cscli config + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_dashboard.md b/cmd/crowdsec-cli/doc/cwcli_dashboard.md new file mode 100644 index 000000000..f7f836f9f --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_dashboard.md @@ -0,0 +1,42 @@ +## cscli dashboard + +Start a dashboard (metabase) container. + +### Synopsis + +Start a metabase container exposing dashboards and metrics. + +### Examples + +``` +cscli dashboard setup +cscli dashboard start +cscli dashboard stop +cscli dashboard setup --force +``` + +### Options + +``` + -h, --help help for dashboard +``` + +### Options inherited from parent commands + +``` + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli](cscli.md) - cscli allows you to manage crowdsec +* [cscli dashboard setup](cscli_dashboard_setup.md) - Setup a metabase container. +* [cscli dashboard start](cscli_dashboard_start.md) - Start the metabase container. +* [cscli dashboard stop](cscli_dashboard_stop.md) - Stops the metabase container. + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_dashboard_setup.md b/cmd/crowdsec-cli/doc/cwcli_dashboard_setup.md new file mode 100644 index 000000000..ec49f70d3 --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_dashboard_setup.md @@ -0,0 +1,47 @@ +## cscli dashboard setup + +Setup a metabase container. + +### Synopsis + +Perform a metabase docker setup, download standard dashboards, create a fresh user and start the container + +``` +cscli dashboard setup [flags] +``` + +### Examples + +``` +cscli dashboard setup +cscli dashboard setup --force +cscli dashboard setup -l 0.0.0.0 -p 443 + +``` + +### Options + +``` + -d, --dir string Shared directory with metabase container. (default "/var/lib/crowdsec/data") + -f, --force Force setup : override existing files. + -h, --help help for setup + -l, --listen string Listen address of container (default "127.0.0.1") + -p, --port string Listen port of container (default "3000") +``` + +### Options inherited from parent commands + +``` + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. 
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli dashboard](cscli_dashboard.md) - Start a dashboard (metabase) container.
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_dashboard_start.md b/cmd/crowdsec-cli/doc/cwcli_dashboard_start.md
new file mode 100644
index 000000000..1219dd94e
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_dashboard_start.md
@@ -0,0 +1,34 @@
+## cscli dashboard start
+
+Start the metabase container.
+
+### Synopsis
+
+Starts the metabase container using docker.
+
+```
+cscli dashboard start [flags]
+```
+
+### Options
+
+```
+  -h, --help   help for start
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+      --error               Set logging to error.
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli dashboard](cscli_dashboard.md) - Start a dashboard (metabase) container.
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_dashboard_stop.md b/cmd/crowdsec-cli/doc/cwcli_dashboard_stop.md
new file mode 100644
index 000000000..8924df1b0
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_dashboard_stop.md
@@ -0,0 +1,35 @@
+## cscli dashboard stop
+
+Stops the metabase container.
+
+### Synopsis
+
+Stops the metabase container using docker.
+
+```
+cscli dashboard stop [flags]
+```
+
+### Options
+
+```
+  -h, --help     help for stop
+  -r, --remove   remove (docker rm) container as well.
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+      --error               Set logging to error.
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli dashboard](cscli_dashboard.md) - Start a dashboard (metabase) container.
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_inspect.md b/cmd/crowdsec-cli/doc/cwcli_inspect.md
new file mode 100644
index 000000000..c6b316928
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_inspect.md
@@ -0,0 +1,47 @@
+## cscli inspect
+
+Inspect configuration(s)
+
+### Synopsis
+
+
+Inspect gives you full details about a locally installed configuration.
+
+[type] must be parser, scenario, postoverflow, collection.
+
+[config_name] must be a valid config name from [Crowdsec Hub](https://hub.crowdsec.net) or locally installed.
+
+
+### Examples
+
+```
+cscli inspect parser crowdsec/xxx
+cscli inspect collection crowdsec/xxx
+```
+
+### Options
+
+```
+  -h, --help   help for inspect
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+      --error               Set logging to error.
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+``` + +### SEE ALSO + +* [cscli](cscli.md) - cscli allows you to manage crowdsec +* [cscli inspect collection](cscli_inspect_collection.md) - Inspect given collection +* [cscli inspect parser](cscli_inspect_parser.md) - Inspect given log parser +* [cscli inspect postoverflow](cscli_inspect_postoverflow.md) - Inspect given postoverflow parser +* [cscli inspect scenario](cscli_inspect_scenario.md) - Inspect given scenario + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_inspect_collection.md b/cmd/crowdsec-cli/doc/cwcli_inspect_collection.md new file mode 100644 index 000000000..db165b447 --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_inspect_collection.md @@ -0,0 +1,40 @@ +## cscli inspect collection + +Inspect given collection + +### Synopsis + +Inspect given collection from hub + +``` +cscli inspect collection [config] [flags] +``` + +### Examples + +``` +cscli inspect collection crowdsec/xxx +``` + +### Options + +``` + -h, --help help for collection +``` + +### Options inherited from parent commands + +``` + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli inspect](cscli_inspect.md) - Inspect configuration(s) + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_inspect_parser.md b/cmd/crowdsec-cli/doc/cwcli_inspect_parser.md new file mode 100644 index 000000000..9abaa349a --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_inspect_parser.md @@ -0,0 +1,40 @@ +## cscli inspect parser + +Inspect given log parser + +### Synopsis + +Inspect given parser from hub + +``` +cscli inspect parser [config] [flags] +``` + +### Examples + +``` +cscli inspect parser crowdsec/xxx +``` + +### Options + +``` + -h, --help help for parser +``` + +### Options inherited from parent commands + +``` + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli inspect](cscli_inspect.md) - Inspect configuration(s) + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_inspect_postoverflow.md b/cmd/crowdsec-cli/doc/cwcli_inspect_postoverflow.md new file mode 100644 index 000000000..a23cadcf5 --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_inspect_postoverflow.md @@ -0,0 +1,40 @@ +## cscli inspect postoverflow + +Inspect given postoverflow parser + +### Synopsis + +Inspect given postoverflow from hub. + +``` +cscli inspect postoverflow [config] [flags] +``` + +### Examples + +``` +cscli inspect postoverflow crowdsec/xxx +``` + +### Options + +``` + -h, --help help for postoverflow +``` + +### Options inherited from parent commands + +``` + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --warning Set logging to warning. 
+``` + +### SEE ALSO + +* [cscli inspect](cscli_inspect.md) - Inspect configuration(s) + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_inspect_scenario.md b/cmd/crowdsec-cli/doc/cwcli_inspect_scenario.md new file mode 100644 index 000000000..b12f74c18 --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_inspect_scenario.md @@ -0,0 +1,40 @@ +## cscli inspect scenario + +Inspect given scenario + +### Synopsis + +Inspect given scenario from hub + +``` +cscli inspect scenario [config] [flags] +``` + +### Examples + +``` +cscli inspect scenario crowdsec/xxx +``` + +### Options + +``` + -h, --help help for scenario +``` + +### Options inherited from parent commands + +``` + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli inspect](cscli_inspect.md) - Inspect configuration(s) + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_install.md b/cmd/crowdsec-cli/doc/cwcli_install.md new file mode 100644 index 000000000..72cbd3061 --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_install.md @@ -0,0 +1,51 @@ +## cscli install + +Install configuration(s) from hub + +### Synopsis + + +Install configuration from the CrowdSec Hub. + +In order to download latest versions of configuration, +you should [update cscli](./cscli_update.md). + +[type] must be parser, scenario, postoverflow, collection. + +[config_name] must be a valid config name from [Crowdsec Hub](https://hub.crowdsec.net). + + +### Examples + +``` +cscli install [type] [config_name] +``` + +### Options + +``` + -d, --download-only Only download packages, don't enable + --force Force install : Overwrite tainted and outdated files + -h, --help help for install +``` + +### Options inherited from parent commands + +``` + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli](cscli.md) - cscli allows you to manage crowdsec +* [cscli install collection](cscli_install_collection.md) - Install given collection +* [cscli install parser](cscli_install_parser.md) - Install given log parser +* [cscli install postoverflow](cscli_install_postoverflow.md) - Install given postoverflow parser +* [cscli install scenario](cscli_install_scenario.md) - Install given scenario + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_install_collection.md b/cmd/crowdsec-cli/doc/cwcli_install_collection.md new file mode 100644 index 000000000..e440d98e9 --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_install_collection.md @@ -0,0 +1,42 @@ +## cscli install collection + +Install given collection + +### Synopsis + +Fetch and install given collection from hub + +``` +cscli install collection [config] [flags] +``` + +### Examples + +``` +cscli install collection crowdsec/xxx +``` + +### Options + +``` + -h, --help help for collection +``` + +### Options inherited from parent commands + +``` + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. 
+  -d, --download-only       Only download packages, don't enable
+      --error               Set logging to error.
+      --force               Force install : Overwrite tainted and outdated files
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli install](cscli_install.md) - Install configuration(s) from hub
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_install_parser.md b/cmd/crowdsec-cli/doc/cwcli_install_parser.md
new file mode 100644
index 000000000..f2f8993a2
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_install_parser.md
@@ -0,0 +1,42 @@
+## cscli install parser
+
+Install given log parser
+
+### Synopsis
+
+Fetch and install given parser from hub
+
+```
+cscli install parser [config] [flags]
+```
+
+### Examples
+
+```
+cscli install parser crowdsec/xxx
+```
+
+### Options
+
+```
+  -h, --help   help for parser
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+  -d, --download-only       Only download packages, don't enable
+      --error               Set logging to error.
+      --force               Force install : Overwrite tainted and outdated files
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli install](cscli_install.md) - Install configuration(s) from hub
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_install_postoverflow.md b/cmd/crowdsec-cli/doc/cwcli_install_postoverflow.md
new file mode 100644
index 000000000..1b37eb8aa
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_install_postoverflow.md
@@ -0,0 +1,43 @@
+## cscli install postoverflow
+
+Install given postoverflow parser
+
+### Synopsis
+
+Fetch and install given postoverflow from hub.
+As a reminder, postoverflows are parsing configurations applied after the overflow (before a decision is applied).
+
+```
+cscli install postoverflow [config] [flags]
+```
+
+### Examples
+
+```
+cscli install postoverflow crowdsec/xxx
+```
+
+### Options
+
+```
+  -h, --help   help for postoverflow
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+  -d, --download-only       Only download packages, don't enable
+      --error               Set logging to error.
+      --force               Force install : Overwrite tainted and outdated files
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli install](cscli_install.md) - Install configuration(s) from hub
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_install_scenario.md b/cmd/crowdsec-cli/doc/cwcli_install_scenario.md
new file mode 100644
index 000000000..28548090d
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_install_scenario.md
@@ -0,0 +1,42 @@
+## cscli install scenario
+
+Install given scenario
+
+### Synopsis
+
+Fetch and install given scenario from hub
+
+```
+cscli install scenario [config] [flags]
+```
+
+### Examples
+
+```
+cscli install scenario crowdsec/xxx
+```
+
+### Options
+
+```
+  -h, --help   help for scenario
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+  -d, --download-only       Only download packages, don't enable
+      --error               Set logging to error.
+      --force               Force install : Overwrite tainted and outdated files
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli install](cscli_install.md) - Install configuration(s) from hub
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_list.md b/cmd/crowdsec-cli/doc/cwcli_list.md
new file mode 100644
index 000000000..1a04f55b2
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_list.md
@@ -0,0 +1,54 @@
+## cscli list
+
+List enabled configs
+
+### Synopsis
+
+
+List enabled configurations (parser/scenarios/collections) on your host.
+
+It is also possible to list configurations from [Crowdsec Hub](https://hub.crowdsec.net) with the '-a' option.
+
+[type] must be parsers, scenarios, postoverflows, collections
+
+
+```
+cscli list [-a] [flags]
+```
+
+### Examples
+
+```
+cscli list                 # List all local configurations
+cscli list [type]          # List all local configuration of type [type]
+cscli list -a              # List all local and remote configurations
+
+```
+
+### Options
+
+```
+  -a, --all    List as well disabled items
+  -h, --help   help for list
+```
+
+### Options inherited from parent commands
+
+```
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+      --error               Set logging to error.
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli](cscli.md) - cscli allows you to manage crowdsec
+* [cscli list collections](cscli_list_collections.md) - List enabled collections
+* [cscli list parsers](cscli_list_parsers.md) - List enabled parsers
+* [cscli list postoverflows](cscli_list_postoverflows.md) - List enabled postoverflow parsers
+* [cscli list scenarios](cscli_list_scenarios.md) - List enabled scenarios
+
+###### Auto generated by spf13/cobra on 14-May-2020
diff --git a/cmd/crowdsec-cli/doc/cwcli_list_collections.md b/cmd/crowdsec-cli/doc/cwcli_list_collections.md
new file mode 100644
index 000000000..7e03eba08
--- /dev/null
+++ b/cmd/crowdsec-cli/doc/cwcli_list_collections.md
@@ -0,0 +1,35 @@
+## cscli list collections
+
+List enabled collections
+
+### Synopsis
+
+List enabled collections
+
+```
+cscli list collections [-a] [flags]
+```
+
+### Options
+
+```
+  -h, --help   help for collections
+```
+
+### Options inherited from parent commands
+
+```
+  -a, --all                 List as well disabled items
+  -c, --config-dir string   Configuration directory to use. (default "/etc/crowdsec/cscli/")
+      --debug               Set logging to debug.
+      --error               Set logging to error.
+      --info                Set logging to info.
+  -o, --output string       Output format : human, json, raw. (default "human")
+      --warning             Set logging to warning.
+``` + +### SEE ALSO + +* [cscli list](cscli_list.md) - List enabled configs + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_list_parsers.md b/cmd/crowdsec-cli/doc/cwcli_list_parsers.md new file mode 100644 index 000000000..5d82f7e47 --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_list_parsers.md @@ -0,0 +1,35 @@ +## cscli list parsers + +List enabled parsers + +### Synopsis + +List enabled parsers + +``` +cscli list parsers [-a] [flags] +``` + +### Options + +``` + -h, --help help for parsers +``` + +### Options inherited from parent commands + +``` + -a, --all List as well disabled items + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli list](cscli_list.md) - List enabled configs + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_list_postoverflows.md b/cmd/crowdsec-cli/doc/cwcli_list_postoverflows.md new file mode 100644 index 000000000..b359e567d --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_list_postoverflows.md @@ -0,0 +1,35 @@ +## cscli list postoverflows + +List enabled postoverflow parsers + +### Synopsis + +List enabled postoverflow parsers + +``` +cscli list postoverflows [-a] [flags] +``` + +### Options + +``` + -h, --help help for postoverflows +``` + +### Options inherited from parent commands + +``` + -a, --all List as well disabled items + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli list](cscli_list.md) - List enabled configs + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_list_scenarios.md b/cmd/crowdsec-cli/doc/cwcli_list_scenarios.md new file mode 100644 index 000000000..fcc9db8ff --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_list_scenarios.md @@ -0,0 +1,35 @@ +## cscli list scenarios + +List enabled scenarios + +### Synopsis + +List enabled scenarios + +``` +cscli list scenarios [-a] [flags] +``` + +### Options + +``` + -h, --help help for scenarios +``` + +### Options inherited from parent commands + +``` + -a, --all List as well disabled items + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli list](cscli_list.md) - List enabled configs + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_metrics.md b/cmd/crowdsec-cli/doc/cwcli_metrics.md new file mode 100644 index 000000000..89ff2c982 --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_metrics.md @@ -0,0 +1,35 @@ +## cscli metrics + +Display crowdsec prometheus metrics. 
+ +### Synopsis + +Fetch metrics from the prometheus server and display them in a human-friendly way + +``` +cscli metrics [flags] +``` + +### Options + +``` + -h, --help help for metrics + -u, --url string Prometheus url (default "http://127.0.0.1:6060/metrics") +``` + +### Options inherited from parent commands + +``` + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli](cscli.md) - cscli allows you to manage crowdsec + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_remove.md b/cmd/crowdsec-cli/doc/cwcli_remove.md new file mode 100644 index 000000000..340b6206b --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_remove.md @@ -0,0 +1,48 @@ +## cscli remove + +Remove/disable configuration(s) + +### Synopsis + + + Remove local configuration. + +[type] must be parser, scenario, postoverflow, collection + +[config_name] must be a valid config name from [Crowdsec Hub](https://hub.crowdsec.net) or locally installed. + + +### Examples + +``` +cscli remove [type] [config_name] +``` + +### Options + +``` + --all Delete all the files in selected scope + -h, --help help for remove + --purge Delete source file in ~/.cscli/hub/ too +``` + +### Options inherited from parent commands + +``` + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli](cscli.md) - cscli allows you to manage crowdsec +* [cscli remove collection](cscli_remove_collection.md) - Remove/disable collection +* [cscli remove parser](cscli_remove_parser.md) - Remove/disable parser +* [cscli remove postoverflow](cscli_remove_postoverflow.md) - Remove/disable postoverflow parser +* [cscli remove scenario](cscli_remove_scenario.md) - Remove/disable scenario + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_remove_collection.md b/cmd/crowdsec-cli/doc/cwcli_remove_collection.md new file mode 100644 index 000000000..ad8ecacb6 --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_remove_collection.md @@ -0,0 +1,36 @@ +## cscli remove collection + +Remove/disable collection + +### Synopsis + + must be a valid collection. + +``` +cscli remove collection [config] [flags] +``` + +### Options + +``` + -h, --help help for collection +``` + +### Options inherited from parent commands + +``` + --all Delete all the files in selected scope + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --purge Delete source file in ~/.cscli/hub/ too + --warning Set logging to warning. 
+``` + +### SEE ALSO + +* [cscli remove](cscli_remove.md) - Remove/disable configuration(s) + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_remove_parser.md b/cmd/crowdsec-cli/doc/cwcli_remove_parser.md new file mode 100644 index 000000000..1f98d1f15 --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_remove_parser.md @@ -0,0 +1,36 @@ +## cscli remove parser + +Remove/disable parser + +### Synopsis + +[config] must be a valid parser. + +``` +cscli remove parser [config] [flags] +``` + +### Options + +``` + -h, --help   help for parser +``` + +### Options inherited from parent commands + +``` + --all Delete all the files in selected scope + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --purge Delete source file in ~/.cscli/hub/ too + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli remove](cscli_remove.md) - Remove/disable configuration(s) + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_remove_postoverflow.md b/cmd/crowdsec-cli/doc/cwcli_remove_postoverflow.md new file mode 100644 index 000000000..38ee7ec81 --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_remove_postoverflow.md @@ -0,0 +1,36 @@ +## cscli remove postoverflow + +Remove/disable postoverflow parser + +### Synopsis + +[config] must be a valid postoverflow. + +``` +cscli remove postoverflow [config] [flags] +``` + +### Options + +``` + -h, --help   help for postoverflow +``` + +### Options inherited from parent commands + +``` + --all Delete all the files in selected scope + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --purge Delete source file in ~/.cscli/hub/ too + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli remove](cscli_remove.md) - Remove/disable configuration(s) + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_remove_scenario.md b/cmd/crowdsec-cli/doc/cwcli_remove_scenario.md new file mode 100644 index 000000000..b37511ad6 --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_remove_scenario.md @@ -0,0 +1,36 @@ +## cscli remove scenario + +Remove/disable scenario + +### Synopsis + +[config] must be a valid scenario. + +``` +cscli remove scenario [config] [flags] +``` + +### Options + +``` + -h, --help   help for scenario +``` + +### Options inherited from parent commands + +``` + --all Delete all the files in selected scope + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --purge Delete source file in ~/.cscli/hub/ too + --warning Set logging to warning.
+``` + +### SEE ALSO + +* [cscli remove](cscli_remove.md) - Remove/disable configuration(s) + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_update.md b/cmd/crowdsec-cli/doc/cwcli_update.md new file mode 100644 index 000000000..548dfc195 --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_update.md @@ -0,0 +1,36 @@ +## cscli update + +Fetch available configs from hub + +### Synopsis + + +Fetches the [.index.json](https://github.com/crowdsecurity/hub/blob/master/.index.json) file from hub, containing the list of available configs. + + +``` +cscli update [flags] +``` + +### Options + +``` + -h, --help help for update +``` + +### Options inherited from parent commands + +``` + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli](cscli.md) - cscli allows you to manage crowdsec + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_upgrade.md b/cmd/crowdsec-cli/doc/cwcli_upgrade.md new file mode 100644 index 000000000..e517c240a --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_upgrade.md @@ -0,0 +1,62 @@ +## cscli upgrade + +Upgrade configuration(s) + +### Synopsis + + +Upgrade configuration from the CrowdSec Hub. + +In order to upgrade latest versions of configuration, +the Hub cache should be [updated](./cscli_update.md). + +Tainted configuration will not be updated (use --force to update them). + +[type] must be parser, scenario, postoverflow, collection. + +[config_name] must be a valid config name from [Crowdsec Hub](https://hub.crowdsec.net). + + + + +``` +cscli upgrade [type] [config] [flags] +``` + +### Examples + +``` +cscli upgrade [type] [config_name] +cscli upgrade --all # Upgrade all configurations types +cscli upgrade --force # Overwrite tainted configuration + +``` + +### Options + +``` + --all Upgrade all configuration in scope + --force Overwrite existing files, even if tainted + -h, --help help for upgrade +``` + +### Options inherited from parent commands + +``` + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --warning Set logging to warning. 
+``` + +### SEE ALSO + +* [cscli](cscli.md) - cscli allows you to manage crowdsec +* [cscli upgrade collection](cscli_upgrade_collection.md) - Upgrade collection configuration(s) +* [cscli upgrade parser](cscli_upgrade_parser.md) - Upgrade parser configuration(s) +* [cscli upgrade postoverflow](cscli_upgrade_postoverflow.md) - Upgrade postoverflow parser configuration(s) +* [cscli upgrade scenario](cscli_upgrade_scenario.md) - Upgrade scenario configuration(s) + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_upgrade_collection.md b/cmd/crowdsec-cli/doc/cwcli_upgrade_collection.md new file mode 100644 index 000000000..7c51a5fd7 --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_upgrade_collection.md @@ -0,0 +1,44 @@ +## cscli upgrade collection + +Upgrade collection configuration(s) + +### Synopsis + +Upgrade one or more collection configurations + +``` +cscli upgrade collection [config] [flags] +``` + +### Examples + +``` + - cscli upgrade collection crowdsec/apache-lamp + - cscli upgrade collection -all + - cscli upgrade collection crowdsec/apache-lamp --force +``` + +### Options + +``` + -h, --help help for collection +``` + +### Options inherited from parent commands + +``` + --all Upgrade all configuration in scope + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --force Overwrite existing files, even if tainted + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli upgrade](cscli_upgrade.md) - Upgrade configuration(s) + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_upgrade_parser.md b/cmd/crowdsec-cli/doc/cwcli_upgrade_parser.md new file mode 100644 index 000000000..966cfb43f --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_upgrade_parser.md @@ -0,0 +1,44 @@ +## cscli upgrade parser + +Upgrade parser configuration(s) + +### Synopsis + +Upgrade one or more parser configurations + +``` +cscli upgrade parser [config] [flags] +``` + +### Examples + +``` + - cscli upgrade parser crowdsec/apache-logs + - cscli upgrade parser -all + - cscli upgrade parser crowdsec/apache-logs --force +``` + +### Options + +``` + -h, --help help for parser +``` + +### Options inherited from parent commands + +``` + --all Upgrade all configuration in scope + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --force Overwrite existing files, even if tainted + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --warning Set logging to warning. 
+``` + +### SEE ALSO + +* [cscli upgrade](cscli_upgrade.md) - Upgrade configuration(s) + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_upgrade_postoverflow.md b/cmd/crowdsec-cli/doc/cwcli_upgrade_postoverflow.md new file mode 100644 index 000000000..7412b7763 --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_upgrade_postoverflow.md @@ -0,0 +1,44 @@ +## cscli upgrade postoverflow + +Upgrade postoverflow parser configuration(s) + +### Synopsis + +Upgrade one or more postoverflow parser configurations + +``` +cscli upgrade postoverflow [config] [flags] +``` + +### Examples + +``` + - cscli upgrade postoverflow crowdsec/enrich-rdns + - cscli upgrade postoverflow --all + - cscli upgrade postoverflow crowdsec/enrich-rdns --force +``` + +### Options + +``` + -h, --help   help for postoverflow +``` + +### Options inherited from parent commands + +``` + --all Upgrade all configuration in scope + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --force Overwrite existing files, even if tainted + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli upgrade](cscli_upgrade.md) - Upgrade configuration(s) + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/doc/cwcli_upgrade_scenario.md b/cmd/crowdsec-cli/doc/cwcli_upgrade_scenario.md new file mode 100644 index 000000000..c888d63ca --- /dev/null +++ b/cmd/crowdsec-cli/doc/cwcli_upgrade_scenario.md @@ -0,0 +1,43 @@ +## cscli upgrade scenario + +Upgrade scenario configuration(s) + +### Synopsis + +Upgrade one or more scenario configurations + +``` +cscli upgrade scenario [config] [flags] +``` + +### Examples + +``` + - cscli upgrade scenario --all + - cscli upgrade scenario crowdsec/http-404 --force +``` + +### Options + +``` + -h, --help   help for scenario +``` + +### Options inherited from parent commands + +``` + --all Upgrade all configuration in scope + -c, --config-dir string Configuration directory to use. (default "/etc/crowdsec/cscli/") + --debug Set logging to debug. + --error Set logging to error. + --force Overwrite existing files, even if tainted + --info Set logging to info. + -o, --output string Output format : human, json, raw. (default "human") + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli upgrade](cscli_upgrade.md) - Upgrade configuration(s) + +###### Auto generated by spf13/cobra on 14-May-2020 diff --git a/cmd/crowdsec-cli/inspect.go b/cmd/crowdsec-cli/inspect.go new file mode 100644 index 000000000..d1c6cec04 --- /dev/null +++ b/cmd/crowdsec-cli/inspect.go @@ -0,0 +1,110 @@ +package main + +import ( + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "gopkg.in/yaml.v2" + + log "github.com/sirupsen/logrus" + + "github.com/spf13/cobra" +) + +func InspectItem(name string, objectType string) { + + for _, hubItem := range cwhub.HubIdx[objectType] { + if hubItem.Name != name { + continue + } + buff, err := yaml.Marshal(hubItem) + if err != nil { + log.Fatalf("unable to marshal item : %s", err) + } + fmt.Printf("%s", string(buff)) + } +} + +func NewInspectCmd() *cobra.Command { + var cmdInspect = &cobra.Command{ + Use: "inspect [type] [config]", + Short: "Inspect configuration(s)", + Long: ` +Inspect gives you full details about a locally installed configuration. + +[type] must be parser, scenario, postoverflow, collection.
+ +[config_name] must be a valid config name from [Crowdsec Hub](https://hub.crowdsec.net) or locally installed. +`, + Example: `cscli inspect parser crowdsec/xxx +cscli inspect collection crowdsec/xxx`, + Args: cobra.MinimumNArgs(1), + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if !config.configured { + return fmt.Errorf("you must configure cli before interacting with hub") + } + return nil + }, + } + + var cmdInspectParser = &cobra.Command{ + Use: "parser [config]", + Short: "Inspect given log parser", + Long: `Inspect given parser from hub`, + Example: `cscli inspect parser crowdsec/xxx`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + InspectItem(args[0], cwhub.PARSERS) + }, + } + cmdInspect.AddCommand(cmdInspectParser) + var cmdInspectScenario = &cobra.Command{ + Use: "scenario [config]", + Short: "Inspect given scenario", + Long: `Inspect given scenario from hub`, + Example: `cscli inspect scenario crowdsec/xxx`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + InspectItem(args[0], cwhub.SCENARIOS) + }, + } + cmdInspect.AddCommand(cmdInspectScenario) + + var cmdInspectCollection = &cobra.Command{ + Use: "collection [config]", + Short: "Inspect given collection", + Long: `Inspect given collection from hub`, + Example: `cscli inspect collection crowdsec/xxx`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + InspectItem(args[0], cwhub.COLLECTIONS) + }, + } + cmdInspect.AddCommand(cmdInspectCollection) + + var cmdInspectPostoverflow = &cobra.Command{ + Use: "postoverflow [config]", + Short: "Inspect given postoverflow parser", + Long: `Inspect given postoverflow from hub.`, + Example: `cscli inspect postoverflow crowdsec/xxx`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + InspectItem(args[0], cwhub.PARSERS_OVFLW) + }, + } + cmdInspect.AddCommand(cmdInspectPostoverflow) + + return cmdInspect +} diff --git a/cmd/crowdsec-cli/install.go b/cmd/crowdsec-cli/install.go new file mode 100644 index 000000000..189ea2f9d --- /dev/null +++ b/cmd/crowdsec-cli/install.go @@ -0,0 +1,150 @@ +package main + +import ( + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + + log "github.com/sirupsen/logrus" + + "github.com/spf13/cobra" +) + +var download_only, force_install bool + +func InstallItem(name string, obtype string) { + for _, it := range cwhub.HubIdx[obtype] { + if it.Name == name { + if download_only && it.Downloaded && it.UpToDate { + log.Warningf("%s is already downloaded and up-to-date", it.Name) + return + } + it, err := cwhub.DownloadLatest(it, cwhub.Hubdir, force_install) + if err != nil { + log.Fatalf("error while downloading %s : %v", it.Name, err) + } + cwhub.HubIdx[obtype][it.Name] = it + if download_only { + log.Infof("Downloaded %s to %s", it.Name, cwhub.Hubdir+"/"+it.RemotePath) + return + } + it, err = cwhub.EnableItem(it, cwhub.Installdir, cwhub.Hubdir) + if err != nil { + log.Fatalf("error while enabled %s : %v.", it.Name, err) + } + cwhub.HubIdx[obtype][it.Name] = it + log.Infof("Enabled %s", it.Name) + return + } + } + 
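// no hub entry matched the requested name for this item type +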
log.Warningf("%s not found in hub index", name) +} + +func InstallScenario(name string) { + InstallItem(name, cwhub.SCENARIOS) +} + +func InstallCollection(name string) { + InstallItem(name, cwhub.COLLECTIONS) +} + +func InstallParser(name string) { + InstallItem(name, cwhub.PARSERS) +} + +func InstallPostoverflow(name string) { + InstallItem(name, cwhub.PARSERS_OVFLW) +} + +func NewInstallCmd() *cobra.Command { + /* ---- INSTALL COMMAND */ + + var cmdInstall = &cobra.Command{ + Use: "install [type] [config]", + Short: "Install configuration(s) from hub", + Long: ` +Install configuration from the CrowdSec Hub. + +In order to download the latest versions of configurations, +you should first [update cscli](./cscli_update.md). + +[type] must be parser, scenario, postoverflow, collection. + +[config_name] must be a valid config name from [Crowdsec Hub](https://hub.crowdsec.net). +`, + Example: `cscli install [type] [config_name]`, + Args: cobra.MinimumNArgs(1), + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if !config.configured { + return fmt.Errorf("you must configure cli before interacting with hub") + } + return nil + }, + } + cmdInstall.PersistentFlags().BoolVarP(&download_only, "download-only", "d", false, "Only download packages, don't enable") + cmdInstall.PersistentFlags().BoolVar(&force_install, "force", false, "Force install : Overwrite tainted and outdated files") + + var cmdInstallParser = &cobra.Command{ + Use: "parser [config]", + Short: "Install given log parser", + Long: `Fetch and install given parser from hub`, + Example: `cscli install parser crowdsec/xxx`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + InstallItem(args[0], cwhub.PARSERS) + }, + } + cmdInstall.AddCommand(cmdInstallParser) + var cmdInstallScenario = &cobra.Command{ + Use: "scenario [config]", + Short: "Install given scenario", + Long: `Fetch and install given scenario from hub`, + Example: `cscli install scenario crowdsec/xxx`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + InstallItem(args[0], cwhub.SCENARIOS) + }, + } + cmdInstall.AddCommand(cmdInstallScenario) + + var cmdInstallCollection = &cobra.Command{ + Use: "collection [config]", + Short: "Install given collection", + Long: `Fetch and install given collection from hub`, + Example: `cscli install collection crowdsec/xxx`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + InstallItem(args[0], cwhub.COLLECTIONS) + }, + } + cmdInstall.AddCommand(cmdInstallCollection) + +
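// reminder: postoverflows are parsing configurations applied after the overflow, before a decision is applied +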
var cmdInstallPostoverflow = &cobra.Command{ + Use: "postoverflow [config]", + Short: "Install given postoverflow parser", + Long: `Fetch and install given postoverflow from hub. +As a reminder, postoverflows are parsing configurations applied after the overflow (before a decision is applied).`, + Example: `cscli install postoverflow crowdsec/xxx`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + InstallItem(args[0], cwhub.PARSERS_OVFLW) + }, + } + cmdInstall.AddCommand(cmdInstallPostoverflow) + + return cmdInstall +} diff --git a/cmd/crowdsec-cli/list.go b/cmd/crowdsec-cli/list.go new file mode 100644 index 000000000..eb43ff2bc --- /dev/null +++ b/cmd/crowdsec-cli/list.go @@ -0,0 +1,152 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + + "github.com/enescakir/emoji" + "github.com/olekukonko/tablewriter" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +var listAll bool + +func doListing(ttype string, args []string) { + + var pkgst []map[string]string + + if len(args) == 1 { + pkgst = cwhub.HubStatus(ttype, args[0], listAll) + } else { + pkgst = cwhub.HubStatus(ttype, "", listAll) + } + + if config.output == "human" { + + table := tablewriter.NewWriter(os.Stdout) + table.SetCenterSeparator("") + table.SetColumnSeparator("") + + table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetHeader([]string{"Name", fmt.Sprintf("%v Status", emoji.Package), "Version", "Local Path"}) + for _, v := range pkgst { + table.Append([]string{v["name"], v["utf8_status"], v["local_version"], v["local_path"]}) + } + table.Render() + } else if config.output == "json" { + x, err := json.MarshalIndent(pkgst, "", " ") + if err != nil { + log.Fatalf("failed to marshal hub status : %v", err) + } + fmt.Printf("%s", string(x)) + } else if config.output == "raw" { + for _, v := range pkgst { + fmt.Printf("%s %s\n", v["name"], v["description"]) + } + } +} + +func NewListCmd() *cobra.Command { + /* ---- LIST COMMAND */ + var cmdList = &cobra.Command{ + Use: "list [-a]", + Short: "List enabled configs", + Long: ` +List enabled configurations (parser/scenarios/collections) on your host. + +Configurations from the [Crowdsec Hub](https://hub.crowdsec.net) can also be listed with the '-a' option.
+ +[type] must be parsers, scenarios, postoverflows, collections + `, + Example: `cscli list # List all local configurations +cscli list [type] # List all local configuration of type [type] +cscli list -a # List all local and remote configurations + `, + Args: cobra.ExactArgs(0), + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if !config.configured { + return fmt.Errorf("you must configure cli before interacting with hub") + } + return nil + }, + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + + cwhub.DisplaySummary() + log.Printf("PARSERS:") + doListing(cwhub.PARSERS, args) + log.Printf("SCENARIOS:") + doListing(cwhub.SCENARIOS, args) + log.Printf("COLLECTIONS:") + doListing(cwhub.COLLECTIONS, args) + log.Printf("POSTOVERFLOWS:") + doListing(cwhub.PARSERS_OVFLW, args) + }, + } + cmdList.PersistentFlags().BoolVarP(&listAll, "all", "a", false, "List as well disabled items") + + var cmdListParsers = &cobra.Command{ + Use: "parsers [-a]", + Short: "List enabled parsers", + Long: ``, + Args: cobra.ExactArgs(0), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + doListing(cwhub.PARSERS, args) + }, + } + cmdList.AddCommand(cmdListParsers) + + var cmdListScenarios = &cobra.Command{ + Use: "scenarios [-a]", + Short: "List enabled scenarios", + Long: ``, + Args: cobra.ExactArgs(0), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + doListing(cwhub.SCENARIOS, args) + }, + } + cmdList.AddCommand(cmdListScenarios) + + var cmdListCollections = &cobra.Command{ + Use: "collections [-a]", + Short: "List enabled collections", + Long: ``, + Args: cobra.ExactArgs(0), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + doListing(cwhub.COLLECTIONS, args) + }, + } + cmdList.AddCommand(cmdListCollections) + + var cmdListPostoverflows = &cobra.Command{ + Use: "postoverflows [-a]", + Short: "List enabled postoverflow parsers", + Long: ``, + Args: cobra.ExactArgs(0), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + doListing(cwhub.PARSERS_OVFLW, args) + }, + } + cmdList.AddCommand(cmdListPostoverflows) + + return cmdList +} diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go new file mode 100644 index 000000000..798350eda --- /dev/null +++ b/cmd/crowdsec-cli/main.go @@ -0,0 +1,139 @@ +package main + +import ( + "io/ioutil" + "os/user" + "path/filepath" + "strings" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" + "gopkg.in/yaml.v2" +) + +var dbg_lvl, nfo_lvl, wrn_lvl, err_lvl bool + +var config cliConfig + +func initConfig() { + + if dbg_lvl { + log.SetLevel(log.DebugLevel) + } else if nfo_lvl { + log.SetLevel(log.InfoLevel) + } else if wrn_lvl { + log.SetLevel(log.WarnLevel) + } else if err_lvl { + log.SetLevel(log.ErrorLevel) + } + if config.output == "json" { + log.SetLevel(log.WarnLevel) + log.SetFormatter(&log.JSONFormatter{}) + } else if config.output == "raw" { + log.SetLevel(log.ErrorLevel) + } + + if strings.HasPrefix(config.configFolder, 
"~/") { + usr, err := user.Current() + if err != nil { + log.Fatalf("failed to resolve path ~/ : %s", err) + } + config.configFolder = usr.HomeDir + "/" + config.configFolder[2:] + } + /*read config*/ + buf, err := ioutil.ReadFile(filepath.Clean(config.configFolder + "/config")) + if err != nil { + log.Infof("Failed to open config %s : %s", filepath.Clean(config.configFolder+"/config"), err) + } else { + err = yaml.UnmarshalStrict(buf, &config) + if err != nil { + log.Fatalf("Failed to parse config %s : %s, please configure", filepath.Clean(config.configFolder+"/config"), err) + } + config.InstallFolder = filepath.Clean(config.InstallFolder) + config.hubFolder = filepath.Clean(config.configFolder + "/hub/") + config.BackendPluginFolder = filepath.Clean(config.BackendPluginFolder) + // + cwhub.Installdir = config.InstallFolder + cwhub.Cfgdir = config.configFolder + cwhub.Hubdir = config.hubFolder + config.configured = true + } +} + +func main() { + + var rootCmd = &cobra.Command{ + Use: "cscli", + Short: "cscli allows you to manage crowdsec", + Long: `cscli is the main command to interact with your crowdsec service, scenarios & db. +It is meant to allow you to manage bans, parsers/scenarios/etc, api and generally manage you crowdsec setup.`, + Example: `View/Add/Remove bans: + - cscli ban list + - cscli ban add ip 1.2.3.4 24h 'go away' + - cscli ban del 1.2.3.4 + +View/Add/Upgrade/Remove scenarios and parsers: + - cscli list + - cscli install collection crowdsec/linux-web + - cscli remove scenario crowdsec/ssh_enum + - cscli upgrade --all + +API interaction: + - cscli api pull + - cscli api register + `} + /*TODO : add a remediation type*/ + var cmdDocGen = &cobra.Command{ + Use: "doc", + Short: "Generate the documentation in `./doc/`. Directory must exist.", + Args: cobra.ExactArgs(0), + Hidden: true, + Run: func(cmd *cobra.Command, args []string) { + doc.GenMarkdownTree(rootCmd, "./doc/") + }, + } + rootCmd.AddCommand(cmdDocGen) + /*usage*/ + var cmdVersion = &cobra.Command{ + Use: "version", + Short: "Display version and exit.", + Args: cobra.ExactArgs(0), + Hidden: true, + Run: func(cmd *cobra.Command, args []string) { + cwversion.Show() + }, + } + rootCmd.AddCommand(cmdVersion) + + //rootCmd.PersistentFlags().BoolVarP(&config.simulation, "simulate", "s", false, "No action; perform a simulation of events that would occur based on the current arguments.") + rootCmd.PersistentFlags().StringVarP(&config.configFolder, "config-dir", "c", "/etc/crowdsec/cscli/", "Configuration directory to use.") + rootCmd.PersistentFlags().StringVarP(&config.output, "output", "o", "human", "Output format : human, json, raw.") + rootCmd.PersistentFlags().BoolVar(&dbg_lvl, "debug", false, "Set logging to debug.") + rootCmd.PersistentFlags().BoolVar(&nfo_lvl, "info", false, "Set logging to info.") + rootCmd.PersistentFlags().BoolVar(&wrn_lvl, "warning", false, "Set logging to warning.") + rootCmd.PersistentFlags().BoolVar(&err_lvl, "error", false, "Set logging to error.") + + cobra.OnInitialize(initConfig) + /*don't sort flags so we can enforce order*/ + rootCmd.Flags().SortFlags = false + rootCmd.PersistentFlags().SortFlags = false + + rootCmd.AddCommand(NewBanCmds()) + rootCmd.AddCommand(NewConfigCmd()) + rootCmd.AddCommand(NewInstallCmd()) + rootCmd.AddCommand(NewListCmd()) + rootCmd.AddCommand(NewRemoveCmd()) + rootCmd.AddCommand(NewUpdateCmd()) + rootCmd.AddCommand(NewUpgradeCmd()) + rootCmd.AddCommand(NewAPICmd()) + rootCmd.AddCommand(NewMetricsCmd()) + rootCmd.AddCommand(NewBackupCmd()) + 
rootCmd.AddCommand(NewDashboardCmd()) + rootCmd.AddCommand(NewInspectCmd()) + + rootCmd.Execute() +} diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go new file mode 100644 index 000000000..f2d38b80d --- /dev/null +++ b/cmd/crowdsec-cli/metrics.go @@ -0,0 +1,229 @@ +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "os" + "strconv" + "strings" + "time" + + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" + + "github.com/olekukonko/tablewriter" + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/prom2json" + "github.com/spf13/cobra" +) + +/*This is a complete rip from prom2json*/ +func ShowPrometheus(url string) { + mfChan := make(chan *dto.MetricFamily, 1024) + + // Start with the DefaultTransport for sane defaults. + transport := http.DefaultTransport.(*http.Transport).Clone() + // Conservatively disable HTTP keep-alives as this program will only + // ever need a single HTTP request. + transport.DisableKeepAlives = true + // Timeout early if the server doesn't even return the headers. + transport.ResponseHeaderTimeout = time.Minute + + go func() { + err := prom2json.FetchMetricFamilies(url, mfChan, transport) + if err != nil { + log.Fatalf("failed to fetch prometheus metrics : %v", err) + } + }() + + result := []*prom2json.Family{} + for mf := range mfChan { + result = append(result, prom2json.NewFamily(mf)) + } + log.Debugf("Finished reading prometheus output, %d entries", len(result)) + /*walk*/ + acquis_stats := map[string]map[string]int{} + parsers_stats := map[string]map[string]int{} + buckets_stats := map[string]map[string]int{} + for idx, fam := range result { + if !strings.HasPrefix(fam.Name, "cs_") { + continue + } + log.Debugf("round %d", idx) + for _, m := range fam.Metrics { + metric := m.(prom2json.Metric) + name, ok := metric.Labels["name"] + if !ok { + log.Debugf("no name in Metric") + } + source, ok := metric.Labels["source"] + if !ok { + log.Debugf("no source in Metric") + } + value := m.(prom2json.Metric).Value + ival, err := strconv.Atoi(value) + if err != nil { + log.Errorf("Unexpected int value %s : %s", value, err) + } + switch fam.Name { + /*buckets*/ + case "cs_bucket_create": + if _, ok := buckets_stats[name]; !ok { + buckets_stats[name] = make(map[string]int) + } + buckets_stats[name]["instanciation"] += ival + case "cs_bucket_overflow": + if _, ok := buckets_stats[name]; !ok { + buckets_stats[name] = make(map[string]int) + } + buckets_stats[name]["overflow"] += ival + case "cs_bucket_pour": + if _, ok := buckets_stats[name]; !ok { + buckets_stats[name] = make(map[string]int) + } + if _, ok := acquis_stats[source]; !ok { + acquis_stats[source] = make(map[string]int) + } + buckets_stats[name]["pour"] += ival + acquis_stats[source]["pour"] += ival + case "cs_bucket_underflow": + if _, ok := buckets_stats[name]; !ok { + buckets_stats[name] = make(map[string]int) + } + buckets_stats[name]["underflow"] += ival + /*acquis*/ + case "cs_reader_hits": + if _, ok := acquis_stats[source]; !ok { + acquis_stats[source] = make(map[string]int) + } + acquis_stats[source]["reads"] += ival + case "cs_parser_hits_ok": + if _, ok := acquis_stats[source]; !ok { + acquis_stats[source] = make(map[string]int) + } + acquis_stats[source]["parsed"] += ival + case "cs_parser_hits_ko": + if _, ok := acquis_stats[source]; !ok { + acquis_stats[source] = make(map[string]int) + } + acquis_stats[source]["unparsed"] += ival + case "cs_node_hits": + if _, ok := parsers_stats[name]; !ok { + parsers_stats[name] = make(map[string]int) + } + 
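// cs_node_hits is tracked per parser node, keyed by the 'name' label +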
parsers_stats[name]["hits"] += ival + case "cs_node_hits_ok": + if _, ok := parsers_stats[name]; !ok { + parsers_stats[name] = make(map[string]int) + } + parsers_stats[name]["parsed"] += ival + default: + continue + } + + } + } + if config.output == "human" { + atable := tablewriter.NewWriter(os.Stdout) + atable.SetHeader([]string{"Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket"}) + for alabel, astats := range acquis_stats { + + if alabel == "" { + continue + } + row := []string{} + row = append(row, alabel) //name + for _, sl := range []string{"reads", "parsed", "unparsed", "pour"} { + if v, ok := astats[sl]; ok { + row = append(row, fmt.Sprintf("%d", v)) + } else { + row = append(row, "-") + } + } + atable.Append(row) + } + btable := tablewriter.NewWriter(os.Stdout) + btable.SetHeader([]string{"Bucket", "Overflows", "Instanciated", "Poured", "Expired"}) + for blabel, bstats := range buckets_stats { + if blabel == "" { + continue + } + row := []string{} + row = append(row, blabel) //name + for _, sl := range []string{"overflow", "instanciation", "pour", "underflow"} { + if v, ok := bstats[sl]; ok { + row = append(row, fmt.Sprintf("%d", v)) + } else { + row = append(row, "-") + } + } + btable.Append(row) + } + ptable := tablewriter.NewWriter(os.Stdout) + ptable.SetHeader([]string{"Parsers", "Hits", "Parsed", "Unparsed"}) + for plabel, pstats := range parsers_stats { + if plabel == "" { + continue + } + row := []string{} + row = append(row, plabel) //name + hits := 0 + parsed := 0 + for _, sl := range []string{"hits", "parsed"} { + if v, ok := pstats[sl]; ok { + row = append(row, fmt.Sprintf("%d", v)) + if sl == "hits" { + hits = v + } else if sl == "parsed" { + parsed = v + } + } else { + row = append(row, "-") + } + } + row = append(row, fmt.Sprintf("%d", hits-parsed)) + ptable.Append(row) + } + log.Printf("Buckets Metrics:") + btable.Render() // Send output + log.Printf("Acquisition Metrics:") + atable.Render() // Send output + log.Printf("Parser Metrics:") + ptable.Render() // Send output + } else if config.output == "json" { + for _, val := range []map[string]map[string]int{acquis_stats, parsers_stats, buckets_stats} { + x, err := json.MarshalIndent(val, "", " ") + if err != nil { + log.Fatalf("failed to unmarshal metrics : %v", err) + } + fmt.Printf("%s\n", string(x)) + } + } else if config.output == "raw" { + for _, val := range []map[string]map[string]int{acquis_stats, parsers_stats, buckets_stats} { + x, err := yaml.Marshal(val) + if err != nil { + log.Fatalf("failed to unmarshal metrics : %v", err) + } + fmt.Printf("%s\n", string(x)) + } + } +} + +var purl string + +func NewMetricsCmd() *cobra.Command { + /* ---- UPDATE COMMAND */ + var cmdMetrics = &cobra.Command{ + Use: "metrics", + Short: "Display crowdsec prometheus metrics.", + Long: `Fetch metrics from the prometheus server and display them in a human-friendly way`, + Args: cobra.ExactArgs(0), + Run: func(cmd *cobra.Command, args []string) { + ShowPrometheus(purl) + }, + } + cmdMetrics.PersistentFlags().StringVarP(&purl, "url", "u", "http://127.0.0.1:6060/metrics", "Prometheus url") + + return cmdMetrics +} diff --git a/cmd/crowdsec-cli/remove.go b/cmd/crowdsec-cli/remove.go new file mode 100644 index 000000000..ad8a07cd1 --- /dev/null +++ b/cmd/crowdsec-cli/remove.go @@ -0,0 +1,150 @@ +package main + +import ( + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +var purge_remove, remove_all bool + +func 
RemoveMany(ttype string, name string) { + var err error + var disabled int + for _, v := range cwhub.HubIdx[ttype] { + if name != "" && v.Name == name { + v, err = cwhub.DisableItem(v, cwhub.Installdir, cwhub.Hubdir, purge_remove) + if err != nil { + log.Fatalf("unable to disable %s : %v", v.Name, err) + } + disabled++ + cwhub.HubIdx[ttype][v.Name] = v + return + } else if name == "" && remove_all { + v, err = cwhub.DisableItem(v, cwhub.Installdir, cwhub.Hubdir, purge_remove) + if err != nil { + log.Fatalf("unable to disable %s : %v", v.Name, err) + } + cwhub.HubIdx[ttype][v.Name] = v + disabled++ + } + } + if name != "" && !remove_all { + log.Errorf("%s not found", name) + return + } + log.Infof("Disabled %d items", disabled) +} + +func NewRemoveCmd() *cobra.Command { + + var cmdRemove = &cobra.Command{ + Use: "remove [type] [config]", + Short: "Remove/disable configuration(s)", + Long: ` + Remove local configuration. + +[type] must be parser, scenario, postoverflow, collection + +[config_name] must be a valid config name from [Crowdsec Hub](https://hub.crowdsec.net) or locally installed. + `, + Example: `cscli remove [type] [config_name]`, + Args: cobra.MinimumNArgs(1), + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if !config.configured { + return fmt.Errorf("you must configure cli before interacting with hub") + } + return nil + }, + } + cmdRemove.PersistentFlags().BoolVar(&purge_remove, "purge", false, "Delete source file in ~/.cscli/hub/ too") + cmdRemove.PersistentFlags().BoolVar(&remove_all, "all", false, "Delete all the files in selected scope") + var cmdRemoveParser = &cobra.Command{ + Use: "parser [config]", + Short: "Remove/disable parser", + Long: `[config] must be a valid parser.`, + Args: cobra.MinimumNArgs(0), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + + if remove_all && len(args) == 0 { + RemoveMany(cwhub.PARSERS, "") + } else if len(args) == 1 { + RemoveMany(cwhub.PARSERS, args[0]) + } else { + _ = cmd.Help() + return + } + }, + } + cmdRemove.AddCommand(cmdRemoveParser) + var cmdRemoveScenario = &cobra.Command{ + Use: "scenario [config]", + Short: "Remove/disable scenario", + Long: `[config] must be a valid scenario.`, + Args: cobra.MinimumNArgs(0), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + if remove_all && len(args) == 0 { + RemoveMany(cwhub.SCENARIOS, "") + } else if len(args) == 1 { + RemoveMany(cwhub.SCENARIOS, args[0]) + } else { + _ = cmd.Help() + return + } + }, + } + cmdRemove.AddCommand(cmdRemoveScenario) + var cmdRemoveCollection = &cobra.Command{ + Use: "collection [config]", + Short: "Remove/disable collection", + Long: `[config] must be a valid collection.`, + Args: cobra.MinimumNArgs(0), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + if remove_all && len(args) == 0 { + RemoveMany(cwhub.COLLECTIONS, "") + } else if len(args) == 1 { + RemoveMany(cwhub.COLLECTIONS, args[0]) + } else { + _ = cmd.Help() + return + } + }, + } + cmdRemove.AddCommand(cmdRemoveCollection) + + var cmdRemovePostoverflow = &cobra.Command{ + Use: "postoverflow [config]", + Short: "Remove/disable postoverflow parser", + Long: `[config] must be a valid postoverflow.`, + Args: cobra.MinimumNArgs(0), + Run: func(cmd *cobra.Command,
args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + if remove_all && len(args) == 0 { + RemoveMany(cwhub.PARSERS_OVFLW, "") + } else if len(args) == 1 { + RemoveMany(cwhub.PARSERS_OVFLW, args[0]) + } else { + _ = cmd.Help() + return + } + }, + } + cmdRemove.AddCommand(cmdRemovePostoverflow) + + return cmdRemove +} diff --git a/cmd/crowdsec-cli/update.go b/cmd/crowdsec-cli/update.go new file mode 100644 index 000000000..ad5e4f7f6 --- /dev/null +++ b/cmd/crowdsec-cli/update.go @@ -0,0 +1,34 @@ +package main + +import ( + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +func NewUpdateCmd() *cobra.Command { + /* ---- UPDATE COMMAND */ + var cmdUpdate = &cobra.Command{ + Use: "update", + Short: "Fetch available configs from hub", + Long: ` +Fetches the [.index.json](https://github.com/crowdsecurity/hub/blob/master/.index.json) file from hub, containing the list of available configs. +`, + Args: cobra.ExactArgs(0), + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if !config.configured { + return fmt.Errorf("You must configure cli before interacting with hub.") + } + return nil + }, + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.UpdateHubIdx(); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + }, + } + return cmdUpdate +} diff --git a/cmd/crowdsec-cli/upgrade.go b/cmd/crowdsec-cli/upgrade.go new file mode 100644 index 000000000..456ddb47c --- /dev/null +++ b/cmd/crowdsec-cli/upgrade.go @@ -0,0 +1,205 @@ +package main + +import ( + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + + "github.com/enescakir/emoji" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +var upgrade_all, force_upgrade bool + +func UpgradeConfig(ttype string, name string) { + var err error + var updated int + var found bool + + for _, v := range cwhub.HubIdx[ttype] { + //name mismatch + if name != "" && name != v.Name { + continue + } + if !v.Installed { + log.Debugf("skip %s, not installed", v.Name) + continue + } + if !v.Downloaded { + log.Warningf("%s : not downloaded, please install.", v.Name) + continue + } + found = true + if v.UpToDate { + log.Infof("%s : up-to-date", v.Name) + continue + } + v, err = cwhub.DownloadLatest(v, cwhub.Hubdir, force_upgrade) + if err != nil { + log.Fatalf("%s : download failed : %v", v.Name, err) + } + if !v.UpToDate { + if v.Tainted { + log.Infof("%v %s is tainted, --force to overwrite", emoji.Warning, v.Name) + } else if v.Local { + log.Infof("%v %s is local", emoji.Prohibited, v.Name) + } + } else { + log.Infof("%v %s : updated", emoji.Package, v.Name) + updated += 1 + } + cwhub.HubIdx[ttype][v.Name] = v + } + if found == false { + log.Errorf("Didn't find %s", name) + } else if updated == 0 && found == true { + log.Errorf("Nothing to update") + } else if updated != 0 { + log.Infof("Upgraded %d items", updated) + } + +} + +func NewUpgradeCmd() *cobra.Command { + + var cmdUpgrade = &cobra.Command{ + Use: "upgrade [type] [config]", + Short: "Upgrade configuration(s)", + Long: ` +Upgrade configuration from the CrowdSec Hub. + +In order to upgrade latest versions of configuration, +the Hub cache should be [updated](./cscli_update.md). + +Tainted configuration will not be updated (use --force to update them). + +[type] must be parser, scenario, postoverflow, collection. + +[config_name] must be a valid config name from [Crowdsec Hub](https://hub.crowdsec.net). 
+ + + `, + Example: `cscli upgrade [type] [config_name] +cscli upgrade --all # Upgrade all configurations types +cscli upgrade --force # Overwrite tainted configuration + `, + + Args: cobra.MinimumNArgs(0), + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if !config.configured { + return fmt.Errorf("you must configure cli before interacting with hub") + } + return nil + }, + Run: func(cmd *cobra.Command, args []string) { + if upgrade_all == false && len(args) < 2 { + _ = cmd.Help() + return + } + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + if upgrade_all == true && len(args) == 0 { + log.Warningf("Upgrade all : parsers, scenarios, collections.") + UpgradeConfig(cwhub.PARSERS, "") + UpgradeConfig(cwhub.PARSERS_OVFLW, "") + UpgradeConfig(cwhub.SCENARIOS, "") + UpgradeConfig(cwhub.COLLECTIONS, "") + } + //fmt.Println("upgrade all ?!: " + strings.Join(args, " ")) + }, + } + cmdUpgrade.PersistentFlags().BoolVar(&upgrade_all, "all", false, "Upgrade all configuration in scope") + cmdUpgrade.PersistentFlags().BoolVar(&force_upgrade, "force", false, "Overwrite existing files, even if tainted") + var cmdUpgradeParser = &cobra.Command{ + Use: "parser [config]", + Short: "Upgrade parser configuration(s)", + Long: `Upgrade one or more parser configurations`, + Example: ` - cscli upgrade parser crowdsec/apache-logs + - cscli upgrade parser -all + - cscli upgrade parser crowdsec/apache-logs --force`, + Args: cobra.MinimumNArgs(0), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + if len(args) == 1 { + UpgradeConfig(cwhub.PARSERS, args[0]) + //UpgradeConfig(cwhub.PARSERS_OVFLW, "") + } else if upgrade_all == true { + UpgradeConfig(cwhub.PARSERS, "") + } else { + _ = cmd.Help() + } + }, + } + cmdUpgrade.AddCommand(cmdUpgradeParser) + var cmdUpgradeScenario = &cobra.Command{ + Use: "scenario [config]", + Short: "Upgrade scenario configuration(s)", + Long: `Upgrade one or more scenario configurations`, + Example: ` - cscli upgrade scenario -all + - cscli upgrade scenario crowdsec/http-404 --force `, + Args: cobra.MinimumNArgs(0), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + if len(args) == 1 { + UpgradeConfig(cwhub.SCENARIOS, args[0]) + } else if upgrade_all == true { + UpgradeConfig(cwhub.SCENARIOS, "") + } else { + _ = cmd.Help() + } + }, + } + cmdUpgrade.AddCommand(cmdUpgradeScenario) + var cmdUpgradeCollection = &cobra.Command{ + Use: "collection [config]", + Short: "Upgrade collection configuration(s)", + Long: `Upgrade one or more collection configurations`, + Example: ` - cscli upgrade collection crowdsec/apache-lamp + - cscli upgrade collection -all + - cscli upgrade collection crowdsec/apache-lamp --force`, + Args: cobra.MinimumNArgs(0), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + if len(args) == 1 { + UpgradeConfig(cwhub.COLLECTIONS, args[0]) + } else if upgrade_all == true { + UpgradeConfig(cwhub.COLLECTIONS, "") + } else { + _ = cmd.Help() + } + }, + } + cmdUpgrade.AddCommand(cmdUpgradeCollection) + + var cmdUpgradePostoverflow = &cobra.Command{ + Use: "postoverflow [config]", + Short: "Upgrade postoverflow parser configuration(s)", + Long: `Upgrade one or more postoverflow parser configurations`, + Example: ` - cscli 
upgrade postoverflow crowdsec/enrich-rdns + - cscli upgrade postoverflow --all + - cscli upgrade postoverflow crowdsec/enrich-rdns --force`, + Args: cobra.MinimumNArgs(0), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + if len(args) == 1 { + UpgradeConfig(cwhub.PARSERS_OVFLW, args[0]) + } else if upgrade_all == true { + UpgradeConfig(cwhub.PARSERS_OVFLW, "") + } else { + _ = cmd.Help() + } + }, + } + cmdUpgrade.AddCommand(cmdUpgradePostoverflow) + return cmdUpgrade +} diff --git a/cmd/crowdsec/Makefile b/cmd/crowdsec/Makefile new file mode 100644 index 000000000..4fe8797b5 --- /dev/null +++ b/cmd/crowdsec/Makefile @@ -0,0 +1,68 @@ +# Go parameters +GOCMD=go +GOBUILD=$(GOCMD) build +GOCLEAN=$(GOCMD) clean +GOTEST=$(GOCMD) test +GOGET=$(GOCMD) get + +CROWDSEC_BIN=crowdsec +PREFIX?="/" +CFG_PREFIX = $(PREFIX)"/etc/crowdsec/crowdsec/" +BIN_PREFIX = $(PREFIX)"/usr/local/bin/" +DATA_PREFIX = $(PREFIX)"/var/run/crowdsec/" +PID_DIR = $(PREFIX)"/var/run/" + +SYSTEMD_PATH_FILE="/etc/systemd/system/crowdsec.service" + + +all: clean test build + +build: clean + $(GOBUILD) $(LD_OPTS) -o $(CROWDSEC_BIN) -v + +static: clean + $(GOBUILD) -o $(CROWDSEC_BIN) -v -a -tags netgo -ldflags '-w -extldflags "-static"' + +test: + $(GOTEST) -v ./... + +clean: + rm -f $(CROWDSEC_BIN) + +.PHONY: install +install: install-conf install-bin + +.PHONY: install-conf +install-conf: + mkdir -p $(DATA_PREFIX) || exit + (cd ../../ && find ./data -type f -exec install -Dm 755 "{}" "$(DATA_PREFIX){}" \; && cd ./cmd/crowdsec) || exit + (cd ../../config && find ./patterns -type f -exec install -Dm 755 "{}" "$(CFG_PREFIX){}" \; && cd ../cmd/crowdsec) || exit + mkdir -p "$(CFG_PREFIX)" || exit + mkdir -p "$(CFG_PREFIX)/parsers" || exit + mkdir -p "$(CFG_PREFIX)/scenarios" || exit + mkdir -p "$(CFG_PREFIX)/postoverflows" || exit + mkdir -p "$(CFG_PREFIX)/collections" || exit + mkdir -p "$(CFG_PREFIX)/patterns" || exit + install -v -m 755 -D ../../config/prod.yaml "$(CFG_PREFIX)" || exit + install -v -m 755 -D ../../config/dev.yaml "$(CFG_PREFIX)" || exit + install -v -m 755 -D ../../config/acquis.yaml "$(CFG_PREFIX)" || exit + install -v -m 755 -D ../../config/profiles.yaml "$(CFG_PREFIX)" || exit + install -v -m 755 -D ../../config/api.yaml "$(CFG_PREFIX)" || exit + mkdir -p $(PID_DIR) || exit + PID=$(PID_DIR) DATA=$(DATA_PREFIX)"/data/" CFG=$(CFG_PREFIX) envsubst < ../../config/prod.yaml > $(CFG_PREFIX)"/default.yaml" + +.PHONY: install-bin +install-bin: + install -v -m 755 -D "$(CROWDSEC_BIN)" "$(BIN_PREFIX)/$(CROWDSEC_BIN)" || exit + +.PHONY: systemd +systemd: install + CFG=$(CFG_PREFIX) PID=$(PID_DIR) BIN=$(BIN_PREFIX)"/"$(CROWDSEC_BIN) envsubst < ../../config/crowdsec.service > "$(SYSTEMD_PATH_FILE)" + systemctl daemon-reload + +.PHONY: uninstall +uninstall: + rm -rf $(CFG_PREFIX) + rm -rf $(DATA_PREFIX) + rm -f "$(BIN_PREFIX)/$(CROWDSEC_BIN)" + rm -f "$(SYSTEMD_PATH_FILE)" diff --git a/cmd/crowdsec/acquisition.go b/cmd/crowdsec/acquisition.go new file mode 100644 index 000000000..55b1d411d --- /dev/null +++ b/cmd/crowdsec/acquisition.go @@ -0,0 +1,33 @@ +package main + +import ( + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition" +) + +func loadAcquisition() (*acquisition.FileAcquisCtx, error) { + var acquisitionCTX *acquisition.FileAcquisCtx + var err error + /*Init the acquisition : from cli or from acquis.yaml file*/ + if
cConfig.SingleFile != "" { + var input acquisition.FileCtx + input.Filename = cConfig.SingleFile + input.Mode = acquisition.CATMODE + input.Labels = make(map[string]string) + input.Labels["type"] = cConfig.SingleFileLabel + acquisitionCTX, err = acquisition.InitReaderFromFileCtx([]acquisition.FileCtx{input}) + } else { /* Init file reader if we tail */ + acquisitionCTX, err = acquisition.InitReader(cConfig.AcquisitionFile) + } + if err != nil { + return nil, fmt.Errorf("unable to start file acquisition, bailout %v", err) + } + if acquisitionCTX == nil { + return nil, fmt.Errorf("no inputs to process") + } + if cConfig.Profiling == true { + acquisitionCTX.Profiling = true + } + + return acquisitionCTX, nil +} diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go new file mode 100644 index 000000000..bda8168cc --- /dev/null +++ b/cmd/crowdsec/main.go @@ -0,0 +1,320 @@ +package main + +import ( + "fmt" + "strings" + + "io/ioutil" + + _ "net/http/pprof" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition" + config "github.com/crowdsecurity/crowdsec/pkg/config/crowdsec" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/outputs" + "github.com/crowdsecurity/crowdsec/pkg/parser" + "github.com/crowdsecurity/crowdsec/pkg/types" + + log "github.com/sirupsen/logrus" + + "gopkg.in/natefinch/lumberjack.v2" + "gopkg.in/tomb.v2" + "gopkg.in/yaml.v2" +) + +var ( + /*tombs for the parser, buckets and outputs.*/ + acquisTomb tomb.Tomb + parsersTomb tomb.Tomb + bucketsTomb tomb.Tomb + outputsTomb tomb.Tomb + + holders []leaky.BucketFactory + buckets *leaky.Buckets + cConfig *config.Crowdwatch + + /*settings*/ + lastProcessedItem time.Time /*keep track of last item timestamp in time-machine. 
it is used to GC buckets when we dump them.*/ +) + +func configureLogger(logMode string, logFolder string, logLevel log.Level) error { + /*Configure logs*/ + if logMode == "file" { + log.SetOutput(&lumberjack.Logger{ + Filename: logFolder + "/crowdsec.log", + MaxSize: 500, //megabytes + MaxBackups: 3, + MaxAge: 28, //days + Compress: true, //disabled by default + }) + log.SetFormatter(&log.TextFormatter{TimestampFormat: "02-01-2006 15:04:05", FullTimestamp: true}) + } else if logMode != "stdout" { + return fmt.Errorf("log mode '%s' unknown", logMode) + } + + log.Printf("setting loglevel to %s", logLevel) + log.SetLevel(logLevel) + log.SetFormatter(&log.TextFormatter{FullTimestamp: true}) + if logLevel >= log.InfoLevel { + log.SetFormatter(&log.TextFormatter{TimestampFormat: "02-01-2006 15:04:05", FullTimestamp: true}) + } + if logLevel >= log.DebugLevel { + log.SetReportCaller(true) + } + return nil +} + +func main() { + var ( + err error + p parser.UnixParser + parserNodes []parser.Node = make([]parser.Node, 0) + postOverflowNodes []parser.Node = make([]parser.Node, 0) + nbParser int = 1 + parserCTX *parser.UnixParserCtx + postOverflowCTX *parser.UnixParserCtx + acquisitionCTX *acquisition.FileAcquisCtx + CustomParsers []parser.Stagefile + CustomPostoverflows []parser.Stagefile + CustomScenarios []parser.Stagefile + outputEventChan chan types.Event + ) + + inputLineChan := make(chan types.Event) + inputEventChan := make(chan types.Event) + + cConfig = config.NewCrowdwatchConfig() + + // Handle command line arguments + if err := cConfig.GetOPT(); err != nil { + log.Fatalf(err.Error()) + } + + if err = configureLogger(cConfig.LogMode, cConfig.LogFolder, cConfig.LogLevel); err != nil { + log.Fatal(err.Error()) + } + + log.Infof("Crowdwatch %s", cwversion.VersionStr()) + + if cConfig.Prometheus == true { + registerPrometheus() + cConfig.Profiling = true + } + + log.Infof("Loading grok library") + /* load base regexps for two grok parsers */ + parserCTX, err = p.Init(map[string]interface{}{"patterns": cConfig.ConfigFolder + string("/patterns/")}) + if err != nil { + log.Errorf("failed to initialize parser : %v", err) + return + } + postOverflowCTX, err = p.Init(map[string]interface{}{"patterns": cConfig.ConfigFolder + string("/patterns/")}) + if err != nil { + log.Errorf("failed to initialize postoverflow : %v", err) + return + } + + /*enable profiling*/ + if cConfig.Profiling == true { + go runTachymeter(cConfig.HTTPListen) + parserCTX.Profiling = true + postOverflowCTX.Profiling = true + } + + /* + Load enrichers + */ + log.Infof("Loading enrich plugins") + parserPlugins, err := parser.Loadplugin(cConfig.DataFolder) + if err != nil { + log.Errorf("Failed to load plugin geoip : %v", err) + } + parser.ECTX = append(parser.ECTX, parserPlugins) + + /*parser the validatormode option if present. 
+	if cConfig.ValidatorMode != "" {
+		//ugly : given 'parser:file.yaml,postoverflow:file.yaml,scenario:file.yaml', load only those files
+		validators := strings.Split(cConfig.ValidatorMode, ",")
+		for _, val := range validators {
+			splittedValidator := strings.Split(val, ":")
+			if len(splittedValidator) != 2 {
+				log.Fatalf("bad validator format, expected parser:file,scenario:file,postoverflow:file")
+			}
+
+			configType := splittedValidator[0]
+			configFile := splittedValidator[1]
+
+			var parsedFile []parser.Stagefile
+			dataFile, err := ioutil.ReadFile(configFile)
+
+			if err != nil {
+				log.Fatalf("failed opening %s : %s", configFile, err)
+			}
+			if err := yaml.UnmarshalStrict(dataFile, &parsedFile); err != nil {
+				log.Fatalf("failed unmarshalling %s : %s", configFile, err)
+			}
+			switch configType {
+			case "parser":
+				CustomParsers = parsedFile
+			case "scenario":
+				CustomScenarios = parsedFile
+			case "postoverflow":
+				CustomPostoverflows = parsedFile
+			default:
+				log.Fatalf("wrong type, format is parser:file,scenario:file,postoverflow:file")
+			}
+
+		}
+	}
+
+	/* load the parser nodes */
+	if cConfig.ValidatorMode != "" && len(CustomParsers) > 0 {
+		log.Infof("Loading (validatormode) parsers")
+		parserNodes, err = parser.LoadStages(CustomParsers, parserCTX)
+	} else {
+		log.Infof("Loading parsers")
+		parserNodes, err = parser.LoadStageDir(cConfig.ConfigFolder+"/parsers/", parserCTX)
+	}
+	if err != nil {
+		log.Fatalf("failed to load parser config : %v", err)
+	}
+	/* parsers loaded */
+
+	/* load the post-overflow stages */
+	if cConfig.ValidatorMode != "" && len(CustomPostoverflows) > 0 {
+		log.Infof("Loading (validatormode) postoverflow parsers")
+		postOverflowNodes, err = parser.LoadStages(CustomPostoverflows, postOverflowCTX)
+	} else {
+		log.Infof("Loading postoverflow parsers")
+		postOverflowNodes, err = parser.LoadStageDir(cConfig.ConfigFolder+"/postoverflows/", postOverflowCTX)
+	}
+	if err != nil {
+		log.Fatalf("failed to load postoverflow config : %v", err)
+	}
+
+	log.Infof("Loaded Nodes : %d parser, %d postoverflow", len(parserNodes), len(postOverflowNodes))
+	/* post overflow loaded */
+
+	/* Loading buckets / scenarios */
+	if cConfig.ValidatorMode != "" && len(CustomScenarios) > 0 {
+		log.Infof("Loading (validatormode) scenarios")
+		bucketFiles := []string{}
+		for _, scenarios := range CustomScenarios {
+			bucketFiles = append(bucketFiles, scenarios.Filename)
+		}
+		holders, outputEventChan, err = leaky.LoadBuckets(bucketFiles)
+
+	} else {
+		log.Infof("Loading scenarios")
+		holders, outputEventChan, err = leaky.Init(map[string]string{"patterns": cConfig.ConfigFolder + "/scenarios/"})
+	}
+	if err != nil {
+		log.Fatalf("Scenario loading failed : %v", err)
+	}
+	/* buckets/scenarios loaded */
+
+	/*keep track of scenario names for consensus profiling*/
+	var scenariosEnabled string
+	for _, x := range holders {
+		if scenariosEnabled != "" {
+			scenariosEnabled += ","
+		}
+		scenariosEnabled += x.Name
+	}
+
+	buckets = leaky.NewBuckets()
+
+	/*also restore previous state if present*/
+	if cConfig.RestoreMode != "" {
+		log.Warningf("Restoring buckets state from %s", cConfig.RestoreMode)
+		if err := leaky.LoadBucketsState(cConfig.RestoreMode, buckets, holders); err != nil {
+			log.Fatalf("unable to restore buckets : %s", err)
+		}
+	}
+	if cConfig.Profiling {
+		//force the profiling in all buckets
+		for holderIndex := range holders {
+			holders[holderIndex].Profiling = true
+		}
+	}
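+	// Illustrative round-trip : the state file written by dumpMetrics()
+	// ("buckets_state.json") can be fed back as the restore file on a later
+	// run to resume from the dumped bucket state (flag name defined in GetOPT).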
+
+	/*
+		Load output profiles
+	*/
+	log.Infof("Loading output profiles")
+	outputProfiles, err := outputs.LoadOutputProfiles(cConfig.ConfigFolder + "/profiles.yaml")
+	if err != nil || len(outputProfiles) == 0 {
+		log.Fatalf("Failed to load output profiles : %v", err)
+	}
+	/* Linting is done */
+	if cConfig.Linter {
+		return
+	}
+
+	outputRunner, err := outputs.NewOutput(cConfig.OutputConfig, cConfig.Daemonize)
+	if err != nil {
+		log.Fatalf("output plugins initialization error : %s", err.Error())
+	}
+
+	/* Init the API connector */
+	if cConfig.APIMode {
+		log.Infof("Loading API client")
+		var apiConfig = map[string]string{
+			"path":    cConfig.ConfigFolder + "/api.yaml",
+			"profile": scenariosEnabled,
+		}
+		if err := outputRunner.InitAPI(apiConfig); err != nil {
+			log.Fatal(err)
+		}
+	}
+
+	/*if the user is in "single file mode" (might be writing scenarios or parsers), allow loading **without** parsers or scenarios */
+	if cConfig.SingleFile == "" {
+		if len(parserNodes) == 0 {
+			log.Fatalf("no parser(s) loaded, abort.")
+		}
+
+		if len(holders) == 0 {
+			log.Fatalf("no bucket(s) loaded, abort.")
+		}
+
+		if len(outputProfiles) == 0 {
+			log.Fatalf("no output profile(s) loaded, abort.")
+		}
+	}
+
+	log.Infof("Starting processing routines")
+	//start go-routines for parsing, bucket pouring and outputs.
+	for i := 0; i < nbParser; i++ {
+		parsersTomb.Go(func() error {
+			return runParse(inputLineChan, inputEventChan, *parserCTX, parserNodes)
+		})
+	}
+
+	for i := 0; i < nbParser; i++ {
+		bucketsTomb.Go(func() error {
+			return runPour(inputEventChan, holders, buckets)
+		})
+	}
+
+	for i := 0; i < nbParser; i++ {
+		outputsTomb.Go(func() error {
+			return runOutput(inputEventChan, outputEventChan, holders, buckets, *postOverflowCTX, postOverflowNodes, outputProfiles, outputRunner)
+		})
+	}
+
+	log.Warningf("Starting processing data")
+
+	//Init the acquisition : from cli or from acquis.yaml file
+	acquisitionCTX, err = loadAcquisition()
+	if err != nil {
+		log.Fatalf("Failed to load acquisition : %s", err)
+	}
+
+	//start reading in the background
+	acquisition.AcquisStartReading(acquisitionCTX, inputLineChan, &acquisTomb)
+
+	if err = serve(*outputRunner); err != nil {
+		log.Fatal(err)
+	}
+
+}
diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go
new file mode 100644
index 000000000..0c1608b06
--- /dev/null
+++ b/cmd/crowdsec/metrics.go
@@ -0,0 +1,121 @@
+package main
+
+import (
+	"time"
+
+	"github.com/crowdsecurity/crowdsec/pkg/acquisition"
+	leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
+	"github.com/crowdsecurity/crowdsec/pkg/parser"
+	"github.com/jamiealquiza/tachymeter"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+
+	log "github.com/sirupsen/logrus"
+	"net/http"
+
+	"runtime"
+)
+
+var (
+	parseStat     *tachymeter.Tachymeter
+	bucketStat    *tachymeter.Tachymeter
+	outputStat    *tachymeter.Tachymeter
+	linesReadOK   uint64
+	linesReadKO   uint64
+	linesParsedOK uint64
+	linesParsedKO uint64
+	linesPouredOK uint64
+	linesPouredKO uint64
+)
+
+/*prometheus*/
+var globalParserHits = prometheus.NewCounterVec(
+	prometheus.CounterOpts{
+		Name: "cs_parser_hits",
+		Help: "How many times an event entered the parser.",
+	},
+	[]string{"source"},
+)
+var globalParserHitsOk = prometheus.NewCounterVec(
+	prometheus.CounterOpts{
+		Name: "cs_parser_hits_ok",
+		Help: "How many times an event was successfully parsed.",
+	},
+	[]string{"source"},
+)
+var globalParserHitsKo = prometheus.NewCounterVec(
+	prometheus.CounterOpts{
+		Name: "cs_parser_hits_ko",
+		Help: "How many times an event was unsuccessfully parsed.",
+	},
+	[]string{"source"},
+)
+
+var globalBucketPourKo = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Name: "cs_bucket_pour_ko",
+		Help: "How many times an event was poured into no bucket.",
+	},
+)
+
+var globalBucketPourOk = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Name: "cs_bucket_pour_ok",
+		Help: "How many times an event was poured into at least one bucket.",
+	},
+)
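+
+// All of the collectors above are served on the /metrics endpoint registered
+// in registerPrometheus() below ; e.g. (illustrative, the actual address
+// depends on cConfig.HTTPListen) :
+//   curl -s http://127.0.0.1:6060/metrics | grep cs_parser_hits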
+
+func dumpMetrics() {
+
+	if cConfig.DumpBuckets {
+		log.Infof("!! Dumping buckets state")
+		if err := leaky.DumpBucketsStateAt("buckets_state.json", time.Now(), buckets); err != nil {
+			log.Fatalf("Failed dumping bucket state : %s", err)
+		}
+	}
+
+	if cConfig.Profiling {
+		var memoryStats runtime.MemStats
+		runtime.ReadMemStats(&memoryStats)
+
+		log.Infof("parser evt/s : %s", parseStat.Calc())
+		log.Infof("bucket pour evt/s : %s", bucketStat.Calc())
+		log.Infof("outputs evt/s : %s", outputStat.Calc())
+		log.Infof("Alloc = %v MiB", bToMb(memoryStats.Alloc))
+		log.Infof("TotalAlloc = %v MiB", bToMb(memoryStats.TotalAlloc))
+		log.Infof("Sys = %v MiB", bToMb(memoryStats.Sys))
+		log.Infof("NumGC = %v", memoryStats.NumGC)
+		log.Infof("Lines read ok : %d", linesReadOK)
+		if linesReadKO > 0 {
+			log.Infof("Lines discarded : %d (%.2f%%)", linesReadKO, float64(linesReadKO)/float64(linesReadOK)*100.0)
+		}
+		log.Infof("Lines parsed ok : %d", linesParsedOK)
+		if linesParsedKO > 0 {
+			log.Infof("Lines unparsed : %d (%.2f%%)", linesParsedKO, float64(linesParsedKO)/float64(linesParsedOK)*100.0)
+		}
+		log.Infof("Lines poured ok : %d", linesPouredOK)
+		if linesPouredKO > 0 {
+			log.Infof("Lines never poured : %d (%.2f%%)", linesPouredKO, float64(linesPouredKO)/float64(linesPouredOK)*100.0)
+		}
+		log.Infof("Writing metrics dump to %s", cConfig.WorkingFolder+"/crowdsec.profile")
+		prometheus.WriteToTextfile(cConfig.WorkingFolder+"/crowdsec.profile", prometheus.DefaultGatherer)
+	}
+}
+
+func runTachymeter(HTTPListen string) {
+	log.Warningf("Starting profiling and http server")
+	/*Tachymeter for global perfs */
+	parseStat = tachymeter.New(&tachymeter.Config{Size: 100})
+	bucketStat = tachymeter.New(&tachymeter.Config{Size: 100})
+	outputStat = tachymeter.New(&tachymeter.Config{Size: 100})
+	log.Fatal(http.ListenAndServe(HTTPListen, nil))
+}
+
+func registerPrometheus() {
+	/*Register prometheus collectors*/
+	log.Warningf("Loading prometheus collectors")
+	prometheus.MustRegister(globalParserHits, globalParserHitsOk, globalParserHitsKo, parser.NodesHits, parser.NodesHitsOk,
+		parser.NodesHitsKo, acquisition.ReaderHits, leaky.BucketsPour, leaky.BucketsUnderflow, leaky.BucketsInstanciation,
+		leaky.BucketsOverflow)
+	http.Handle("/metrics", promhttp.Handler())
+}
diff --git a/cmd/crowdsec/output.go b/cmd/crowdsec/output.go
new file mode 100644
index 000000000..af7c738ea
--- /dev/null
+++ b/cmd/crowdsec/output.go
@@ -0,0 +1,54 @@
+package main
+
+import (
+	log "github.com/sirupsen/logrus"
+
+	"time"
+
+	leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
+	"github.com/crowdsecurity/crowdsec/pkg/outputs"
+	"github.com/crowdsecurity/crowdsec/pkg/parser"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+)
+
+func runOutput(input chan types.Event, overflow chan types.Event, holders []leaky.BucketFactory, buckets *leaky.Buckets,
+	poctx parser.UnixParserCtx, ponodes []parser.Node, outputProfiles []types.Profile, output *outputs.Output) error {
+	var (
+		//action string
+		start time.Time
+	)
+
+LOOP:
+	for {
+		select {
+		case <-bucketsTomb.Dying():
+			log.Infof("Exiting output processing")
+			output.FlushAll()
+			break LOOP
+		case event := <-overflow:
+			if cConfig.Profiling {
+				start = time.Now()
+			}
+
+			if event.Overflow.Reprocess {
+				log.Debugf("Overflow being reprocessed.")
+				input <- event
+			}
+
+			if event.Overflow.Scenario == "" && event.Overflow.MapKey != "" {
+				//log.Infof("Deleting expired entry %s", event.Overflow.MapKey)
+				buckets.Bucket_map.Delete(event.Overflow.MapKey)
+			} else {
+				/*let's handle output profiles */
+				if err := output.ProcessOutput(event.Overflow, outputProfiles); err != nil {
+					log.Warningf("Error while processing overflow/output : %s", err)
+				}
+			}
+		}
+		if cConfig.Profiling {
+			outputStat.AddTime(time.Since(start))
+		}
+	}
+	return nil
+
+}
diff --git a/cmd/crowdsec/parse.go b/cmd/crowdsec/parse.go
new file mode 100644
index 000000000..59ea723a3
--- /dev/null
+++ b/cmd/crowdsec/parse.go
@@ -0,0 +1,74 @@
+package main
+
+import (
+	"errors"
+	"sync/atomic"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/crowdsecurity/crowdsec/pkg/parser"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+)
+
+func runParse(input chan types.Event, output chan types.Event, parserCTX parser.UnixParserCtx, nodes []parser.Node) error {
+	var start time.Time
+	var discardCPT, processCPT int
+
+LOOP:
+	for {
+		select {
+		case <-parsersTomb.Dying():
+			log.Infof("Killing parser routines")
+			break LOOP
+		case event := <-input:
+			if cConfig.Profiling {
+				start = time.Now()
+			}
+			if !event.Process {
+				if cConfig.Profiling {
+					atomic.AddUint64(&linesReadKO, 1)
+				}
+				continue
+			}
+			if cConfig.Profiling {
+				atomic.AddUint64(&linesReadOK, 1)
+				globalParserHits.With(prometheus.Labels{"source": event.Line.Src}).Inc()
+
+			}
+			/* parse the log using magic */
+			parsed, err := parser.Parse(parserCTX, event, nodes)
+			if err != nil {
+				log.Errorf("failed parsing : %v", err)
+				return errors.New("parsing failed")
+			}
+			if !parsed.Process {
+				if cConfig.Profiling {
+					globalParserHitsKo.With(prometheus.Labels{"source": event.Line.Src}).Inc()
+					atomic.AddUint64(&linesParsedKO, 1)
+				}
+				log.Debugf("Discarding line %+v", parsed)
+				discardCPT++
+				continue
+			}
+			if cConfig.Profiling {
+				globalParserHitsOk.With(prometheus.Labels{"source": event.Line.Src}).Inc()
+				atomic.AddUint64(&linesParsedOK, 1)
+			}
+			processCPT++
+			if parsed.Whitelisted {
+				log.Debugf("event whitelisted, discard")
+				continue
+			}
+			if processCPT%1000 == 0 {
+				log.Debugf("%d lines processed, %d lines discarded (unparsed)", processCPT, discardCPT)
+			}
+			output <- parsed
+			if cConfig.Profiling {
+				parseStat.AddTime(time.Since(start))
+			}
+		}
+	}
+	return nil
+}
diff --git a/cmd/crowdsec/pour.go b/cmd/crowdsec/pour.go
new file mode 100644
index 000000000..4bca09447
--- /dev/null
+++ b/cmd/crowdsec/pour.go
@@ -0,0 +1,65 @@
+package main
+
+import (
+	"fmt"
+	"sync/atomic"
+	"time"
+
+	leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+	log "github.com/sirupsen/logrus"
+)
+
+func runPour(input chan types.Event, holders []leaky.BucketFactory, buckets *leaky.Buckets) error {
+	var (
+		start time.Time
+		count int
+	)
+LOOP:
+	for {
+		//bucket is now ready
+		select {
+		case <-bucketsTomb.Dying():
+			log.Infof("Exiting Bucketify")
+			break LOOP
+		case parsed := <-input:
+			count++
+			if cConfig.Profiling {
+				start = time.Now()
+			}
+
+			if count%5000 == 0 {
+				log.Warningf("%d existing LeakyRoutine", leaky.LeakyRoutineCount)
+				//when in forensics mode, garbage collect buckets
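+				//(in single-file replay there is no wall clock to expire the
+				//leaky buckets, so expiry is driven by the timestamps carried
+				//by the events themselves)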
+				if parsed.MarshaledTime != "" && cConfig.SingleFile != "" {
+					z := &time.Time{}
+					if err := z.UnmarshalText([]byte(parsed.MarshaledTime)); err != nil {
+						log.Warningf("Failed to unmarshal time from event '%s' : %s", parsed.MarshaledTime, err)
+					} else {
+						log.Warningf("Starting buckets garbage collection ...")
+						leaky.GarbageCollectBuckets(*z, buckets)
+					}
+				}
+			}
+			//here we can bucketify with parsed
+			poured, err := leaky.PourItemToHolders(parsed, holders, buckets)
+			if err != nil {
+				log.Errorf("bucketify failed for : %v", parsed)
+				return fmt.Errorf("process of event failed : %v", err)
+			}
+			if poured {
+				globalBucketPourOk.Inc()
+				atomic.AddUint64(&linesPouredOK, 1)
+			} else {
+				globalBucketPourKo.Inc()
+				atomic.AddUint64(&linesPouredKO, 1)
+			}
+			if cConfig.Profiling {
+				bucketStat.AddTime(time.Since(start))
+			}
+			if parsed.MarshaledTime != "" {
+				//error ignored on purpose : a malformed timestamp simply doesn't update the marker
+				_ = lastProcessedItem.UnmarshalText([]byte(parsed.MarshaledTime))
+			}
+		}
+	}
+	log.Infof("Sending signal Bucketify")
+	return nil
+}
diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go
new file mode 100644
index 000000000..fd7ab7edd
--- /dev/null
+++ b/cmd/crowdsec/serve.go
@@ -0,0 +1,130 @@
+package main
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+	"time"
+
+	"github.com/crowdsecurity/crowdsec/pkg/outputs"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/sevlyar/go-daemon"
+)
+
+func reloadHandler(sig os.Signal) error {
+	dumpMetrics()
+	return nil
+}
+
+func termHandler(sig os.Signal) error {
+	log.Warningf("Shutting down routines")
+
+	acquisTomb.Kill(nil)
+	log.Infof("waiting for acquisition to finish")
+	if err := acquisTomb.Wait(); err != nil {
+		log.Warningf("Acquisition returned error : %s", err)
+	}
+	log.Infof("acquisition is finished, wait for parsers/buckets/outputs.")
+	//let's wait more than enough for in-flight events to be parsed.
+	time.Sleep(5 * time.Second)
+	parsersTomb.Kill(nil)
+	if err := parsersTomb.Wait(); err != nil {
+		log.Warningf("Parsers returned error : %s", err)
+	}
+	log.Infof("parsers are done")
+	bucketsTomb.Kill(nil)
+	if err := bucketsTomb.Wait(); err != nil {
+		log.Warningf("Buckets returned error : %s", err)
+	}
+	log.Infof("buckets are done")
+	outputsTomb.Kill(nil)
+	if err := outputsTomb.Wait(); err != nil {
+		log.Warningf("Outputs returned error : %s", err)
+
+	}
+	log.Infof("outputs are done")
+	dumpMetrics()
+	log.Warningf("all routines are done, bye.")
+	return daemon.ErrStop
+}
+
+func serveDaemon() error {
+	var daemonCTX *daemon.Context
+
+	daemon.SetSigHandler(termHandler, syscall.SIGTERM)
+	daemon.SetSigHandler(reloadHandler, syscall.SIGHUP)
+
+	daemonCTX = &daemon.Context{
+		PidFileName: cConfig.PIDFolder + "/crowdsec.pid",
+		PidFilePerm: 0644,
+		WorkDir:     "./",
+		Umask:       027,
+	}
+
+	d, err := daemonCTX.Reborn()
+	if err != nil {
+		return fmt.Errorf("unable to run daemon: %s ", err.Error())
+	}
+	if d != nil {
+		return nil
+	}
+	defer daemonCTX.Release()
+	err = daemon.ServeSignals()
+	if err != nil {
+		return fmt.Errorf("serveDaemon error : %s", err.Error())
+	}
+	return nil
+}
+
+func serveOneTimeRun(outputRunner outputs.Output) error {
+	log.Infof("waiting for acquisition to finish")
+
+	if err := acquisTomb.Wait(); err != nil {
+		log.Warningf("acquisition returned error : %s", err)
+	}
+	log.Infof("acquisition is finished, wait for parsers/buckets/outputs.")
+
+	//let's wait more than enough for in-flight events to be parsed.
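+	//(note : this is a heuristic, there is no explicit drain barrier ; events
+	//still in flight after the fixed delay would be lost)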
+	time.Sleep(5 * time.Second)
+
+	// wait for the parsers to parse all events
+	parsersTomb.Kill(nil)
+	if err := parsersTomb.Wait(); err != nil {
+		log.Warningf("parsers returned error : %s", err)
+	}
+	log.Infof("parsers are done")
+
+	// wait for the buckets to pour all events
+	bucketsTomb.Kill(nil)
+	if err := bucketsTomb.Wait(); err != nil {
+		log.Warningf("buckets returned error : %s", err)
+	}
+	log.Infof("buckets are done")
+
+	// wait for the outputs to flush all events
+	outputsTomb.Kill(nil)
+	if err := outputsTomb.Wait(); err != nil {
+		log.Warningf("outputs returned error : %s", err)
+
+	}
+	log.Infof("outputs are done")
+	dumpMetrics()
+	outputRunner.Flush()
+	log.Warningf("all routines are done, bye.")
+	return nil
+}
+
+func serve(outputRunner outputs.Output) error {
+	if cConfig.Daemonize {
+		if err := serveDaemon(); err != nil {
+			return err
+		}
+	} else {
+		if err := serveOneTimeRun(outputRunner); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/cmd/crowdsec/utils.go b/cmd/crowdsec/utils.go
new file mode 100644
index 000000000..9a31a6011
--- /dev/null
+++ b/cmd/crowdsec/utils.go
@@ -0,0 +1,5 @@
+package main
+
+func bToMb(b uint64) uint64 {
+	return b / 1024 / 1024
+}
diff --git a/config/acquis.yaml b/config/acquis.yaml
new file mode 100644
index 000000000..99cba7107
--- /dev/null
+++ b/config/acquis.yaml
@@ -0,0 +1,52 @@
+filenames:
+ - /var/log/nginx/*.log
+ - ./tests/nginx/nginx.log
+#this is not a syslog file ; indicate which kind of logs it holds
+labels:
+  prog_name: nginx
+  type: nginx_raw_log
+---
+filenames:
+ - /var/log/auth.log
+ - /var/log/syslog
+#no need to set the prog_name, the syslog format contains this info
+labels:
+  type: syslog
+---
+filename: /var/log/apache2/*.log
+labels:
+  prog_name: apache2
+  type: nginx_raw_log
+---
+filenames:
+ - ./tests/tcpdump.out
+ - /root/granola/tcpdump.out
+labels:
+  prog_name: tcpdump
+  type: tcpdump_raw_log
+---
+filename: ./tests/apache.log
+labels:
+  prog_name: apache2
+---
+filename: ./tests/nginx.log
+labels:
+  prog_name: nginx
+
+
+
+
+  # #list of files to be tailed
+  # #it's ok to add files that don't exist, they will just be skipped :)
+  # - /var/log/nginx/*.log
+  # - /root/granola/tcpdump.out
+  # - /var/log/auth.log
+  # - tests/*.log
+  # - tests/tcpdump.out
+  # - tests/nginx/nginx.log
+
+  # # for honeypots
+  # - /data/logs/*.log
+  # - /var/log/tcpdump.out
+  # - /var/log/auth.log
+  # - /var/log/syslog
diff --git a/config/api.yaml b/config/api.yaml
new file mode 100644
index 000000000..08577237b
--- /dev/null
+++ b/config/api.yaml
@@ -0,0 +1,8 @@
+version: v1
+url: https://tmsov6x2n9.execute-api.eu-west-1.amazonaws.com
+signin_path: signin
+push_path: signals
+pull_path: pull
+enroll_path: enroll
+reset_pwd_path: resetpassword
+register_path: register
diff --git a/config/crowdsec.service b/config/crowdsec.service
new file mode 100644
index 000000000..3fe2386a0
--- /dev/null
+++ b/config/crowdsec.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Crowdwatch agent
+After=syslog.target network.target remote-fs.target nss-lookup.target
+
+[Service]
+Type=forking
+#PIDFile=${PID}/crowdsec.pid
+ExecStartPre=${BIN} -c ${CFG}/default.yaml -t
+ExecStart=${BIN} -c ${CFG}/default.yaml
+ExecStartPost=/bin/sleep 0.1
+
+[Install]
+WantedBy=multi-user.target
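+
+# Note: ${BIN}, ${CFG} and ${PID} above are placeholders, presumably expanded
+# at packaging/install time (e.g. with envsubst):
+#   BIN=/usr/local/bin/crowdsec CFG=/etc/crowdsec PID=/var/run \
+#     envsubst < config/crowdsec.service > /etc/systemd/system/crowdsec.service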
diff --git a/config/crowdsec_pull b/config/crowdsec_pull
new file mode 100644
index 000000000..bf17954cc
--- /dev/null
+++ b/config/crowdsec_pull
@@ -0,0 +1,6 @@
+# /etc/cron.d/crowdsec_pull: crontab to pull bad IPs flagged by the crowdsec API
+# into the local ban DB.
+
+# Runs every day at 08:00 AM
+
+0 8 * * * root /usr/local/bin/cscli api pull >> /var/log/cscli.log 2>&1
diff --git a/config/dev.yaml b/config/dev.yaml
new file mode 100644
index 000000000..deee7f972
--- /dev/null
+++ b/config/dev.yaml
@@ -0,0 +1,12 @@
+working_dir: "."
+data_dir: "./data"
+config_dir: "./config"
+pid_dir: "./"
+log_dir: "./logs"
+log_mode: "stdout"
+log_level: info
+profiling: false
+sqlite_path: "./test.db"
+apimode: false
+plugin:
+  backend: "./config/plugins/backend"
diff --git a/config/patterns/aws b/config/patterns/aws
new file mode 100644
index 000000000..5816ce1a0
--- /dev/null
+++ b/config/patterns/aws
@@ -0,0 +1,11 @@
+S3_REQUEST_LINE (?:%{WORD:verb} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest})
+
+S3_ACCESS_LOG %{WORD:owner} %{NOTSPACE:bucket} \[%{HTTPDATE:timestamp}\] %{IP:clientip} %{NOTSPACE:requester} %{NOTSPACE:request_id} %{NOTSPACE:operation} %{NOTSPACE:key} (?:"%{S3_REQUEST_LINE}"|-) (?:%{INT:response:int}|-) (?:-|%{NOTSPACE:error_code}) (?:%{INT:bytes:int}|-) (?:%{INT:object_size:int}|-) (?:%{INT:request_time_ms:int}|-) (?:%{INT:turnaround_time_ms:int}|-) (?:%{QS:referrer}|-) (?:"?%{QS:agent}"?|-) (?:-|%{NOTSPACE:version_id})
+
+ELB_URIPATHPARAM %{URIPATH:path}(?:%{URIPARAM:params})?
+
+ELB_URI %{URIPROTO:proto}://(?:%{USER}(?::[^@]*)?@)?(?:%{URIHOST:urihost})?(?:%{ELB_URIPATHPARAM})?
+
+ELB_REQUEST_LINE (?:%{WORD:verb} %{ELB_URI:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest})
+
+ELB_ACCESS_LOG %{TIMESTAMP_ISO8601:timestamp} %{NOTSPACE:elb} %{IP:clientip}:%{INT:clientport:int} (?:(%{IP:backendip}:?:%{INT:backendport:int})|-) %{NUMBER:request_processing_time:float} %{NUMBER:backend_processing_time:float} %{NUMBER:response_processing_time:float} %{INT:response:int} %{INT:backend_response:int} %{INT:received_bytes:int} %{INT:bytes:int} "%{ELB_REQUEST_LINE}"
\ No newline at end of file
diff --git a/config/patterns/bacula b/config/patterns/bacula
new file mode 100644
index 000000000..96ff0e0a8
--- /dev/null
+++ b/config/patterns/bacula
@@ -0,0 +1,50 @@
+BACULA_TIMESTAMP %{MONTHDAY}-%{MONTH} %{HOUR}:%{MINUTE}
+BACULA_HOST [a-zA-Z0-9-]+
+BACULA_VOLUME %{USER}
+BACULA_DEVICE %{USER}
+BACULA_DEVICEPATH %{UNIXPATH}
+BACULA_CAPACITY %{INT}{1,3}(,%{INT}{3})*
+BACULA_VERSION %{USER}
+BACULA_JOB %{USER}
+
+BACULA_LOG_MAX_CAPACITY User defined maximum volume capacity %{BACULA_CAPACITY} exceeded on device \"%{BACULA_DEVICE:device}\" \(%{BACULA_DEVICEPATH}\)
+BACULA_LOG_END_VOLUME End of medium on Volume \"%{BACULA_VOLUME:volume}\" Bytes=%{BACULA_CAPACITY} Blocks=%{BACULA_CAPACITY} at %{MONTHDAY}-%{MONTH}-%{YEAR} %{HOUR}:%{MINUTE}.
+BACULA_LOG_NEW_VOLUME Created new Volume \"%{BACULA_VOLUME:volume}\" in catalog.
+BACULA_LOG_NEW_LABEL Labeled new Volume \"%{BACULA_VOLUME:volume}\" on device \"%{BACULA_DEVICE:device}\" \(%{BACULA_DEVICEPATH}\).
+BACULA_LOG_WROTE_LABEL Wrote label to prelabeled Volume \"%{BACULA_VOLUME:volume}\" on device \"%{BACULA_DEVICE}\" \(%{BACULA_DEVICEPATH}\)
+BACULA_LOG_NEW_MOUNT New volume \"%{BACULA_VOLUME:volume}\" mounted on device \"%{BACULA_DEVICE:device}\" \(%{BACULA_DEVICEPATH}\) at %{MONTHDAY}-%{MONTH}-%{YEAR} %{HOUR}:%{MINUTE}.
+BACULA_LOG_NOOPEN \s+Cannot open %{DATA}: ERR=%{GREEDYDATA:berror}
+BACULA_LOG_NOOPENDIR \s+Could not open directory %{DATA}: ERR=%{GREEDYDATA:berror}
+BACULA_LOG_NOSTAT \s+Could not stat %{DATA}: ERR=%{GREEDYDATA:berror}
+BACULA_LOG_NOJOBS There are no more Jobs associated with Volume \"%{BACULA_VOLUME:volume}\". Marking it purged.
+BACULA_LOG_ALL_RECORDS_PRUNED All records pruned from Volume \"%{BACULA_VOLUME:volume}\"; marking it \"Purged\"
+BACULA_LOG_BEGIN_PRUNE_JOBS Begin pruning Jobs older than %{INT} month %{INT} days .
+BACULA_LOG_BEGIN_PRUNE_FILES Begin pruning Files.
+BACULA_LOG_PRUNED_JOBS Pruned %{INT} Jobs* for client %{BACULA_HOST:client} from catalog.
+BACULA_LOG_PRUNED_FILES Pruned Files from %{INT} Jobs* for client %{BACULA_HOST:client} from catalog.
+BACULA_LOG_ENDPRUNE End auto prune.
+BACULA_LOG_STARTJOB Start Backup JobId %{INT}, Job=%{BACULA_JOB:job}
+BACULA_LOG_STARTRESTORE Start Restore Job %{BACULA_JOB:job}
+BACULA_LOG_USEDEVICE Using Device \"%{BACULA_DEVICE:device}\"
+BACULA_LOG_DIFF_FS \s+%{UNIXPATH} is a different filesystem. Will not descend from %{UNIXPATH} into it.
+BACULA_LOG_JOBEND Job write elapsed time = %{DATA:elapsed}, Transfer rate = %{NUMBER} (K|M|G)? Bytes/second
+BACULA_LOG_NOPRUNE_JOBS No Jobs found to prune.
+BACULA_LOG_NOPRUNE_FILES No Files found to prune.
+BACULA_LOG_VOLUME_PREVWRITTEN Volume \"%{BACULA_VOLUME:volume}\" previously written, moving to end of data.
+BACULA_LOG_READYAPPEND Ready to append to end of Volume \"%{BACULA_VOLUME:volume}\" size=%{INT}
+BACULA_LOG_CANCELLING Cancelling duplicate JobId=%{INT}.
+BACULA_LOG_MARKCANCEL JobId %{INT}, Job %{BACULA_JOB:job} marked to be canceled.
+BACULA_LOG_CLIENT_RBJ shell command: run ClientRunBeforeJob \"%{GREEDYDATA:runjob}\"
+BACULA_LOG_VSS (Generate )?VSS (Writer)?
+BACULA_LOG_MAXSTART Fatal error: Job canceled because max start delay time exceeded.
+BACULA_LOG_DUPLICATE Fatal error: JobId %{INT:duplicate} already running. Duplicate job not allowed.
+BACULA_LOG_NOJOBSTAT Fatal error: No Job status returned from FD.
+BACULA_LOG_FATAL_CONN Fatal error: bsock.c:133 Unable to connect to (Client: %{BACULA_HOST:client}|Storage daemon) on %{HOSTNAME}:%{POSINT}. ERR=%{GREEDYDATA:berror}
+BACULA_LOG_NO_CONNECT Warning: bsock.c:127 Could not connect to (Client: %{BACULA_HOST:client}|Storage daemon) on %{HOSTNAME}:%{POSINT}. ERR=%{GREEDYDATA:berror}
+BACULA_LOG_NO_AUTH Fatal error: Unable to authenticate with File daemon at %{HOSTNAME}. Possible causes:
+BACULA_LOG_NOSUIT No prior or suitable Full backup found in catalog. Doing FULL backup.
+BACULA_LOG_NOPRIOR No prior Full backup Job record found.
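+# Illustrative line (not from a real server) that BACULA_LOGLINE below would
+# match, via BACULA_LOG_STARTJOB:
+#   12-Feb 13:40 backup-dir JobId 42: Start Backup JobId 42, Job=nightly.2020-02-12_13.40.00_03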
+
+BACULA_LOG_JOB (Error: )?Bacula %{BACULA_HOST} %{BACULA_VERSION} \(%{BACULA_VERSION}\):
+
+BACULA_LOGLINE %{BACULA_TIMESTAMP:bts} %{BACULA_HOST:hostname} JobId %{INT:jobid}: (%{BACULA_LOG_MAX_CAPACITY}|%{BACULA_LOG_END_VOLUME}|%{BACULA_LOG_NEW_VOLUME}|%{BACULA_LOG_NEW_LABEL}|%{BACULA_LOG_WROTE_LABEL}|%{BACULA_LOG_NEW_MOUNT}|%{BACULA_LOG_NOOPEN}|%{BACULA_LOG_NOOPENDIR}|%{BACULA_LOG_NOSTAT}|%{BACULA_LOG_NOJOBS}|%{BACULA_LOG_ALL_RECORDS_PRUNED}|%{BACULA_LOG_BEGIN_PRUNE_JOBS}|%{BACULA_LOG_BEGIN_PRUNE_FILES}|%{BACULA_LOG_PRUNED_JOBS}|%{BACULA_LOG_PRUNED_FILES}|%{BACULA_LOG_ENDPRUNE}|%{BACULA_LOG_STARTJOB}|%{BACULA_LOG_STARTRESTORE}|%{BACULA_LOG_USEDEVICE}|%{BACULA_LOG_DIFF_FS}|%{BACULA_LOG_JOBEND}|%{BACULA_LOG_NOPRUNE_JOBS}|%{BACULA_LOG_NOPRUNE_FILES}|%{BACULA_LOG_VOLUME_PREVWRITTEN}|%{BACULA_LOG_READYAPPEND}|%{BACULA_LOG_CANCELLING}|%{BACULA_LOG_MARKCANCEL}|%{BACULA_LOG_CLIENT_RBJ}|%{BACULA_LOG_VSS}|%{BACULA_LOG_MAXSTART}|%{BACULA_LOG_DUPLICATE}|%{BACULA_LOG_NOJOBSTAT}|%{BACULA_LOG_FATAL_CONN}|%{BACULA_LOG_NO_CONNECT}|%{BACULA_LOG_NO_AUTH}|%{BACULA_LOG_NOSUIT}|%{BACULA_LOG_JOB}|%{BACULA_LOG_NOPRIOR})
\ No newline at end of file
diff --git a/config/patterns/bro b/config/patterns/bro
new file mode 100644
index 000000000..e8d374958
--- /dev/null
+++ b/config/patterns/bro
@@ -0,0 +1,13 @@
+# https://www.bro.org/sphinx/script-reference/log-files.html
+
+# http.log
+BRO_HTTP %{NUMBER:ts}\t%{NOTSPACE:uid}\t%{IP:orig_h}\t%{INT:orig_p}\t%{IP:resp_h}\t%{INT:resp_p}\t%{INT:trans_depth}\t%{GREEDYDATA:method}\t%{GREEDYDATA:domain}\t%{GREEDYDATA:uri}\t%{GREEDYDATA:referrer}\t%{GREEDYDATA:user_agent}\t%{NUMBER:request_body_len}\t%{NUMBER:response_body_len}\t%{GREEDYDATA:status_code}\t%{GREEDYDATA:status_msg}\t%{GREEDYDATA:info_code}\t%{GREEDYDATA:info_msg}\t%{GREEDYDATA:filename}\t%{GREEDYDATA:bro_tags}\t%{GREEDYDATA:username}\t%{GREEDYDATA:password}\t%{GREEDYDATA:proxied}\t%{GREEDYDATA:orig_fuids}\t%{GREEDYDATA:orig_mime_types}\t%{GREEDYDATA:resp_fuids}\t%{GREEDYDATA:resp_mime_types}
+
+# dns.log
+BRO_DNS %{NUMBER:ts}\t%{NOTSPACE:uid}\t%{IP:orig_h}\t%{INT:orig_p}\t%{IP:resp_h}\t%{INT:resp_p}\t%{WORD:proto}\t%{INT:trans_id}\t%{GREEDYDATA:query}\t%{GREEDYDATA:qclass}\t%{GREEDYDATA:qclass_name}\t%{GREEDYDATA:qtype}\t%{GREEDYDATA:qtype_name}\t%{GREEDYDATA:rcode}\t%{GREEDYDATA:rcode_name}\t%{GREEDYDATA:AA}\t%{GREEDYDATA:TC}\t%{GREEDYDATA:RD}\t%{GREEDYDATA:RA}\t%{GREEDYDATA:Z}\t%{GREEDYDATA:answers}\t%{GREEDYDATA:TTLs}\t%{GREEDYDATA:rejected}
+
+# conn.log
+BRO_CONN %{NUMBER:ts}\t%{NOTSPACE:uid}\t%{IP:orig_h}\t%{INT:orig_p}\t%{IP:resp_h}\t%{INT:resp_p}\t%{WORD:proto}\t%{GREEDYDATA:service}\t%{NUMBER:duration}\t%{NUMBER:orig_bytes}\t%{NUMBER:resp_bytes}\t%{GREEDYDATA:conn_state}\t%{GREEDYDATA:local_orig}\t%{GREEDYDATA:missed_bytes}\t%{GREEDYDATA:history}\t%{GREEDYDATA:orig_pkts}\t%{GREEDYDATA:orig_ip_bytes}\t%{GREEDYDATA:resp_pkts}\t%{GREEDYDATA:resp_ip_bytes}\t%{GREEDYDATA:tunnel_parents}
+
+# files.log
+BRO_FILES %{NUMBER:ts}\t%{NOTSPACE:fuid}\t%{IP:tx_hosts}\t%{IP:rx_hosts}\t%{NOTSPACE:conn_uids}\t%{GREEDYDATA:source}\t%{GREEDYDATA:depth}\t%{GREEDYDATA:analyzers}\t%{GREEDYDATA:mime_type}\t%{GREEDYDATA:filename}\t%{GREEDYDATA:duration}\t%{GREEDYDATA:local_orig}\t%{GREEDYDATA:is_orig}\t%{GREEDYDATA:seen_bytes}\t%{GREEDYDATA:total_bytes}\t%{GREEDYDATA:missing_bytes}\t%{GREEDYDATA:overflow_bytes}\t%{GREEDYDATA:timedout}\t%{GREEDYDATA:parent_fuid}\t%{GREEDYDATA:md5}\t%{GREEDYDATA:sha1}\t%{GREEDYDATA:sha256}\t%{GREEDYDATA:extracted}
\ No newline at end of file
diff --git a/config/patterns/cowrie_honeypot b/config/patterns/cowrie_honeypot
new file mode 100644
index 000000000..eda0c9e60
--- /dev/null
+++ b/config/patterns/cowrie_honeypot
@@ -0,0 +1 @@
+COWRIE_NEW_CO New connection: %{IPV4:source_ip}:[0-9]+ \(%{IPV4:dest_ip}:%{INT:dest_port}\) \[session: %{DATA:telnet_session}\]$
\ No newline at end of file
diff --git a/config/patterns/exim b/config/patterns/exim
new file mode 100644
index 000000000..f135561d0
--- /dev/null
+++ b/config/patterns/exim
@@ -0,0 +1,12 @@
+EXIM_MSGID [0-9A-Za-z]{6}-[0-9A-Za-z]{6}-[0-9A-Za-z]{2}
+EXIM_FLAGS (<=|[-=>*]>|[*]{2}|==)
+EXIM_DATE %{YEAR:exim_year}-%{MONTHNUM:exim_month}-%{MONTHDAY:exim_day} %{TIME:exim_time}
+EXIM_PID \[%{POSINT}\]
+EXIM_QT ((\d+y)?(\d+w)?(\d+d)?(\d+h)?(\d+m)?(\d+s)?)
+EXIM_EXCLUDE_TERMS (Message is frozen|(Start|End) queue run| Warning: | retry time not reached | no (IP address|host name) found for (IP address|host) | unexpected disconnection while reading SMTP command | no immediate delivery: |another process is handling this message)
+EXIM_REMOTE_HOST (H=(%{NOTSPACE:remote_hostname} )?(\(%{NOTSPACE:remote_heloname}\) )?\[%{IP:remote_host}\])
+EXIM_INTERFACE (I=\[%{IP:exim_interface}\](:%{NUMBER:exim_interface_port}))
+EXIM_PROTOCOL (P=%{NOTSPACE:protocol})
+EXIM_MSG_SIZE (S=%{NUMBER:exim_msg_size})
+EXIM_HEADER_ID (id=%{NOTSPACE:exim_header_id})
+EXIM_SUBJECT (T=%{QS:exim_subject})
\ No newline at end of file
diff --git a/config/patterns/firewalls b/config/patterns/firewalls
new file mode 100644
index 000000000..fafa7ec01
--- /dev/null
+++ b/config/patterns/firewalls
@@ -0,0 +1,86 @@
+# NetScreen firewall logs
+NETSCREENSESSIONLOG %{SYSLOGTIMESTAMP:date} %{IPORHOST:device} %{IPORHOST}: NetScreen device_id=%{WORD:device_id}%{DATA}: start_time=%{QUOTEDSTRING:start_time} duration=%{INT:duration} policy_id=%{INT:policy_id} service=%{DATA:service} proto=%{INT:proto} src zone=%{WORD:src_zone} dst zone=%{WORD:dst_zone} action=%{WORD:action} sent=%{INT:sent} rcvd=%{INT:rcvd} src=%{IPORHOST:src_ip} dst=%{IPORHOST:dst_ip} src_port=%{INT:src_port} dst_port=%{INT:dst_port} src-xlated ip=%{IPORHOST:src_xlated_ip} port=%{INT:src_xlated_port} dst-xlated ip=%{IPORHOST:dst_xlated_ip} port=%{INT:dst_xlated_port} session_id=%{INT:session_id} reason=%{GREEDYDATA:reason}
+
+#== Cisco ASA ==
+CISCOTAG [A-Z0-9]+-%{INT}-(?:[A-Z0-9_]+)
+CISCOTIMESTAMP %{MONTH} +%{MONTHDAY}(?: %{YEAR})? %{TIME}
+CISCO_TAGGED_SYSLOG ^<%{POSINT:syslog_pri}>%{CISCOTIMESTAMP:timestamp}( %{SYSLOGHOST:sysloghost})? ?: %%{CISCOTAG:ciscotag}:
+# Common Particles
+CISCO_ACTION Built|Teardown|Deny|Denied|denied|requested|permitted|denied by ACL|discarded|est-allowed|Dropping|created|deleted
+CISCO_REASON Duplicate TCP SYN|Failed to locate egress interface|Invalid transport field|No matching connection|DNS Response|DNS Query|(?:%{WORD}\s*)*
+CISCO_DIRECTION Inbound|inbound|Outbound|outbound
+CISCO_INTERVAL first hit|%{INT}-second interval
+CISCO_XLATE_TYPE static|dynamic
+# ASA-1-104001
+CISCOFW104001 \((?:Primary|Secondary)\) Switching to ACTIVE - %{GREEDYDATA:switch_reason}
+# ASA-1-104002
+CISCOFW104002 \((?:Primary|Secondary)\) Switching to STANDBY - %{GREEDYDATA:switch_reason}
+# ASA-1-104003
+CISCOFW104003 \((?:Primary|Secondary)\) Switching to FAILED\.
+# ASA-1-104004
+CISCOFW104004 \((?:Primary|Secondary)\) Switching to OK\.
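+# Illustrative sample (not from a real device) that CISCO_TAGGED_SYSLOG above
+# and CISCOFW302013_302014_302015_302016 below would match:
+#   <166>Jun 10 2020 12:00:00 fw01 : %ASA-6-302013: Built inbound TCP connection 4711 for outside:192.0.2.1/54321 to inside:10.0.0.5/443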
+# ASA-1-105003
+CISCOFW105003 \((?:Primary|Secondary)\) Monitoring on [Ii]nterface %{GREEDYDATA:interface_name} waiting
+# ASA-1-105004
+CISCOFW105004 \((?:Primary|Secondary)\) Monitoring on [Ii]nterface %{GREEDYDATA:interface_name} normal
+# ASA-1-105005
+CISCOFW105005 \((?:Primary|Secondary)\) Lost Failover communications with mate on [Ii]nterface %{GREEDYDATA:interface_name}
+# ASA-1-105008
+CISCOFW105008 \((?:Primary|Secondary)\) Testing [Ii]nterface %{GREEDYDATA:interface_name}
+# ASA-1-105009
+CISCOFW105009 \((?:Primary|Secondary)\) Testing on [Ii]nterface %{GREEDYDATA:interface_name} (?:Passed|Failed)
+# ASA-2-106001
+CISCOFW106001 %{CISCO_DIRECTION:direction} %{WORD:protocol} connection %{CISCO_ACTION:action} from %{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port} flags %{GREEDYDATA:tcp_flags} on interface %{GREEDYDATA:interface}
+# ASA-2-106006, ASA-2-106007, ASA-2-106010
+CISCOFW106006_106007_106010 %{CISCO_ACTION:action} %{CISCO_DIRECTION:direction} %{WORD:protocol} (?:from|src) %{IP:src_ip}/%{INT:src_port}(\(%{DATA:src_fwuser}\))? (?:to|dst) %{IP:dst_ip}/%{INT:dst_port}(\(%{DATA:dst_fwuser}\))? (?:on interface %{DATA:interface}|due to %{CISCO_REASON:reason})
+# ASA-3-106014
+CISCOFW106014 %{CISCO_ACTION:action} %{CISCO_DIRECTION:direction} %{WORD:protocol} src %{DATA:src_interface}:%{IP:src_ip}(\(%{DATA:src_fwuser}\))? dst %{DATA:dst_interface}:%{IP:dst_ip}(\(%{DATA:dst_fwuser}\))? \(type %{INT:icmp_type}, code %{INT:icmp_code}\)
+# ASA-6-106015
+CISCOFW106015 %{CISCO_ACTION:action} %{WORD:protocol} \(%{DATA:policy_id}\) from %{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port} flags %{DATA:tcp_flags} on interface %{GREEDYDATA:interface}
+# ASA-1-106021
+CISCOFW106021 %{CISCO_ACTION:action} %{WORD:protocol} reverse path check from %{IP:src_ip} to %{IP:dst_ip} on interface %{GREEDYDATA:interface}
+# ASA-4-106023
+CISCOFW106023 %{CISCO_ACTION:action}( protocol)? %{WORD:protocol} src %{DATA:src_interface}:%{DATA:src_ip}(/%{INT:src_port})?(\(%{DATA:src_fwuser}\))? dst %{DATA:dst_interface}:%{DATA:dst_ip}(/%{INT:dst_port})?(\(%{DATA:dst_fwuser}\))?( \(type %{INT:icmp_type}, code %{INT:icmp_code}\))? by access-group "?%{DATA:policy_id}"? \[%{DATA:hashcode1}, %{DATA:hashcode2}\]
+# ASA-4-106100, ASA-4-106102, ASA-4-106103
+CISCOFW106100_2_3 access-list %{NOTSPACE:policy_id} %{CISCO_ACTION:action} %{WORD:protocol} for user '%{DATA:src_fwuser}' %{DATA:src_interface}/%{IP:src_ip}\(%{INT:src_port}\) -> %{DATA:dst_interface}/%{IP:dst_ip}\(%{INT:dst_port}\) hit-cnt %{INT:hit_count} %{CISCO_INTERVAL:interval} \[%{DATA:hashcode1}, %{DATA:hashcode2}\]
+# ASA-5-106100
+CISCOFW106100 access-list %{NOTSPACE:policy_id} %{CISCO_ACTION:action} %{WORD:protocol} %{DATA:src_interface}/%{IP:src_ip}\(%{INT:src_port}\)(\(%{DATA:src_fwuser}\))? -> %{DATA:dst_interface}/%{IP:dst_ip}\(%{INT:dst_port}\)(\(%{DATA:src_fwuser}\))? hit-cnt %{INT:hit_count} %{CISCO_INTERVAL:interval} \[%{DATA:hashcode1}, %{DATA:hashcode2}\]
+# ASA-6-110002
+CISCOFW110002 %{CISCO_REASON:reason} for %{WORD:protocol} from %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port}
+# ASA-6-302010
+CISCOFW302010 %{INT:connection_count} in use, %{INT:connection_count_max} most used
+# ASA-6-302013, ASA-6-302014, ASA-6-302015, ASA-6-302016
+CISCOFW302013_302014_302015_302016 %{CISCO_ACTION:action}(?: %{CISCO_DIRECTION:direction})? %{WORD:protocol} connection %{INT:connection_id} for %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port}( \(%{IP:src_mapped_ip}/%{INT:src_mapped_port}\))?(\(%{DATA:src_fwuser}\))? to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port}( \(%{IP:dst_mapped_ip}/%{INT:dst_mapped_port}\))?(\(%{DATA:dst_fwuser}\))?( duration %{TIME:duration} bytes %{INT:bytes})?(?: %{CISCO_REASON:reason})?( \(%{DATA:user}\))?
+# ASA-6-302020, ASA-6-302021
+CISCOFW302020_302021 %{CISCO_ACTION:action}(?: %{CISCO_DIRECTION:direction})? %{WORD:protocol} connection for faddr %{IP:dst_ip}/%{INT:icmp_seq_num}(?:\(%{DATA:fwuser}\))? gaddr %{IP:src_xlated_ip}/%{INT:icmp_code_xlated} laddr %{IP:src_ip}/%{INT:icmp_code}( \(%{DATA:user}\))?
+# ASA-6-305011
+CISCOFW305011 %{CISCO_ACTION:action} %{CISCO_XLATE_TYPE:xlate_type} %{WORD:protocol} translation from %{DATA:src_interface}:%{IP:src_ip}(/%{INT:src_port})?(\(%{DATA:src_fwuser}\))? to %{DATA:src_xlated_interface}:%{IP:src_xlated_ip}/%{DATA:src_xlated_port}
+# ASA-3-313001, ASA-3-313004, ASA-3-313008
+CISCOFW313001_313004_313008 %{CISCO_ACTION:action} %{WORD:protocol} type=%{INT:icmp_type}, code=%{INT:icmp_code} from %{IP:src_ip} on interface %{DATA:interface}( to %{IP:dst_ip})?
+# ASA-4-313005
+CISCOFW313005 %{CISCO_REASON:reason} for %{WORD:protocol} error message: %{WORD:err_protocol} src %{DATA:err_src_interface}:%{IP:err_src_ip}(\(%{DATA:err_src_fwuser}\))? dst %{DATA:err_dst_interface}:%{IP:err_dst_ip}(\(%{DATA:err_dst_fwuser}\))? \(type %{INT:err_icmp_type}, code %{INT:err_icmp_code}\) on %{DATA:interface} interface\. Original IP payload: %{WORD:protocol} src %{IP:orig_src_ip}/%{INT:orig_src_port}(\(%{DATA:orig_src_fwuser}\))? dst %{IP:orig_dst_ip}/%{INT:orig_dst_port}(\(%{DATA:orig_dst_fwuser}\))?
+# ASA-5-321001
+CISCOFW321001 Resource '%{WORD:resource_name}' limit of %{POSINT:resource_limit} reached for system
+# ASA-4-402117
+CISCOFW402117 %{WORD:protocol}: Received a non-IPSec packet \(protocol= %{WORD:orig_protocol}\) from %{IP:src_ip} to %{IP:dst_ip}
+# ASA-4-402119
+CISCOFW402119 %{WORD:protocol}: Received an %{WORD:orig_protocol} packet \(SPI= %{DATA:spi}, sequence number= %{DATA:seq_num}\) from %{IP:src_ip} \(user= %{DATA:user}\) to %{IP:dst_ip} that failed anti-replay checking
+# ASA-4-419001
+CISCOFW419001 %{CISCO_ACTION:action} %{WORD:protocol} packet from %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port} to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port}, reason: %{GREEDYDATA:reason}
+# ASA-4-419002
+CISCOFW419002 %{CISCO_REASON:reason} from %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port} to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port} with different initial sequence number
+# ASA-4-500004
+CISCOFW500004 %{CISCO_REASON:reason} for protocol=%{WORD:protocol}, from %{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port}
+# ASA-6-602303, ASA-6-602304
+CISCOFW602303_602304 %{WORD:protocol}: An %{CISCO_DIRECTION:direction} %{GREEDYDATA:tunnel_type} SA \(SPI= %{DATA:spi}\) between %{IP:src_ip} and %{IP:dst_ip} \(user= %{DATA:user}\) has been %{CISCO_ACTION:action}
+# ASA-7-710001, ASA-7-710002, ASA-7-710003, ASA-7-710005, ASA-7-710006
+CISCOFW710001_710002_710003_710005_710006 %{WORD:protocol} (?:request|access) %{CISCO_ACTION:action} from %{IP:src_ip}/%{INT:src_port} to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port}
+# ASA-6-713172
+CISCOFW713172 Group = %{GREEDYDATA:group}, IP = %{IP:src_ip}, Automatic NAT Detection Status:\s+Remote end\s*%{DATA:is_remote_natted}\s*behind a NAT device\s+This\s+end\s*%{DATA:is_local_natted}\s*behind a NAT device
+# ASA-4-733100
+CISCOFW733100 \[\s*%{DATA:drop_type}\s*\] drop %{DATA:drop_rate_id} exceeded. Current burst rate is %{INT:drop_rate_current_burst} per second, max configured rate is %{INT:drop_rate_max_burst}; Current average rate is %{INT:drop_rate_current_avg} per second, max configured rate is %{INT:drop_rate_max_avg}; Cumulative total count is %{INT:drop_total_count}
+#== End Cisco ASA ==
+
+# Shorewall firewall logs
+SHOREWALL (%{SYSLOGTIMESTAMP:timestamp}) (%{WORD:nf_host}) kernel:.*Shorewall:(%{WORD:nf_action1})?:(%{WORD:nf_action2})?.*IN=(%{USERNAME:nf_in_interface})?.*(OUT= *MAC=(%{COMMONMAC:nf_dst_mac}):(%{COMMONMAC:nf_src_mac})?|OUT=%{USERNAME:nf_out_interface}).*SRC=(%{IPV4:nf_src_ip}).*DST=(%{IPV4:nf_dst_ip}).*LEN=(%{WORD:nf_len}).*?TOS=(%{WORD:nf_tos}).*?PREC=(%{WORD:nf_prec}).*?TTL=(%{INT:nf_ttl}).*?ID=(%{INT:nf_id}).*?PROTO=(%{WORD:nf_protocol}).*?SPT=(%{INT:nf_src_port}?.*DPT=%{INT:nf_dst_port}?.*)
+#== End Shorewall
\ No newline at end of file
diff --git a/config/patterns/haproxy b/config/patterns/haproxy
new file mode 100644
index 000000000..c71bc3171
--- /dev/null
+++ b/config/patterns/haproxy
@@ -0,0 +1,39 @@
+## These patterns were tested w/ haproxy-1.4.15
+
+## Documentation of the haproxy log formats can be found at the following links:
+## http://code.google.com/p/haproxy-docs/wiki/HTTPLogFormat
+## http://code.google.com/p/haproxy-docs/wiki/TCPLogFormat
+
+HAPROXYTIME %{HOUR:haproxy_hour}:%{MINUTE:haproxy_minute}(?::%{SECOND:haproxy_second})
+HAPROXYDATE %{MONTHDAY:haproxy_monthday}/%{MONTH:haproxy_month}/%{YEAR:haproxy_year}:%{HAPROXYTIME:haproxy_time}.%{INT:haproxy_milliseconds}
+
+# Override these default patterns to parse out what is captured in your haproxy.cfg
+HAPROXYCAPTUREDREQUESTHEADERS %{DATA:captured_request_headers}
+HAPROXYCAPTUREDRESPONSEHEADERS %{DATA:captured_response_headers}
+
+# Example:
+# These haproxy config lines will add data to the logs that are captured
+# by the patterns below. Place them in your custom patterns directory to
+# override the defaults.
+#
+# capture request header Host len 40
+# capture request header X-Forwarded-For len 50
+# capture request header Accept-Language len 50
+# capture request header Referer len 200
+# capture request header User-Agent len 200
+#
+# capture response header Content-Type len 30
+# capture response header Content-Encoding len 10
+# capture response header Cache-Control len 200
+# capture response header Last-Modified len 200
+#
+# HAPROXYCAPTUREDREQUESTHEADERS %{DATA:request_header_host}\|%{DATA:request_header_x_forwarded_for}\|%{DATA:request_header_accept_language}\|%{DATA:request_header_referer}\|%{DATA:request_header_user_agent}
+# HAPROXYCAPTUREDRESPONSEHEADERS %{DATA:response_header_content_type}\|%{DATA:response_header_content_encoding}\|%{DATA:response_header_cache_control}\|%{DATA:response_header_last_modified}
+
+# parse a haproxy 'httplog' line
+HAPROXYHTTPBASE %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_request}/%{INT:time_queue}/%{INT:time_backend_connect}/%{INT:time_backend_response}/%{NOTSPACE:time_duration} %{INT:http_status_code} %{NOTSPACE:bytes_read} %{DATA:captured_request_cookie} %{DATA:captured_response_cookie} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue} (\{%{HAPROXYCAPTUREDREQUESTHEADERS}\})?( )?(\{%{HAPROXYCAPTUREDRESPONSEHEADERS}\})?( )?"(|(%{WORD:http_verb} (%{URIPROTO:http_proto}://)?(?:%{USER:http_user}(?::[^@]*)?@)?(?:%{URIHOST:http_host})?(?:%{URIPATHPARAM:http_request})?( HTTP/%{NUMBER:http_version})?))?"
+
+HAPROXYHTTP (?:%{SYSLOGTIMESTAMP:syslog_timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{HAPROXYHTTPBASE}
+
+# parse a haproxy 'tcplog' line
+HAPROXYTCP (?:%{SYSLOGTIMESTAMP:syslog_timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_queue}/%{INT:time_backend_connect}/%{NOTSPACE:time_duration} %{NOTSPACE:bytes_read} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue}
\ No newline at end of file
diff --git a/config/patterns/java b/config/patterns/java
new file mode 100644
index 000000000..d0ad391be
--- /dev/null
+++ b/config/patterns/java
@@ -0,0 +1,20 @@
+JAVACLASS (?:[a-zA-Z$_][a-zA-Z$_0-9]*\.)*[a-zA-Z$_][a-zA-Z$_0-9]*
+#Space is an allowed character to match special cases like 'Native Method' or 'Unknown Source'
+JAVAFILE (?:[A-Za-z0-9_. -]+)
+#Allow special method
+JAVAMETHOD (?:()|[a-zA-Z$_][a-zA-Z$_0-9]*)
+#Line number is optional in special cases 'Native method' or 'Unknown source'
+JAVASTACKTRACEPART %{SPACE}at %{JAVACLASS:class}\.%{JAVAMETHOD:method}\(%{JAVAFILE:file}(?::%{NUMBER:line})?\)
+# Java Logs
+JAVATHREAD (?:[A-Z]{2}-Processor[\d]+)
+##JAVACLASS (?:[a-zA-Z0-9-]+\.)+[A-Za-z0-9$]+
+##JAVAFILE (?:[A-Za-z0-9_.-]+)
+##JAVASTACKTRACEPART at %{JAVACLASS:class}\.%{WORD:method}\(%{JAVAFILE:file}:%{NUMBER:line}\)
+JAVALOGMESSAGE (.*)
+# MMM dd, yyyy HH:mm:ss eg: Jan 9, 2014 7:13:13 AM
+CATALINA_DATESTAMP %{MONTH} %{MONTHDAY}, 20%{YEAR} %{HOUR}:?%{MINUTE}(?::?%{SECOND}) (?:AM|PM)
+# yyyy-MM-dd HH:mm:ss,SSS ZZZ eg: 2014-01-09 17:32:25,527 -0800
+TOMCAT_DATESTAMP 20%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}(?::?%{SECOND}) %{ISO8601_TIMEZONE}
+CATALINALOG %{CATALINA_DATESTAMP:timestamp} %{JAVACLASS:class} %{JAVALOGMESSAGE:logmessage}
+# 2014-01-09 20:03:28,269 -0800 | ERROR | com.example.service.ExampleService - something completely unexpected happened...
+TOMCATLOG %{TOMCAT_DATESTAMP:timestamp} \| %{LOGLEVEL:level} \| %{JAVACLASS:class} - %{JAVALOGMESSAGE:logmessage}
\ No newline at end of file
diff --git a/config/patterns/junos b/config/patterns/junos
new file mode 100644
index 000000000..2da91cc6c
--- /dev/null
+++ b/config/patterns/junos
@@ -0,0 +1,8 @@
+# JUNOS 11.4 RT_FLOW patterns
+RT_FLOW_EVENT (RT_FLOW_SESSION_CREATE|RT_FLOW_SESSION_CLOSE|RT_FLOW_SESSION_DENY)
+
+RT_FLOW1 %{RT_FLOW_EVENT:event}: %{GREEDYDATA:close-reason}: %{IP:src-ip}/%{INT:src-port}->%{IP:dst-ip}/%{INT:dst-port} %{DATA:service} %{IP:nat-src-ip}/%{INT:nat-src-port}->%{IP:nat-dst-ip}/%{INT:nat-dst-port} %{DATA:src-nat-rule-name} %{DATA:dst-nat-rule-name} %{INT:protocol-id} %{DATA:policy-name} %{DATA:from-zone} %{DATA:to-zone} %{INT:session-id} \d+\(%{DATA:sent}\) \d+\(%{DATA:received}\) %{INT:elapsed-time} .*
+
+RT_FLOW2 %{RT_FLOW_EVENT:event}: session created %{IP:src-ip}/%{INT:src-port}->%{IP:dst-ip}/%{INT:dst-port} %{DATA:service} %{IP:nat-src-ip}/%{INT:nat-src-port}->%{IP:nat-dst-ip}/%{INT:nat-dst-port} %{DATA:src-nat-rule-name} %{DATA:dst-nat-rule-name} %{INT:protocol-id} %{DATA:policy-name} %{DATA:from-zone} %{DATA:to-zone} %{INT:session-id} .*
+
+RT_FLOW3 %{RT_FLOW_EVENT:event}: session denied %{IP:src-ip}/%{INT:src-port}->%{IP:dst-ip}/%{INT:dst-port} %{DATA:service} %{INT:protocol-id}\(\d\) %{DATA:policy-name} %{DATA:from-zone} %{DATA:to-zone} .*
diff --git a/config/patterns/linux-syslog b/config/patterns/linux-syslog
new file mode 100644
index 000000000..0911964ee
--- /dev/null
+++ b/config/patterns/linux-syslog
@@ -0,0 +1,16 @@
+SYSLOG5424PRINTASCII [!-~]+
+
+SYSLOGBASE2 (?:%{SYSLOGTIMESTAMP:timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) (?:%{SYSLOGFACILITY} )?%{SYSLOGHOST:logsource}+(?: %{SYSLOGPROG}:|)
+SYSLOGPAMSESSION %{SYSLOGBASE} %{GREEDYDATA:message}%{WORD:pam_module}\(%{DATA:pam_caller}\): session %{WORD:pam_session_state} for user %{USERNAME:username}(?: by %{GREEDYDATA:pam_by})?
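+
+# Illustrative line that SYSLOGLINE below would match:
+#   Jun 10 04:12:00 myhost sshd[1234]: Connection closed by 192.0.2.1 port 55310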
+
+CRON_ACTION [A-Z ]+
+CRONLOG %{SYSLOGBASE} \(%{USER:user}\) %{CRON_ACTION:action} \(%{DATA:message}\)
+
+SYSLOGLINE %{SYSLOGBASE2} %{GREEDYDATA:message}
+
+# IETF 5424 syslog(8) format (see http://www.rfc-editor.org/info/rfc5424)
+SYSLOG5424PRI <%{NONNEGINT:syslog5424_pri}>
+SYSLOG5424SD \[%{DATA}\]+
+SYSLOG5424BASE %{SYSLOG5424PRI}%{NONNEGINT:syslog5424_ver} +(?:%{TIMESTAMP_ISO8601:syslog5424_ts}|-) +(?:%{HOSTNAME:syslog5424_host}|-) +(-|%{SYSLOG5424PRINTASCII:syslog5424_app}) +(-|%{SYSLOG5424PRINTASCII:syslog5424_proc}) +(-|%{SYSLOG5424PRINTASCII:syslog5424_msgid}) +(?:%{SYSLOG5424SD:syslog5424_sd}|-|)
+
+SYSLOG5424LINE %{SYSLOG5424BASE} +%{GREEDYDATA:syslog5424_msg}
\ No newline at end of file
diff --git a/config/patterns/mcollective b/config/patterns/mcollective
new file mode 100644
index 000000000..0389cc391
--- /dev/null
+++ b/config/patterns/mcollective
@@ -0,0 +1,4 @@
+# Remember, these can be multi-line events.
+MCOLLECTIVE ., \[%{TIMESTAMP_ISO8601:timestamp} #%{POSINT:pid}\]%{SPACE}%{LOGLEVEL:event_level}
+
+MCOLLECTIVEAUDIT %{TIMESTAMP_ISO8601:timestamp}:
\ No newline at end of file
diff --git a/config/patterns/modsecurity b/config/patterns/modsecurity
new file mode 100644
index 000000000..0c614dc18
--- /dev/null
+++ b/config/patterns/modsecurity
@@ -0,0 +1,18 @@
+APACHEERRORTIME %{DAY} %{MONTH} %{MONTHDAY} %{TIME} %{YEAR}
+APACHEERRORPREFIX \[%{APACHEERRORTIME:timestamp}\] \[%{NOTSPACE:apacheseverity}\] (\[pid %{INT}:tid %{INT}\] )?\[client %{IPORHOST:sourcehost}(:%{INT:source_port})?\] (\[client %{IPORHOST}\])?
+GENERICAPACHEERROR %{APACHEERRORPREFIX} %{GREEDYDATA:message}
+MODSECPREFIX %{APACHEERRORPREFIX} ModSecurity: %{NOTSPACE:modsecseverity}\. %{GREEDYDATA:modsecmessage}
+MODSECRULEFILE \[file %{QUOTEDSTRING:rulefile}\]
+MODSECRULELINE \[line %{QUOTEDSTRING:ruleline}\]
+MODSECMATCHOFFSET \[offset %{QUOTEDSTRING:matchoffset}\]
+MODSECRULEID \[id %{QUOTEDSTRING:ruleid}\]
+MODSECRULEREV \[rev %{QUOTEDSTRING:rulerev}\]
+MODSECRULEMSG \[msg %{QUOTEDSTRING:rulemessage}\]
+MODSECRULEDATA \[data %{QUOTEDSTRING:ruledata}\]
+MODSECRULESEVERITY \[severity ["']%{WORD:ruleseverity}["']\]
+MODSECRULEVERS \[ver "[^"]+"\]
+MODSECRULETAGS (?:\[tag %{QUOTEDSTRING:ruletag0}\] )?(?:\[tag %{QUOTEDSTRING:ruletag1}\] )?(?:\[tag %{QUOTEDSTRING:ruletag2}\] )?(?:\[tag %{QUOTEDSTRING:ruletag3}\] )?(?:\[tag %{QUOTEDSTRING:ruletag4}\] )?(?:\[tag %{QUOTEDSTRING:ruletag5}\] )?(?:\[tag %{QUOTEDSTRING:ruletag6}\] )?(?:\[tag %{QUOTEDSTRING:ruletag7}\] )?(?:\[tag %{QUOTEDSTRING:ruletag8}\] )?(?:\[tag %{QUOTEDSTRING:ruletag9}\] )?(?:\[tag %{QUOTEDSTRING}\] )*
+MODSECHOSTNAME \[hostname ['"]%{DATA:targethost}["']\]
+MODSECURI \[uri ["']%{DATA:targeturi}["']\]
+MODSECUID \[unique_id %{QUOTEDSTRING:uniqueid}\]
+MODSECAPACHEERROR %{MODSECPREFIX} %{MODSECRULEFILE} %{MODSECRULELINE} (?:%{MODSECMATCHOFFSET} )?(?:%{MODSECRULEID} )?(?:%{MODSECRULEREV} )?(?:%{MODSECRULEMSG} )?(?:%{MODSECRULEDATA} )?(?:%{MODSECRULESEVERITY} )?(?:%{MODSECRULEVERS} )?%{MODSECRULETAGS}%{MODSECHOSTNAME} %{MODSECURI} %{MODSECUID}
\ No newline at end of file
diff --git a/config/patterns/mongodb b/config/patterns/mongodb
new file mode 100644
index 000000000..126a2a57a
--- /dev/null
+++ b/config/patterns/mongodb
@@ -0,0 +1,7 @@
+MONGO_LOG %{SYSLOGTIMESTAMP:timestamp} \[%{WORD:component}\] %{GREEDYDATA:message}
+MONGO_QUERY \{ \{ .* \} ntoreturn: \}
+MONGO_WORDDASH \b[\w-]+\b
+MONGO_SLOWQUERY %{WORD} %{MONGO_WORDDASH:database}\.%{MONGO_WORDDASH:collection} %{WORD}: %{MONGO_QUERY:query} %{WORD}:%{NONNEGINT:ntoreturn} %{WORD}:%{NONNEGINT:ntoskip} %{WORD}:%{NONNEGINT:nscanned}.*nreturned:%{NONNEGINT:nreturned}..+ %{POSINT:duration}ms
+MONGO3_SEVERITY \w
+MONGO3_COMPONENT %{WORD}|-
+MONGO3_LOG %{TIMESTAMP_ISO8601:timestamp} %{MONGO3_SEVERITY:severity} %{MONGO3_COMPONENT:component}%{SPACE}(?:\[%{DATA:context}\])? %{GREEDYDATA:message}
\ No newline at end of file
diff --git a/config/patterns/mysql b/config/patterns/mysql
new file mode 100644
index 000000000..ab33fd8b5
--- /dev/null
+++ b/config/patterns/mysql
@@ -0,0 +1 @@
+MYSQL_AUTH_FAIL %{TIMESTAMP_ISO8601:time} %{NUMBER} \[Note\] Access denied for user '%{DATA:user}'@'%{IP:source_ip}' \(using password: YES\)
diff --git a/config/patterns/nagios b/config/patterns/nagios
new file mode 100644
index 000000000..5dcba0b96
--- /dev/null
+++ b/config/patterns/nagios
@@ -0,0 +1,124 @@
+##################################################################################
+##################################################################################
+# Chop Nagios log files to smithereens!
+#
+# A set of GROK filters to process logfiles generated by Nagios.
+# While it does not yet cover all possible Nagios logs, this set intends to.
+#
+# Some more work needs to be done to cover all External Commands:
+# http://old.nagios.org/developerinfo/externalcommands/commandlist.php
+#
+# If you need some support on these rules please contact:
+# Jelle Smet http://smetj.net
+#
+#################################################################################
+#################################################################################
+
+NAGIOSTIME \[%{NUMBER:nagios_epoch}\]
+
+###############################################
+######## Begin nagios log types
+###############################################
+NAGIOS_TYPE_CURRENT_SERVICE_STATE CURRENT SERVICE STATE
+NAGIOS_TYPE_CURRENT_HOST_STATE CURRENT HOST STATE
+
+NAGIOS_TYPE_SERVICE_NOTIFICATION SERVICE NOTIFICATION
+NAGIOS_TYPE_HOST_NOTIFICATION HOST NOTIFICATION
+
+NAGIOS_TYPE_SERVICE_ALERT SERVICE ALERT
+NAGIOS_TYPE_HOST_ALERT HOST ALERT
+
+NAGIOS_TYPE_SERVICE_FLAPPING_ALERT SERVICE FLAPPING ALERT
+NAGIOS_TYPE_HOST_FLAPPING_ALERT HOST FLAPPING ALERT
+
+NAGIOS_TYPE_SERVICE_DOWNTIME_ALERT SERVICE DOWNTIME ALERT
+NAGIOS_TYPE_HOST_DOWNTIME_ALERT HOST DOWNTIME ALERT
+
+NAGIOS_TYPE_PASSIVE_SERVICE_CHECK PASSIVE SERVICE CHECK
+NAGIOS_TYPE_PASSIVE_HOST_CHECK PASSIVE HOST CHECK
+
+NAGIOS_TYPE_SERVICE_EVENT_HANDLER SERVICE EVENT HANDLER
+NAGIOS_TYPE_HOST_EVENT_HANDLER HOST EVENT HANDLER
+
+NAGIOS_TYPE_EXTERNAL_COMMAND EXTERNAL COMMAND
+NAGIOS_TYPE_TIMEPERIOD_TRANSITION TIMEPERIOD TRANSITION
+###############################################
+######## End nagios log types
+###############################################
+
+###############################################
+######## Begin external check types
+###############################################
+NAGIOS_EC_DISABLE_SVC_CHECK DISABLE_SVC_CHECK
+NAGIOS_EC_ENABLE_SVC_CHECK ENABLE_SVC_CHECK
+NAGIOS_EC_DISABLE_HOST_CHECK DISABLE_HOST_CHECK
+NAGIOS_EC_ENABLE_HOST_CHECK ENABLE_HOST_CHECK
+NAGIOS_EC_PROCESS_SERVICE_CHECK_RESULT PROCESS_SERVICE_CHECK_RESULT
+NAGIOS_EC_PROCESS_HOST_CHECK_RESULT PROCESS_HOST_CHECK_RESULT
+NAGIOS_EC_SCHEDULE_SERVICE_DOWNTIME SCHEDULE_SERVICE_DOWNTIME
+NAGIOS_EC_SCHEDULE_HOST_DOWNTIME SCHEDULE_HOST_DOWNTIME
+NAGIOS_EC_DISABLE_HOST_SVC_NOTIFICATIONS DISABLE_HOST_SVC_NOTIFICATIONS
+NAGIOS_EC_ENABLE_HOST_SVC_NOTIFICATIONS ENABLE_HOST_SVC_NOTIFICATIONS
+NAGIOS_EC_DISABLE_HOST_NOTIFICATIONS DISABLE_HOST_NOTIFICATIONS
+NAGIOS_EC_ENABLE_HOST_NOTIFICATIONS ENABLE_HOST_NOTIFICATIONS
+NAGIOS_EC_DISABLE_SVC_NOTIFICATIONS DISABLE_SVC_NOTIFICATIONS
+NAGIOS_EC_ENABLE_SVC_NOTIFICATIONS ENABLE_SVC_NOTIFICATIONS
+###############################################
+######## End external check types
+###############################################
+NAGIOS_WARNING Warning:%{SPACE}%{GREEDYDATA:nagios_message}
+
+NAGIOS_CURRENT_SERVICE_STATE %{NAGIOS_TYPE_CURRENT_SERVICE_STATE:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{DATA:nagios_statetype};%{DATA:nagios_statecode};%{GREEDYDATA:nagios_message}
+NAGIOS_CURRENT_HOST_STATE %{NAGIOS_TYPE_CURRENT_HOST_STATE:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{DATA:nagios_statetype};%{DATA:nagios_statecode};%{GREEDYDATA:nagios_message}
+
+NAGIOS_SERVICE_NOTIFICATION %{NAGIOS_TYPE_SERVICE_NOTIFICATION:nagios_type}: %{DATA:nagios_notifyname};%{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{DATA:nagios_contact};%{GREEDYDATA:nagios_message}
+NAGIOS_HOST_NOTIFICATION %{NAGIOS_TYPE_HOST_NOTIFICATION:nagios_type}: %{DATA:nagios_notifyname};%{DATA:nagios_hostname};%{DATA:nagios_state};%{DATA:nagios_contact};%{GREEDYDATA:nagios_message}
+
+NAGIOS_SERVICE_ALERT %{NAGIOS_TYPE_SERVICE_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{DATA:nagios_statelevel};%{NUMBER:nagios_attempt};%{GREEDYDATA:nagios_message}
+NAGIOS_HOST_ALERT %{NAGIOS_TYPE_HOST_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{DATA:nagios_statelevel};%{NUMBER:nagios_attempt};%{GREEDYDATA:nagios_message}
+
+NAGIOS_SERVICE_FLAPPING_ALERT %{NAGIOS_TYPE_SERVICE_FLAPPING_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{GREEDYDATA:nagios_message}
+NAGIOS_HOST_FLAPPING_ALERT %{NAGIOS_TYPE_HOST_FLAPPING_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{GREEDYDATA:nagios_message}
+
+NAGIOS_SERVICE_DOWNTIME_ALERT %{NAGIOS_TYPE_SERVICE_DOWNTIME_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{GREEDYDATA:nagios_comment}
+NAGIOS_HOST_DOWNTIME_ALERT %{NAGIOS_TYPE_HOST_DOWNTIME_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{GREEDYDATA:nagios_comment}
+
+NAGIOS_PASSIVE_SERVICE_CHECK %{NAGIOS_TYPE_PASSIVE_SERVICE_CHECK:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{GREEDYDATA:nagios_comment}
+NAGIOS_PASSIVE_HOST_CHECK %{NAGIOS_TYPE_PASSIVE_HOST_CHECK:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{GREEDYDATA:nagios_comment}
+
+NAGIOS_SERVICE_EVENT_HANDLER %{NAGIOS_TYPE_SERVICE_EVENT_HANDLER:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{DATA:nagios_statelevel};%{DATA:nagios_event_handler_name}
+NAGIOS_HOST_EVENT_HANDLER %{NAGIOS_TYPE_HOST_EVENT_HANDLER:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{DATA:nagios_statelevel};%{DATA:nagios_event_handler_name}
+
+NAGIOS_TIMEPERIOD_TRANSITION %{NAGIOS_TYPE_TIMEPERIOD_TRANSITION:nagios_type}: %{DATA:nagios_service};%{DATA:nagios_unknown1};%{DATA:nagios_unknown2}
+
+####################
+#### External checks
+####################
+
+#Disable host & service check
+NAGIOS_EC_LINE_DISABLE_SVC_CHECK %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_SVC_CHECK:nagios_command};%{DATA:nagios_hostname};%{DATA:nagios_service}
+NAGIOS_EC_LINE_DISABLE_HOST_CHECK %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_HOST_CHECK:nagios_command};%{DATA:nagios_hostname}
+
+#Enable host & service check
host & service check +NAGIOS_EC_LINE_ENABLE_SVC_CHECK %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_SVC_CHECK:nagios_command};%{DATA:nagios_hostname};%{DATA:nagios_service} +NAGIOS_EC_LINE_ENABLE_HOST_CHECK %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_HOST_CHECK:nagios_command};%{DATA:nagios_hostname} + +#Process host & service check +NAGIOS_EC_LINE_PROCESS_SERVICE_CHECK_RESULT %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_PROCESS_SERVICE_CHECK_RESULT:nagios_command};%{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{GREEDYDATA:nagios_check_result} +NAGIOS_EC_LINE_PROCESS_HOST_CHECK_RESULT %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_PROCESS_HOST_CHECK_RESULT:nagios_command};%{DATA:nagios_hostname};%{DATA:nagios_state};%{GREEDYDATA:nagios_check_result} + +#Disable host & service notifications +NAGIOS_EC_LINE_DISABLE_HOST_SVC_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_HOST_SVC_NOTIFICATIONS:nagios_command};%{GREEDYDATA:nagios_hostname} +NAGIOS_EC_LINE_DISABLE_HOST_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_HOST_NOTIFICATIONS:nagios_command};%{GREEDYDATA:nagios_hostname} +NAGIOS_EC_LINE_DISABLE_SVC_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_SVC_NOTIFICATIONS:nagios_command};%{DATA:nagios_hostname};%{GREEDYDATA:nagios_service} + +#Enable host & service notifications +NAGIOS_EC_LINE_ENABLE_HOST_SVC_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_HOST_SVC_NOTIFICATIONS:nagios_command};%{GREEDYDATA:nagios_hostname} +NAGIOS_EC_LINE_ENABLE_HOST_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_HOST_NOTIFICATIONS:nagios_command};%{GREEDYDATA:nagios_hostname} +NAGIOS_EC_LINE_ENABLE_SVC_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_SVC_NOTIFICATIONS:nagios_command};%{DATA:nagios_hostname};%{GREEDYDATA:nagios_service} + +#Schedule host & service downtime +NAGIOS_EC_LINE_SCHEDULE_HOST_DOWNTIME %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_SCHEDULE_HOST_DOWNTIME:nagios_command};%{DATA:nagios_hostname};%{NUMBER:nagios_start_time};%{NUMBER:nagios_end_time};%{NUMBER:nagios_fixed};%{NUMBER:nagios_trigger_id};%{NUMBER:nagios_duration};%{DATA:author};%{DATA:comment} + +#End matching line +NAGIOSLOGLINE %{NAGIOSTIME} (?:%{NAGIOS_WARNING}|%{NAGIOS_CURRENT_SERVICE_STATE}|%{NAGIOS_CURRENT_HOST_STATE}|%{NAGIOS_SERVICE_NOTIFICATION}|%{NAGIOS_HOST_NOTIFICATION}|%{NAGIOS_SERVICE_ALERT}|%{NAGIOS_HOST_ALERT}|%{NAGIOS_SERVICE_FLAPPING_ALERT}|%{NAGIOS_HOST_FLAPPING_ALERT}|%{NAGIOS_SERVICE_DOWNTIME_ALERT}|%{NAGIOS_HOST_DOWNTIME_ALERT}|%{NAGIOS_PASSIVE_SERVICE_CHECK}|%{NAGIOS_PASSIVE_HOST_CHECK}|%{NAGIOS_SERVICE_EVENT_HANDLER}|%{NAGIOS_HOST_EVENT_HANDLER}|%{NAGIOS_TIMEPERIOD_TRANSITION}|%{NAGIOS_EC_LINE_DISABLE_SVC_CHECK}|%{NAGIOS_EC_LINE_ENABLE_SVC_CHECK}|%{NAGIOS_EC_LINE_DISABLE_HOST_CHECK}|%{NAGIOS_EC_LINE_ENABLE_HOST_CHECK}|%{NAGIOS_EC_LINE_PROCESS_HOST_CHECK_RESULT}|%{NAGIOS_EC_LINE_PROCESS_SERVICE_CHECK_RESULT}|%{NAGIOS_EC_LINE_SCHEDULE_HOST_DOWNTIME}|%{NAGIOS_EC_LINE_DISABLE_HOST_SVC_NOTIFICATIONS}|%{NAGIOS_EC_LINE_ENABLE_HOST_SVC_NOTIFICATIONS}|%{NAGIOS_EC_LINE_DISABLE_HOST_NOTIFICATIONS}|%{NAGIOS_EC_LINE_ENABLE_HOST_NOTIFICATIONS}|%{NAGIOS_EC_LINE_DISABLE_SVC_NOTIFICATIONS}|%{NAGIOS_EC_LINE_ENABLE_SVC_NOTIFICATIONS}) \ No newline at end of file diff --git a/config/patterns/nginx b/config/patterns/nginx new file mode 100644 index 
000000000..92982fc80 --- /dev/null +++ b/config/patterns/nginx @@ -0,0 +1,19 @@ +NGUSERNAME [a-zA-Z\.\@\-\+_%]+ +NGUSER %{NGUSERNAME} + +# '$remote_addr - $remote_user [$time_local] ' +# '"$request" $status $body_bytes_sent ' +# '"$http_referer" "$http_user_agent"'; + +# 127.0.0.1 - - [28/Jan/2016:14:19:36 +0300] "GET /zero.html HTTP/1.1" 200 398 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36" + +NOTDQUOTE [^"]* +DAY2 \d{2} + +#NGINXERRTIME %{YEAR:year}/%{MONTHNUM2:month}/%{DAY2:day} %{HOUR:hour}:%{MINUTE:minute}:%{SECOND:second} +NGINXERRTIME %{YEAR}/%{MONTHNUM2}/%{DAY2} %{HOUR}:%{MINUTE}:%{SECOND} + +NGINXACCESS %{IPORHOST:remote_addr} - %{NGUSER:remote_user} \[%{HTTPDATE:time_local}\] "%{WORD:method} %{URIPATHPARAM:request} HTTP/%{NUMBER:http_version}" %{NUMBER:status} %{NUMBER:body_bytes_sent} "%{NOTDQUOTE:http_referer}" "%{NOTDQUOTE:http_user_agent}" + +# YYYY/MM/DD HH:MM:SS [LEVEL] PID#TID: *CID MESSAGE +NGINXERROR %{NGINXERRTIME:time} \[%{LOGLEVEL:loglevel}\] %{NONNEGINT:pid}#%{NONNEGINT:tid}: (\*%{NONNEGINT:cid} )?%{GREEDYDATA:message} diff --git a/config/patterns/paths b/config/patterns/paths new file mode 100644 index 000000000..a4f019491 --- /dev/null +++ b/config/patterns/paths @@ -0,0 +1,14 @@ + +#DIR ^.*/ +#FILE [^/].*$ + +#URI_SPLIT ^%{GREEDYDATA:request}\?%{GREEDYDATA:http_args}$ +#FULLPATH_SPLITTER %{DIR:prefix_directory}%{FILE:file_name} + + +NAXSI_FMT ^NAXSI_FMT: ip=%{IPORHOST:src_ip}&server=%{IPORHOST:target_ip}&uri=%{PATH:http_path}&learning=\d&vers=%{DATA:naxsi_version}&total_processed=\d+&total_blocked=\d+&block=\d+(&cscore\d=%{WORD:score_label}&score\d=%{INT:score})+&zone0=%{WORD:zone} +#^NAXSI_FMT: ip=%{IPORHOST:src_ip}&server=%{IPORHOST:target_ip}&uri=%{PATH:http_path}&learning=\d&vers=%{DATA:naxsi_version}&total_processed=\d+&total_blocked=\d+&block=\d+(&cscore\d=%{WORD:score_label}&score\d=%{INT:score})+&cscore2 +#^NAXSI_FMT: ip=%{IPORHOST:src_ip}&server=%{IPORHOST:target_ip}&uri=%{PATH:http_path}(&cscore\d=%{WORD:score_label}&score\d=%{INT:score})+&cscore2 +#^NAXSI_FMT: ip=%{IPORHOST:src_ip}&server=%{IPORHOST:target_ip}&uri=%{PATH:http_path}&learning=\d&vers=%{DATA:naxsi_version}&total_processed=\d+&total_blocked=\d+&block=\d+(&cscore\d=%{WORD:score_label}&score\d=%{INT:score})+&cscore2 + +NAXSI_EXLOG ^NAXSI_EXLOG: ip=%{IPORHOST:naxsi_src_ip}&server=%{IPORHOST:naxsi_dst_ip}&uri=%{PATH:http_path}&id=%{INT:naxsi_id}&zone=%{WORD:naxsi_zone}&var_name=%{DATA:naxsi_var_name}&content= diff --git a/config/patterns/postgresql b/config/patterns/postgresql new file mode 100644 index 000000000..6d2b984d1 --- /dev/null +++ b/config/patterns/postgresql @@ -0,0 +1,2 @@ +# Default postgresql pg_log format pattern +POSTGRESQL %{DATESTAMP:timestamp} %{TZ} %{DATA:user_id} %{GREEDYDATA:connection_id} %{POSINT:pid} \ No newline at end of file diff --git a/config/patterns/rails b/config/patterns/rails new file mode 100644 index 000000000..04e4c56b5 --- /dev/null +++ b/config/patterns/rails @@ -0,0 +1,18 @@ +RUUID [0-9a-fA-F]{32} +# rails controller with action +RAILS_CONTROLLER [^#]+ +RAIL_ACTION \w+ +RCONTROLLER %{RAILS_CONTROLLER:controller}#%{RAIL_ACTION:action} + +# this will often be the only line: +RAILS_TIMESTAMP %{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{ISO8601_TIMEZONE} +RAILS3HEAD (?m)Started %{WORD:verb} "%{URIPATHPARAM:request}" for %{IPORHOST:clientip} at %{RAILS_TIMESTAMP:timestamp} +# for some strange reason, params are stripped of {} - not sure that's a good idea.
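Editor's note on `config/patterns/nginx` above: a grok pattern such as `NGINXACCESS` is only a macro layer over a regular expression; each `%{NAME:field}` expands to a named capture group. A minimal sketch of that expansion in Go, hand-expanded with deliberately loose stand-ins for sub-patterns like `%{IPORHOST}` and `%{HTTPDATE}` (illustrative only, not crowdsec's actual expansion), run against the sample line quoted in the file's comments:

```
package main

import (
	"fmt"
	"regexp"
)

// Hand-expanded, simplified NGINXACCESS: every %{NAME:field} becomes (?P<field>...).
var nginxAccess = regexp.MustCompile(
	`^(?P<remote_addr>\S+) - (?P<remote_user>\S+) ` +
		`\[(?P<time_local>[^\]]+)\] ` +
		`"(?P<method>\w+) (?P<request>\S+) HTTP/(?P<http_version>[0-9.]+)" ` +
		`(?P<status>\d+) (?P<body_bytes_sent>\d+) ` +
		`"(?P<http_referer>[^"]*)" "(?P<http_user_agent>[^"]*)"`)

func main() {
	line := `127.0.0.1 - - [28/Jan/2016:14:19:36 +0300] "GET /zero.html HTTP/1.1" 200 398 "-" "Mozilla/5.0"`
	m := nginxAccess.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no match")
		return
	}
	for i, name := range nginxAccess.SubexpNames() {
		if i > 0 && name != "" {
			fmt.Printf("%s=%q\n", name, m[i]) // e.g. method="GET", status="200"
		}
	}
}
```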
+RPROCESSING \W*Processing by %{RCONTROLLER} as %{NOTSPACE:format}(?:\W*Parameters: {%{DATA:params}}\W*)? +RAILS3PROFILE (?:\(Views: %{NUMBER:viewms}ms \| ActiveRecord: %{NUMBER:activerecordms}ms|\(ActiveRecord: %{NUMBER:activerecordms}ms)? +RAILS3FOOT Completed %{NUMBER:response}%{DATA} in %{NUMBER:totalms}ms %{RAILS3PROFILE}%{GREEDYDATA} + +RAILS_CONTEXT (?:%{DATA}\n)* + +# putting it all together +RAILS3 %{RAILS3HEAD}(?:%{RPROCESSING})?%{RAILS_CONTEXT:context}(?:%{RAILS3FOOT})? \ No newline at end of file diff --git a/config/patterns/redis b/config/patterns/redis new file mode 100644 index 000000000..6a005a86a --- /dev/null +++ b/config/patterns/redis @@ -0,0 +1,21 @@ + +# +# Format 1: +# +# [43569] 27 Aug 12:38:58.471 * RDB: 12 MB of memory used by copy-on-write +# + +# +# Format 2: +# +# 31493:M 17 Sep 09:02:54.807 # Server started, Redis version 3.0.2 +# 31493:M 17 Sep 09:02:54.807 # WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm$ +# 31493:M 17 Sep 09:02:54.807 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128. +# 31493:M 17 Sep 09:02:54.807 * DB loaded from disk: 0.000 seconds +# 31493:M 17 Sep 09:02:54.807 * The server is now ready to accept connections on port 6379 +# + +REDISTIMESTAMP %{MONTHDAY} %{MONTH} %{TIME} +REDISLOG \[%{POSINT:pid}\] %{REDISTIMESTAMP:time} \*\s +REDISLOG1 %{REDISLOG} +REDISLOG2 %{POSINT:pid}:M %{REDISTIMESTAMP:time} [*#] %{GREEDYDATA:message} \ No newline at end of file diff --git a/config/patterns/ruby b/config/patterns/ruby new file mode 100644 index 000000000..845ba0db0 --- /dev/null +++ b/config/patterns/ruby @@ -0,0 +1,2 @@ +RUBY_LOGLEVEL DEBUG|FATAL|ERROR|WARN|INFO +RUBY_LOGGER [DFEWI], \[%{TIMESTAMP_ISO8601:timestamp} #%{POSINT:pid}\] *%{RUBY_LOGLEVEL:loglevel} -- +%{DATA:progname}: %{GREEDYDATA:message} \ No newline at end of file diff --git a/config/patterns/smb b/config/patterns/smb new file mode 100644 index 000000000..38b1f4d8c --- /dev/null +++ b/config/patterns/smb @@ -0,0 +1 @@ +SMB_AUTH_FAIL Auth:%{GREEDYDATA} user \[%{DATA:smb_domain}\]\\\[%{DATA:user}\]%{GREEDYDATA} status \[NT_STATUS_NO_SUCH_USER\]%{GREEDYDATA} remote host \[ipv4:%{IP:ip_source} \ No newline at end of file diff --git a/config/patterns/ssh b/config/patterns/ssh new file mode 100644 index 000000000..4fd5b0d07 --- /dev/null +++ b/config/patterns/ssh @@ -0,0 +1,62 @@ +# sshd grok pattern + +# Start/Stop +SSHD_LISTEN Server listening on %{IP:sshd_listen_ip} port %{NUMBER:sshd_listen_port}. +SSHD_TERMINATE Received signal %{NUMBER:sshd_signal}; terminating. + +# SSH Tunnel +SSHD_TUNN_ERR1 error: connect_to %{IP:sshd_listen_ip} port %{NUMBER:sshd_listen_port}: failed. +SSHD_TUNN_ERR2 error: channel_setup_fwd_listener: cannot listen to port: %{NUMBER:sshd_listen_port} +SSHD_TUNN_ERR3 error: bind: Address already in use +SSHD_TUNN_ERR4 error: channel_setup_fwd_listener_tcpip: cannot listen to port: %{NUMBER:sshd_listen_port} +SSHD_TUNN_TIMEOUT Timeout, client not responding. 
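Editor's note on `config/patterns/ssh`: the file defines many small named patterns and then composes them into per-family alternations (`SSHD_LOG` at the end). As a sketch of how one of them behaves, here is a hand-expanded, simplified version of `SSHD_FAIL` (its grok definition appears a few lines further down) matched against a line taken from `logs/ssh_bf` later in this change set; `%{USERNAME}` and `%{IP}` are loosened for brevity:

```
package main

import (
	"fmt"
	"regexp"
)

// Hand-expanded, simplified SSHD_FAIL:
// Failed %{WORD} for %{USERNAME} from %{IP} port %{NUMBER} %{WORD}
var sshdFail = regexp.MustCompile(
	`Failed (?P<sshd_auth_type>\w+) for (?P<sshd_invalid_user>[a-zA-Z0-9._-]+) ` +
		`from (?P<sshd_client_ip>\d{1,3}(?:\.\d{1,3}){3}) ` +
		`port (?P<sshd_port>\d+) (?P<sshd_protocol>\w+)`)

func main() {
	// Sample line taken from logs/ssh_bf in this change set.
	line := "Failed password for root from 192.168.13.38 port 39596 ssh2"
	m := sshdFail.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no match")
		return
	}
	for i, name := range sshdFail.SubexpNames() {
		if i > 0 && name != "" {
			fmt.Printf("%s=%s\n", name, m[i]) // sshd_invalid_user=root, sshd_client_ip=192.168.13.38, ...
		}
	}
}
```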
+ +# Normal +SSHD_SUCCESS Accepted %{WORD:sshd_auth_type} for %{USERNAME:sshd_user} from %{IP:sshd_client_ip} port %{NUMBER:sshd_port} %{WORD:sshd_protocol}: %{GREEDYDATA:sshd_cipher} +SSHD_DISCONNECT Received disconnect from %{IP:sshd_client_ip} port %{NUMBER:sshd_port}:%{NUMBER:sshd_disconnect_code}: %{GREEDYDATA:sshd_disconnect_status} +SSHD_CONN_CLOSE Connection closed by %{IP:sshd_client_ip}$ +SSHD_SESSION_OPEN pam_unix\(sshd:session\): session opened for user %{USERNAME:sshd_user} by \(uid=\d+\) +SSHD_SESSION_CLOSE pam_unix\(sshd:session\): session closed for user %{USERNAME:sshd_user} +SSHD_SESSION_FAIL pam_systemd\(sshd:session\): Failed to release session: %{GREEDYDATA:sshd_disconnect_status} +SSHD_LOGOUT_ERR syslogin_perform_logout: logout\(\) returned an error + +# Probe +SSHD_REFUSE_CONN refused connect from %{DATA:sshd_client_hostname} \(%{IPORHOST:sshd_client_ip}\) +SSHD_TCPWRAP_FAIL1 warning: %{DATA:sshd_tcpd_file}, line %{NUMBER}: can't verify hostname: getaddrinfo\(%{DATA:sshd_paranoid_hostname}, %{DATA:sshd_sa_family}\) failed +SSHD_TCPWRAP_FAIL2 warning: %{DATA:sshd_tcpd_file}, line %{NUMBER}: host name/address mismatch: %{IPORHOST:sshd_client_ip} != %{HOSTNAME:sshd_paranoid_hostname} +SSHD_TCPWRAP_FAIL3 warning: %{DATA:sshd_tcpd_file}, line %{NUMBER}: host name/name mismatch: %{HOSTNAME:sshd_paranoid_hostname_1} != %{HOSTNAME:sshd_paranoid_hostname_2} +SSHD_TCPWRAP_FAIL4 warning: %{DATA:sshd_tcpd_file}, line %{NUMBER}: host name/name mismatch: reverse lookup results in non-FQDN %{HOSTNAME:sshd_paranoid_hostname} +SSHD_TCPWRAP_FAIL5 warning: can't get client address: Connection reset by peer +SSHD_FAIL Failed %{WORD:sshd_auth_type} for %{USERNAME:sshd_invalid_user} from %{IP:sshd_client_ip} port %{NUMBER:sshd_port} %{WORD:sshd_protocol} +SSHD_USER_FAIL Failed password for invalid user %{USERNAME:sshd_invalid_user} from %{IP:sshd_client_ip} port %{NUMBER:sshd_port} %{WORD:sshd_protocol} +SSHD_INVAL_USER Invalid user\s*%{USERNAME:sshd_invalid_user}? from %{IP:sshd_client_ip} + +# preauth +SSHD_DISC_PREAUTH Disconnected from %{IP:sshd_client_ip} port %{NUMBER:sshd_port}\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +SSHD_RECE_PREAUTH (?:error: |)Received disconnect from %{IP:sshd_client_ip} port %{NUMBER:sshd_port}:%{NUMBER:sshd_disconnect_code}: %{GREEDYDATA:sshd_disconnect_status}? \[%{GREEDYDATA:sshd_privsep}\] +SSHD_MAXE_PREAUTH error: maximum authentication attempts exceeded for (?:invalid user |)%{USERNAME:sshd_invalid_user} from %{IP:sshd_client_ip} port %{NUMBER:sshd_port} %{WORD:sshd_protocol}\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +SSHD_DISR_PREAUTH Disconnecting: %{GREEDYDATA:sshd_disconnect_status} \[%{GREEDYDATA:sshd_privsep}\] +SSHD_INVA_PREAUTH input_userauth_request: invalid user %{USERNAME:sshd_invalid_user}?\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +SSHD_REST_PREAUTH Connection reset by %{IP:sshd_client_ip} port %{NUMBER:sshd_port}\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +SSHD_CLOS_PREAUTH Connection closed by %{IP:sshd_client_ip} port %{NUMBER:sshd_port}\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +SSHD_FAIL_PREAUTH fatal: Unable to negotiate with %{IP:sshd_client_ip} port %{NUMBER:sshd_port}:\s*%{GREEDYDATA:sshd_disconnect_status}? \[%{GREEDYDATA:sshd_privsep}\] +SSHD_FAI2_PREAUTH fatal: %{GREEDYDATA:sshd_fatal_status}: Connection from %{IP:sshd_client_ip} port %{NUMBER:sshd_port}:\s*%{GREEDYDATA:sshd_disconnect_status}? \[%{GREEDYDATA:sshd_privsep}\] +SSHD_BADL_PREAUTH Bad packet length %{NUMBER:sshd_packet_length}. 
\[%{GREEDYDATA:sshd_privsep}\] + +# Corrupted +SSHD_IDENT_FAIL Did not receive identification string from %{IP:sshd_client_ip} +SSHD_MAPB_FAIL Address %{IP:sshd_client_ip} maps to %{HOSTNAME:sshd_client_hostname}, but this does not map back to the address - POSSIBLE BREAK-IN ATTEMPT! +SSHD_RMAP_FAIL reverse mapping checking getaddrinfo for %{HOSTNAME:sshd_client_hostname} \[%{IP:sshd_client_ip}\] failed - POSSIBLE BREAK-IN ATTEMPT! +SSHD_TOOMANY_AUTH Disconnecting: Too many authentication failures for %{USERNAME:sshd_invalid_user} +SSHD_CORRUPT_MAC Corrupted MAC on input +SSHD_PACKET_CORRUPT Disconnecting: Packet corrupt +SSHD_BAD_VERSION Bad protocol version identification '%{GREEDYDATA}' from %{IP:sshd_client_ip} + +#### +SSHD_INIT %{SSHD_LISTEN}|%{SSHD_TERMINATE} +SSHD_TUNN %{SSHD_TUNN_ERR1}|%{SSHD_TUNN_ERR2}|%{SSHD_TUNN_ERR3}|%{SSHD_TUNN_ERR4}|%{SSHD_TUNN_TIMEOUT} +SSHD_NORMAL_LOG %{SSHD_SUCCESS}|%{SSHD_DISCONNECT}|%{SSHD_CONN_CLOSE}|%{SSHD_SESSION_OPEN}|%{SSHD_SESSION_CLOSE}|%{SSHD_SESSION_FAIL}|%{SSHD_LOGOUT_ERR} +SSHD_PROBE_LOG %{SSHD_REFUSE_CONN}|%{SSHD_TCPWRAP_FAIL1}|%{SSHD_TCPWRAP_FAIL2}|%{SSHD_TCPWRAP_FAIL3}|%{SSHD_TCPWRAP_FAIL4}|%{SSHD_TCPWRAP_FAIL5}|%{SSHD_FAIL}|%{SSHD_USER_FAIL}|%{SSHD_INVAL_USER} +SSHD_PREAUTH %{SSHD_DISC_PREAUTH}|%{SSHD_RECE_PREAUTH}|%{SSHD_MAXE_PREAUTH}|%{SSHD_DISR_PREAUTH}|%{SSHD_INVA_PREAUTH}|%{SSHD_REST_PREAUTH}|%{SSHD_FAIL_PREAUTH}|%{SSHD_CLOS_PREAUTH}|%{SSHD_FAI2_PREAUTH}|%{SSHD_BADL_PREAUTH} +SSHD_CORRUPTED %{SSHD_IDENT_FAIL}|%{SSHD_MAPB_FAIL}|%{SSHD_RMAP_FAIL}|%{SSHD_TOOMANY_AUTH}|%{SSHD_CORRUPT_MAC}|%{SSHD_PACKET_CORRUPT}|%{SSHD_BAD_VERSION} +SSHD_LOG %{SSHD_INIT}|%{SSHD_NORMAL_LOG}|%{SSHD_PROBE_LOG}|%{SSHD_CORRUPTED}|%{SSHD_TUNN}|%{SSHD_PREAUTH} diff --git a/config/patterns/tcpdump b/config/patterns/tcpdump new file mode 100644 index 000000000..8c7610544 --- /dev/null +++ b/config/patterns/tcpdump @@ -0,0 +1 @@ +TCPDUMP_OUTPUT %{GREEDYDATA:timestamp} IP %{IPORHOST:source_ip}\.%{INT:source_port} > %{IPORHOST:dest_ip}\.%{INT:dest_port}: Flags \[%{GREEDYDATA:tcpflags}\], seq diff --git a/config/plugins/backend/sqlite.yaml b/config/plugins/backend/sqlite.yaml new file mode 100644 index 000000000..c4250c15f --- /dev/null +++ b/config/plugins/backend/sqlite.yaml @@ -0,0 +1,5 @@ +name: sqlite +path: /var/lib/crowdsec/plugins/backend/sqlite.so +config: + db_path: /var/lib/crowdsec/data/crowdsec.db + flush: true diff --git a/config/prod.yaml b/config/prod.yaml new file mode 100644 index 000000000..e1dce77c6 --- /dev/null +++ b/config/prod.yaml @@ -0,0 +1,16 @@ +working_dir: /tmp/ +data_dir: ${DATA} +config_dir: ${CFG} +pid_dir: ${PID} +log_dir: /var/log/ +log_mode: file +log_level: info +profiling: false +sqlite_path: ${DATA}/crowdsec.db +apimode: true +daemon: true +prometheus: true +#for prometheus agent / golang debugging +http_listen: 127.0.0.1:6060 +plugin: + backend: "/etc/crowdsec/plugins/backend" diff --git a/config/profiles.yaml b/config/profiles.yaml new file mode 100644 index 000000000..d0cc48d47 --- /dev/null +++ b/config/profiles.yaml @@ -0,0 +1,18 @@ +profile: default_remediation +filter: "sig.Labels.remediation == 'true'" +api: true # If api: is not specified, the default config in default.yaml is used +remediation: + ban: true + slow: true + captcha: true + duration: 4h +outputs: + - plugin: sqlite +--- +profile: default_notification +filter: "sig.Labels.remediation != 'true'" +#remediation is empty, meaning none is taken +api: false +outputs: + - plugin: sqlite # If we do not want to push, we can remove this line and the next one + store: false
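Editor's note on `config/profiles.yaml` above: the `filter` fields are expressions evaluated against each signal, and go.mod below pulls in `github.com/antonmedv/expr`, which handles exactly this kind of expression. A minimal, hypothetical sketch of such an evaluation; the `Signal` type here is a stand-in for crowdsec's real signal object, reduced to the single field the filter touches:

```
package main

import (
	"fmt"

	"github.com/antonmedv/expr"
)

// Hypothetical stand-in for the object the profile filter is evaluated
// against; only the Labels field is modelled here.
type Signal struct {
	Labels map[string]string
}

func main() {
	env := map[string]interface{}{
		"sig": Signal{Labels: map[string]string{"remediation": "true"}},
	}
	// Same expression as the default_remediation profile's filter.
	out, err := expr.Eval(`sig.Labels.remediation == 'true'`, env)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true -> the remediation profile applies to this signal
}
```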
diff --git a/data/GeoLite2-ASN.mmdb b/data/GeoLite2-ASN.mmdb new file mode 100644 index 000000000..8d16066cc Binary files /dev/null and b/data/GeoLite2-ASN.mmdb differ diff --git a/data/GeoLite2-City.mmdb b/data/GeoLite2-City.mmdb new file mode 100644 index 000000000..6ab05184b Binary files /dev/null and b/data/GeoLite2-City.mmdb differ diff --git a/doc/img/crowdwatch-global.png b/doc/img/crowdwatch-global.png new file mode 100644 index 000000000..a8a773e5e Binary files /dev/null and b/doc/img/crowdwatch-global.png differ diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 000000000..a7382d33c --- /dev/null +++ b/docker/README.md @@ -0,0 +1,32 @@ +# Crowdwatch with Docker + + +## Getting Started + +Go to the main folder of crowdsec (if you are in the `docker/` folder, run `cd ..`). + + +- Build the Docker image + +``` +docker build -t crowdsec . +``` + + +- Run the container + + +``` +docker run -d -p 514:514 --name crowdsec -v /var/run/crowdsec/crowdsec.db:/var/run/crowdsec/crowdsec.db crowdsec +``` + +:warning: Be sure that your ban plugin gets its decisions from the db located in `/var/run/crowdsec/crowdsec.db` on your host. + + + + +## TODO + + - Be sure that bans are applied on the host + - Check that the sqlite db is created by crowdsec in the container and read by the ban plugin on the host + - Forward traffic to the container's syslog listener (127.0.0.1:514) and check that logs are correctly parsed diff --git a/docker/acquis.yaml b/docker/acquis.yaml new file mode 100644 index 000000000..a1892ba64 --- /dev/null +++ b/docker/acquis.yaml @@ -0,0 +1,4 @@ +filenames: + - /var/log/syslog +labels: + type: syslog \ No newline at end of file diff --git a/docker/docker.yaml b/docker/docker.yaml new file mode 100644 index 000000000..542c25306 --- /dev/null +++ b/docker/docker.yaml @@ -0,0 +1,15 @@ +working_dir: /tmp/ +data_dir: /var/run/crowdsec/ +config_dir: /etc/crowdsec +pid_dir: /var/run/ +log_dir: /var/log/ +log_mode: stdout +log_level: debug +profiling: false +sqlite_path: /var/run/crowdsec/crowdsec.db +apimode: false +daemon: false +prometheus: true +#for prometheus agent / golang debugging +http_listen: 127.0.0.1:6060 + diff --git a/docker/rsyslog.conf b/docker/rsyslog.conf new file mode 100644 index 000000000..f031ba715 --- /dev/null +++ b/docker/rsyslog.conf @@ -0,0 +1,44 @@ +################# +#### MODULES #### +################# +# Uncomment the following line and comment out everything else to get debug output for variables +# *.* /var/log/debugfmt;RSYSLOG_DebugFormat + +module(load="imudp") +module(load="imtcp") + +########################### +#### GLOBAL DIRECTIVES #### +########################### + +# +# Use traditional timestamp format. +# To enable high precision timestamps, comment out the following line.
+# +$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat + +# +# Where to place spool and state files +# +$WorkDirectory /var/spool/rsyslog + +# +# Include all config files in /etc/rsyslog.d/ +# +$IncludeConfig /etc/rsyslog.d/*.conf +global(net.enableDNS="off") + +########################### +# Input Parameters +########################### + +input(type="imtcp" port="514") +input(type="imudp" port="514") + +########################### +#### Central log stuff #### +########################### + +$template RemoteLogs, "/var/log/syslog" +*.* ?RemoteLogs +& ~ \ No newline at end of file diff --git a/docker/wrapper.sh b/docker/wrapper.sh new file mode 100644 index 000000000..40efc9c5c --- /dev/null +++ b/docker/wrapper.sh @@ -0,0 +1,42 @@ +#!/bin/sh + +CROWDSEC="crowdsec" +SYSLOG_NG="rsyslogd" + + +# Start rsyslog first, so logs can be received as soon as crowdsec is up +rsyslogd -n -f /etc/rsyslog.conf & +status=$? +if [ $status -ne 0 ]; then + echo "Failed to start ${SYSLOG_NG}: $status" + exit $status +fi + + +# Then start crowdsec itself +/usr/local/bin/crowdsec -c /etc/crowdsec/docker.yaml & +status=$? +if [ $status -ne 0 ]; then + echo "Failed to start ${CROWDSEC}: $status" + exit $status +fi + + +# Naive check: once a minute, see whether either of the processes exited. +# This illustrates part of the heavy lifting you need to do if you want to run +# more than one service in a container. The container exits with an error +# if it detects that either of the processes has exited. +# Otherwise it loops forever, waking up every 60 seconds. + +while sleep 60; do + ps aux |grep ${CROWDSEC} |grep -q -v grep + PROCESS_1_STATUS=$? + ps aux |grep ${SYSLOG_NG} |grep -q -v grep + PROCESS_2_STATUS=$? + # If the greps above find anything, they exit with 0 status + # If they are not both 0, then something is wrong + if [ $PROCESS_1_STATUS -ne 0 -o $PROCESS_2_STATUS -ne 0 ]; then + echo "One of the processes has already exited."
+ exit 1 + fi +done \ No newline at end of file diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..f8f7c04d9 --- /dev/null +++ b/go.mod @@ -0,0 +1,47 @@ +module github.com/crowdsecurity/crowdsec + +go 1.13 + +require ( + github.com/Microsoft/go-winio v0.4.14 // indirect + github.com/antonmedv/expr v1.8.2 + github.com/containerd/containerd v1.3.4 // indirect + github.com/davecgh/go-spew v1.1.1 + github.com/denisbrodbeck/machineid v1.0.1 + github.com/dghubble/sling v1.3.0 + github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/docker/docker v17.12.0-ce-rc1.0.20200419140219-55e6d7d36faf+incompatible + github.com/docker/go-connections v0.4.0 + github.com/docker/go-units v0.4.0 // indirect + github.com/enescakir/emoji v1.0.0 + github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e + github.com/hashicorp/go-version v1.2.0 + github.com/jamiealquiza/tachymeter v2.0.0+incompatible + github.com/jinzhu/gorm v1.9.12 + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/logrusorgru/grokky v0.0.0-20180829062225-47edf017d42c + github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/mattn/go-sqlite3 v2.0.3+incompatible + github.com/nxadm/tail v1.4.4 + github.com/olekukonko/tablewriter v0.0.4 + github.com/opencontainers/go-digest v1.0.0-rc1 // indirect + github.com/opencontainers/image-spec v1.0.1 // indirect + github.com/oschwald/geoip2-golang v1.4.0 + github.com/oschwald/maxminddb-golang v1.6.0 + github.com/prometheus/client_golang v1.5.1 + github.com/prometheus/client_model v0.2.0 + github.com/prometheus/common v0.9.1 + github.com/prometheus/prom2json v1.3.0 + github.com/sevlyar/go-daemon v0.1.5 + github.com/sirupsen/logrus v1.5.0 + github.com/spf13/cobra v0.0.7 + golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect + golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 + golang.org/x/time v0.0.0-20191024005414-555d28b269f0 + golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 + gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 + gopkg.in/yaml.v2 v2.2.8 +) + +replace golang.org/x/time/rate => github.com/crowdsecurity/crowdsec/pkg/time/rate v0.0.0 diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..3a0b4d69a --- /dev/null +++ b/go.sum @@ -0,0 +1,318 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d 
h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/antonmedv/expr v1.8.2 h1:BfkVHGudYqq7jp3Ji33kTn+qZ9D19t/Mndg0ag/Ycq4= +github.com/antonmedv/expr v1.8.2/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/containerd/containerd v1.3.4 h1:3o0smo5SKY7H6AJCmJhsnCjR2/V2T8VmiHt7seN2/kI= +github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denisbrodbeck/machineid v1.0.1 h1:geKr9qtkB876mXguW2X6TU4ZynleN6ezuMSRhl4D7AQ= +github.com/denisbrodbeck/machineid v1.0.1/go.mod h1:dJUwb7PTidGDeYyUBmXZ2GphQBbjJCrnectwCyxcUSI= +github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/dghubble/sling v1.3.0 h1:pZHjCJq4zJvc6qVQ5wN1jo5oNZlNE0+8T/h0XeXBUKU= +github.com/dghubble/sling v1.3.0/go.mod h1:XXShWaBWKzNLhu2OxikSNFrlsvowtz4kyRuXUG7oQKY= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo= +github.com/docker/docker 
v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v17.12.0-ce-rc1.0.20200419140219-55e6d7d36faf+incompatible h1:QDfFL8R7RwJTBJKkJuSNGs0SYjyxnwikV/kojSTLi2E= +github.com/docker/docker v17.12.0-ce-rc1.0.20200419140219-55e6d7d36faf+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/enescakir/emoji v1.0.0 h1:W+HsNql8swfCQFtioDGDHCHri8nudlK1n5p2rHCJoog= +github.com/enescakir/emoji v1.0.0/go.mod h1:Bt1EKuLnKDTYpLALApstIkAjdDrS/8IAgTkKp+WKFD0= +github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= +github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= 
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e h1:XmA6L9IPRdUr28a+SK/oMchGgQy159wvzXA5tJ7l+40= +github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e/go.mod h1:AFIo+02s+12CEg8Gzz9kzhCbmbq6JcKNrhHffCGA9z4= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jamiealquiza/tachymeter v2.0.0+incompatible h1:mGiF1DGo8l6vnGT8FXNNcIXht/YmjzfraiUprXYwJ6g= +github.com/jamiealquiza/tachymeter v2.0.0+incompatible/go.mod h1:Ayf6zPZKEnLsc3winWEXJRkTBhdHo58HODAu1oFJkYU= +github.com/jinzhu/gorm v1.9.12 h1:Drgk1clyWT9t9ERbzHza6Mj/8FY/CqMyVzOiHviMo6Q= +github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs= +github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/logrusorgru/grokky v0.0.0-20180829062225-47edf017d42c h1:S3P1IbG7Z7V2p9juEttr1oRwozZd2kxw+RQiYBYB1wQ= +github.com/logrusorgru/grokky 
v0.0.0-20180829062225-47edf017d42c/go.mod h1:YnDG6D6tn35XF4NJXUtoqoC84FYlBPao8PZ8QzN4Zxo= +github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s= +github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.8 h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuujKs0= +github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= +github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8= +github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= +github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/oschwald/geoip2-golang v1.4.0 h1:5RlrjCgRyIGDz/mBmPfnAF4h8k0IAcRv9PvrpOfz+Ug= +github.com/oschwald/geoip2-golang v1.4.0/go.mod h1:8QwxJvRImBH+Zl6Aa6MaIcs5YdlZSTKtzmPGzQqi9ng= +github.com/oschwald/maxminddb-golang v1.6.0 h1:KAJSjdHQ8Kv45nFIbtoLGrGWqHFajOIm7skTyz/+Dls= +github.com/oschwald/maxminddb-golang v1.6.0/go.mod h1:DUJFucBg2cvqx42YmDa/+xHvb0elJtOm3o4aFQ/nb/w= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors 
v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/prom2json v1.3.0 h1:BlqrtbT9lLH3ZsOVhXPsHzFrApCTKRifB7gjJuypu6Y= +github.com/prometheus/prom2json v1.3.0/go.mod h1:rMN7m0ApCowcoDlypBHlkNbp5eJQf/+1isKykIP5ZnM= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= +github.com/sevlyar/go-daemon v0.1.5 h1:Zy/6jLbM8CfqJ4x4RPr7MJlSKt90f00kNM1D401C+Qk= +github.com/sevlyar/go-daemon v0.1.5/go.mod 
h1:6dJpPatBT9eUwM5VCw9Bt6CdX9Tk6UWvhW3MebLDRKE= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q= +github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.7 h1:FfTH+vuMXOas8jmfb5/M7dzEYx7LpcLb7a0LPe34uOU= +github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191224085550-c709ea063b76/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e h1:3Dzrrxi54Io7Aoyb0PYLsI47K2TxkRQg+cqUn+m04do= +golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0 h1:G+97AoqBnmZIT91cLG/EkCoK9NSelj64P8bOHHNmGn0= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= 
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs= +gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/logs/naxsi_trigger b/logs/naxsi_trigger new file mode 100644 index 000000000..d8f610aae --- /dev/null +++ b/logs/naxsi_trigger @@ -0,0 +1 @@ +2018-04-27T15:46:50+02:00 rp-ch-01 nginx: 2018/04/27 15:46:50 [error] 20329#0: *81170632 NAXSI_EXLOG: ip=191.154.37.115&server=cogedis.trustelem.com&uri=/app/55773/sso&id=10091&zone=ARGS&var_name=signature&content=gTyxddzKMBjOQ6iiNXsauWKyznrWzgzobNS5L226v23%2BSvh0z8uKrZbErckzPs7sF1Yif/T9P1O2Fmm05mSu1%2BL/TBAt1G2JsDv2%2B0zp2blECZFMMTfpgcyIeITDgh8HGM5GR9K2diB6/d1g5yShZs6Vm9%2BMCtXVO4gfpFwH4sSM7jbjU5xbShmiKkYNn3O8f3ZAdnZpk3%2BELVcODIGWwhRuN9Hy6agMirzx4PMTUWcDmdnB9W4iDcV/k28xnxuBE0vNw1JAL9sOSqrBnzqKk%2BUx9kt9hfEofvDYPvLfWiU56oEd8yzT1fEn21dzA6BcOCetzYoNjSdYDreKQm4O%2BVAgn90WKjvcORK%2BO3CkPR5%2B9N4d1hMLc10ZrKps4iHiJMG%2BRHvzBxL3yeYGdmdjX%2Bf6ZKjPkI3dTwP9379Wong0/DZ4BQ8ZC6SozID68PXybKynOGauaUxKCt3y3fAXSLH1Qtcl70kVQ9eQa1q%2B%2BZxujCGJ33sVl6ps10iLn2lYoJ85CAXCk%2B7p%2BMKOQzwGaFUBuVMgVbxATRQPnCN%2BHPymQ23LwWtKQbvRtJpahyPR9Yb6mUbf7JO1H2XF6%2BsPp4pcIZqv/SwJlgxSkPT5ehnJjLUhVIFu6SGlau1C0B/LUgHoZ8c%2Bkoy%2BfzzPqQPO2I1Y5SXFWwFPU6dbBgz1p%2BQ=, client: 77.136.47.223, server: www.trustelem.com, request: "GET 
/app/55773/sso?SAMLRequest=fZJbc6owFIX%2FCpN3NCJUZIqdtHihglfU2hcmjRGwQDAJaPvrD%2Bpxpuc8dM%2FkIbP3WiuX7%2FHpnKVKRblIWG6DVgMCheaE7ZI8ssEqGKgmeOo9CpylhYVKGecLeiypkEqty4V1bdig5LnFsEiEleOMCksSa4l8z9Ia0Co4k4ywFChICMplHfTCclFmlC8prxJCVwvPBrGUhbCazWRHsSopiXOWsiihopF9NQROqdgzTmiDsOxJMBtCxzDhtWbaNgKKUx8qybG83uNuRlhEd4loSF4KSVOaXeRNXBRNw%2Bh02k0hGFBcxwah9oLq2kzf1PMG%2BX3zNAmik%2B%2Bgy4Lz7094abe8aDMIk%2B3gIYz7zmrGzYU26n8Rrnn7c3beIndjurm63Q2HqTg%2Ff3M1LeHSgL67LraTKD6ij5ggPVjrHwjiKqlN8cP3J0F9nfnF4ICNlbtIzdepF3jxpDIO%2BxF3dv336t1cqN0Xz5fz1f4Ai7QfszOVejUMsoOero9V130bw8ioxsjcxQe9%2B6qy6tBpif0Yh1lZlGietsnpzRkQj0WOxK%2BeHh4jDTPzxMQUr8LhKFTna6KNfX5oLRblftyuw4elQMOQH1MXn7OsTVD9WkKU1M2FxLm0gQZbpgp1VesELcPSHyy929DbnXegzP5%2B%2B3OS32D6jZGP25CwRkEwU2fTZQCU9R3KegDcELSu4fwHe7%2Fb4jtwoHcn4iL6D6fH5g%2Fv3m33L%2By9Pw%3D%3D&RelayState=%2Fa085800002amsSg&SigAlg=http%3A%2F%2Fwww.w3.org%2F2001%2F04%2Fxmldsig-more%23rsa-sha256&Signature=gTyxddzKMBjOQ6iiNXsauWKyznrWzgzobNS5L226v23%2BSvh0z8uKrZbErckzPs7sF1Yif%2FT9P1O2Fmm05mSu1%2BL%2FTBAt1G2JsDv2%2B0zp2blECZFMMTfpgcyIeITDgh8HGM5GR9K2diB6%2Fd1g5yShZs6Vm9%2BMCt diff --git a/logs/ssh_bf b/logs/ssh_bf new file mode 100644 index 000000000..1d5890df4 --- /dev/null +++ b/logs/ssh_bf @@ -0,0 +1,22 @@ +2018-02-07T18:00:06+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2 +2018-02-07T18:00:09+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2 +2018-02-07T18:00:12+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2 +2018-02-07T18:00:12+01:00 eqx10863 sshd[13934]: Disconnecting: Too many authentication failures for root from 192.168.13.38 port 39596 ssh2 [preauth] +2018-02-07T18:00:21+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:23+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:26+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:29+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:31+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:31+01:00 eqx10863 sshd[13952]: Disconnecting: Too many authentication failures for root from 192.168.13.38 port 2377 ssh2 [preauth] +2018-02-07T18:00:31+01:00 eqx10863 sshd[13952]: PAM 5 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=192.168.13.38 user=root +2018-02-07T18:00:31+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2 +2018-02-07T18:00:31+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2 +2018-02-07T18:00:32+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2 +2018-02-07T18:00:32+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2 +2018-02-07T18:00:33+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2 +2018-02-07T18:00:34+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2 +2018-02-07T18:00:34+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2 +2018-02-07T18:00:34+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2 +2018-02-07T18:00:34+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2 +2018-02-07T18:00:34+01:00 eqx10863 
sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2
+2018-02-07T18:00:34+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2
diff --git a/pkg/acquisition/file_reader.go b/pkg/acquisition/file_reader.go
new file mode 100644
index 000000000..c786278f1
--- /dev/null
+++ b/pkg/acquisition/file_reader.go
@@ -0,0 +1,320 @@
+package acquisition
+
+import (
+    "bufio"
+    "compress/gzip"
+    "encoding/json"
+    "errors"
+    "fmt"
+    "io"
+    "os"
+    "strings"
+
+    leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
+    "github.com/crowdsecurity/crowdsec/pkg/types"
+
+    tomb "gopkg.in/tomb.v2"
+    "gopkg.in/yaml.v2"
+
+    "path/filepath"
+    "time"
+
+    "github.com/prometheus/client_golang/prometheus"
+    log "github.com/sirupsen/logrus"
+    "golang.org/x/sys/unix"
+
+    "github.com/nxadm/tail"
+)
+
+type Acquisition interface {
+    Init(map[string]interface{}) (interface{}, error)
+    ReadOne(interface{}) (string, error)
+}
+
+type FileCtx struct {
+    Type      string   `yaml:"type,omitempty"` //file|bin|...
+    Mode      string   `yaml:"mode,omitempty"` //tail|cat|...
+    Filename  string   `yaml:"filename,omitempty"`
+    Filenames []string `yaml:"filenames,omitempty"`
+    tail      *tail.Tail
+
+    Labels    map[string]string `yaml:"labels,omitempty"`
+    Profiling bool              `yaml:"profiling,omitempty"`
+}
+
+type FileAcquisCtx struct {
+    Files     []FileCtx
+    Profiling bool
+}
+
+const (
+    TAILMODE = "tail"
+    CATMODE  = "cat"
+)
+
+const (
+    FILETYPE = "file"
+    BINTYPE  = "bin"
+)
+
+var ReaderHits = prometheus.NewCounterVec(
+    prometheus.CounterOpts{
+        Name: "cs_reader_hits",
+        Help: "How many lines were read.",
+    },
+    []string{"source"},
+)
+
+func InitReader(cfg string) (*FileAcquisCtx, error) {
+    var files []FileCtx
+
+    yamlFile, err := os.Open(cfg)
+    if err != nil {
+        log.Errorf("Can't access acquisition configuration file: %v", err)
+        return nil, err
+    }
+    //process the yaml
+    dec := yaml.NewDecoder(yamlFile)
+    dec.SetStrict(true)
+    for {
+        t := FileCtx{}
+        err = dec.Decode(&t)
+        if err != nil {
+            if err == io.EOF {
+                log.Tracef("End of yaml file")
+                break
+            }
+            log.Fatalf("Error decoding acquisition configuration file '%s': %v", cfg, err)
+        }
+        files = append(files, t)
+    }
+    return InitReaderFromFileCtx(files)
+}
+
+//InitReaderFromFileCtx iterates over the FileCtx objects of cfg and resolves globbing to open files
+func InitReaderFromFileCtx(files []FileCtx) (*FileAcquisCtx, error) {
+
+    var ctx *FileAcquisCtx = &FileAcquisCtx{}
+
+    for _, t := range files {
+        //defaults to file type in tail mode.
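+        //An illustrative minimal acquisition entry, given the yaml tags of
+        //FileCtx above (hypothetical path; any non-empty label set works,
+        //since entries without labels are skipped below):
+        //  filename: /var/log/auth.log
+        //  labels:
+        //    type: syslog
+        //entries with no explicit type or mode fall through to the defaults here.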
+ if t.Type == "" { + t.Type = FILETYPE + } + if t.Mode == "" { + t.Mode = TAILMODE + } + //minimalist sanity check + if t.Filename == "" && len(t.Filenames) == 0 { + log.Infof("No filename or filenames, skipping empty item %+v", t) + continue + } + if len(t.Labels) == 0 { + log.Infof("Acquisition has no tags, skipping empty item %+v", t) + continue + } + + if len(t.Filename) > 0 { + t.Filenames = append(t.Filenames, t.Filename) + } + var opcpt int + //open the files indicated by `filename` and `filesnames` + for _, fglob := range t.Filenames { + opcpt = 0 + files, err := filepath.Glob(fglob) + if err != nil { + log.Errorf("error while globing '%s' : %v", fglob, err) + return nil, err + } + + for _, file := range files { + /*check that we can read said file*/ + if err := unix.Access(file, unix.R_OK); err != nil { + log.Errorf("Unable to open file [%s] : %v", file, err) + continue + } + log.Infof("Opening file '%s' (pattern:%s)", file, fglob) + fdesc := t + fdesc.Filename = file + fdesc.Filenames = []string{} + + switch t.Type { + case FILETYPE: + if t.Mode == TAILMODE { + fdesc.tail, err = tail.TailFile(file, tail.Config{ReOpen: true, Follow: true, Poll: true, Location: &tail.SeekInfo{Offset: 0, Whence: 2}}) + if err != nil { + log.Errorf("skipping '%s' : %v", file, err) + continue + } + } + case BINTYPE: + + default: + log.Fatalf("unexpected type %s for %+v", t.Type, t.Filenames) + } + opcpt++ + ctx.Files = append(ctx.Files, fdesc) + } + } + log.Debugf("'%v' opened %d files", t.Filenames, opcpt) + } + return ctx, nil +} + +//let's return an array of chans for signaling for now +func AcquisStartReading(ctx *FileAcquisCtx, output chan types.Event, AcquisTomb *tomb.Tomb) { + + if len(ctx.Files) == 0 { + log.Errorf("No files to read") + } + /* start one go routine reading for each file, and pushing to chan output */ + for idx, fctx := range ctx.Files { + log.Printf("starting reader file %d/%d : %s", idx, len(ctx.Files), fctx.Filename) + if ctx.Profiling == true { + fctx.Profiling = true + } + fctx := fctx + switch fctx.Mode { + case TAILMODE: + AcquisTomb.Go(func() error { + return AcquisReadOneFile(fctx, output, AcquisTomb) + }) + case CATMODE: + AcquisTomb.Go(func() error { + return ReadAtOnce(fctx, output, AcquisTomb) + }) + default: + log.Fatalf("unknown read mode %s for %+v", fctx.Mode, fctx.Filenames) + } + } + log.Printf("Started %d routines for polling/read", len(ctx.Files)) + return +} + +/*A tail-mode file reader (tail) */ +func AcquisReadOneFile(ctx FileCtx, output chan types.Event, AcquisTomb *tomb.Tomb) error { + clog := log.WithFields(log.Fields{ + "acquisition file": ctx.Filename, + }) + + if ctx.Type != FILETYPE { + log.Errorf("Can't tail %s type for %+v", ctx.Type, ctx.Filenames) + return fmt.Errorf("Can't tail %s type for %+v", ctx.Type, ctx.Filenames) + } + log.Infof("Starting tail of %s", ctx.Filename) + timeout := time.Tick(20 * time.Second) +LOOP: + for { + l := types.Line{} + select { + case <-AcquisTomb.Dying(): //we are being killed by main + clog.Infof("Killing acquistion routine") + break LOOP + case <-ctx.tail.Tomb.Dying(): //our tailer is dying + clog.Warningf("Reader is dying/dead") + return errors.New("reader is dead") + case line := <-ctx.tail.Lines: + if line == nil { + clog.Debugf("Nil line") + return errors.New("Tail is empty") + } + if line.Err != nil { + log.Warningf("fetch error : %v", line.Err) + return line.Err + } + if line.Text == "" { //skip empty lines + continue + } + if ctx.Profiling == true { + ReaderHits.With(prometheus.Labels{"source": 
+            l.Raw = line.Text
+            l.Labels = ctx.Labels
+            l.Time = line.Time
+            l.Src = ctx.Filename
+            l.Process = true
+            //we're tailing, it must be real time logs
+            output <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.LIVE}
+        case <-timeout:
+            //time out, shall we do stuff ?
+            clog.Tracef("timeout")
+        }
+    }
+    return nil
+}
+
+/*A one-shot file reader (cat) */
+func ReadAtOnce(ctx FileCtx, output chan types.Event, AcquisTomb *tomb.Tomb) error {
+    var scanner *bufio.Scanner
+
+    if len(ctx.Filenames) > 0 {
+        log.Errorf("no multi-file support for this mode.")
+        return fmt.Errorf("no multi-file support for this mode")
+    }
+    log.Infof("reading %s at once", ctx.Filename)
+    file := ctx.Filename
+
+    clog := log.WithFields(log.Fields{
+        "file": file,
+    })
+    fd, err := os.Open(file)
+    if err != nil {
+        clog.Errorf("Failed opening file: %s", err)
+        return err
+    }
+    //only defer the close once we know the open succeeded
+    defer fd.Close()
+
+    if ctx.Type == FILETYPE {
+        if strings.HasSuffix(file, ".gz") {
+            gz, err := gzip.NewReader(fd)
+            if err != nil {
+                clog.Errorf("Failed to read gz file: %s", err)
+                return err
+            }
+            defer gz.Close()
+            scanner = bufio.NewScanner(gz)
+
+        } else {
+            scanner = bufio.NewScanner(fd)
+        }
+        scanner.Split(bufio.ScanLines)
+        count := 0
+        for scanner.Scan() {
+            count++
+            l := types.Line{}
+            l.Raw = scanner.Text()
+            l.Time = time.Now()
+            l.Src = file
+            l.Labels = ctx.Labels
+            l.Process = true
+            //we're reading logs at once, it must be time-machine buckets
+            output <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.TIMEMACHINE}
+        }
+        clog.Warningf("read %d lines", count)
+    } else if ctx.Type == BINTYPE {
+        /*BINTYPE is only overflows for now*/
+        dec := json.NewDecoder(fd)
+        count := 0
+        for {
+            var p types.Event
+            if err := dec.Decode(&p); err == io.EOF {
+                break
+            } else if err != nil {
+                log.Warningf("While reading %s : %s", fd.Name(), err)
+                continue
+            }
+            count++
+            p.Type = types.OVFLW
+            p.Process = true
+            //we're reading logs at once, it must be time-machine buckets
+            p.ExpectMode = leaky.TIMEMACHINE
+            output <- p
+        }
+        clog.Warningf("unmarshaled %d events", count)
+
+    }
+    clog.Infof("force commit")
+    return nil
+}
diff --git a/pkg/config/crowdwatch/config.go b/pkg/config/crowdwatch/config.go
new file mode 100644
index 000000000..2b1c6c4a1
--- /dev/null
+++ b/pkg/config/crowdwatch/config.go
@@ -0,0 +1,155 @@
+package config
+
+import (
+    "flag"
+    "fmt"
+    "io/ioutil"
+    "path/filepath"
+
+    "os"
+
+    "github.com/crowdsecurity/crowdsec/pkg/cwversion"
+    "github.com/crowdsecurity/crowdsec/pkg/outputs"
+
+    log "github.com/sirupsen/logrus"
+    "gopkg.in/yaml.v2"
+)
+
+// Crowdwatch is the structure of the crowdsec configuration
+type Crowdwatch struct {
+    WorkingFolder   string `yaml:"working_dir,omitempty"`
+    DataFolder      string `yaml:"data_dir,omitempty"`
+    ConfigFolder    string `yaml:"config_dir,omitempty"`
+    AcquisitionFile string `yaml:"acquis_path,omitempty"`
+    SingleFile      string //for forensic mode
+    SingleFileLabel string //for forensic mode
+    PIDFolder       string `yaml:"pid_dir,omitempty"`
+    LogFolder       string `yaml:"log_dir,omitempty"`
+    LogMode         string `yaml:"log_mode,omitempty"` //file, syslog or stdout
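+    //The fields of this struct map to a yaml configuration file through the
+    //tags declared here; an illustrative (non-shipped) example, hypothetical
+    //paths only:
+    //  working_dir: /tmp/crowdsec
+    //  data_dir: /var/run/crowdsec/data
+    //  log_mode: stdout
+    //  sqlite_path: /var/run/crowdsec/crowdsec.db
+    //  http_listen: 127.0.0.1:6060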
+    LogLevel        log.Level `yaml:"log_level,omitempty"`   //trace,debug,info,warning,error
+    Daemonize       bool      `yaml:"daemon,omitempty"`      //true -> go background
+    Profiling       bool      `yaml:"profiling,omitempty"`   //true -> enable runtime profiling
+    SQLiteFile      string    `yaml:"sqlite_path,omitempty"` //path to sqlite output
+    APIMode         bool      `yaml:"apimode,omitempty"`     //true -> enable api push
+    Linter          bool
+    Prometheus      bool
+    HTTPListen      string `yaml:"http_listen,omitempty"`
+    ValidatorMode   string /*if present points to a specific config (for tests)*/
+    RestoreMode     string
+    DumpBuckets     bool
+    OutputConfig    *outputs.OutputFactory `yaml:"plugin"`
+}
+
+// NewCrowdwatchConfig creates a new crowdsec configuration with default values
+func NewCrowdwatchConfig() *Crowdwatch {
+    return &Crowdwatch{
+        LogLevel:      log.InfoLevel,
+        Daemonize:     false,
+        Profiling:     false,
+        WorkingFolder: "./",
+        DataFolder:    "./data/",
+        ConfigFolder:  "./config/",
+        PIDFolder:     "./",
+        LogFolder:     "./",
+        LogMode:       "stdout",
+        SQLiteFile:    "./test.db",
+        APIMode:       false,
+        Prometheus:    false,
+        HTTPListen:    "127.0.0.1:6060",
+    }
+}
+
+// GetOPT returns flags parsed from the command line
+func (c *Crowdwatch) GetOPT() error {
+
+    AcquisitionFile := flag.String("acquis", "", "path to acquis.yaml")
+    configFile := flag.String("c", "", "configuration file")
+    printTrace := flag.Bool("trace", false, "VERY verbose")
+    printDebug := flag.Bool("debug", false, "print debug-level on stdout")
+    printInfo := flag.Bool("info", false, "print info-level on stdout")
+    printVersion := flag.Bool("version", false, "display version")
+    APIMode := flag.Bool("api", false, "perform pushes to api")
+    SQLiteMode := flag.Bool("sqlite", true, "write overflows to sqlite")
+    profileMode := flag.Bool("profile", false, "Enable performance profiling")
+    catFile := flag.String("file", "", "Process a single file in time-machine")
+    catFileType := flag.String("type", "", "Labels.type for file in time-machine")
+    daemonMode := flag.Bool("daemon", false, "Daemonize, go background, drop PID file, log to file")
+    testMode := flag.Bool("t", false, "only test configs")
+    prometheus := flag.Bool("prometheus-metrics", false, "expose http prometheus collector (see http_listen)")
+    validatorMode := flag.String("custom-config", "", "[dev] run a specific subset of configs parser:file.yaml,scenarios:file.yaml")
+    restoreMode := flag.String("restore-state", "", "[dev] restore buckets state from json file")
+    dumpMode := flag.Bool("dump-state", false, "[dev] Dump bucket state at the end of run.")
+
+    flag.Parse()
+
+    if *printVersion {
+        cwversion.Show()
+        os.Exit(0)
+    }
+
+    if *catFile != "" {
+        if *catFileType == "" {
+            log.Fatalf("-file requires -type")
+        }
+        c.SingleFile = *catFile
+        c.SingleFileLabel = *catFileType
+    }
+
+    /*overridden by cfg file*/
+    if *configFile != "" {
+        rcfg, err := ioutil.ReadFile(*configFile)
+        if err != nil {
+            return fmt.Errorf("read '%s' : %s", *configFile, err)
+        }
+        if err := yaml.UnmarshalStrict(rcfg, c); err != nil {
+            return fmt.Errorf("parse '%s' : %s", *configFile, err)
+        }
+        if c.AcquisitionFile == "" {
+            c.AcquisitionFile = filepath.Clean(c.ConfigFolder + "/acquis.yaml")
+        }
+    }
+
+    if *AcquisitionFile != "" {
+        c.AcquisitionFile = *AcquisitionFile
+    }
+    if *dumpMode {
+        c.DumpBuckets = true
+    }
+    if *prometheus {
+        c.Prometheus = true
+    }
+    if *testMode {
+        c.Linter = true
+    }
+    if *validatorMode != "" {
+        c.ValidatorMode = *validatorMode
+    }
+    /*overridden by cmdline*/
+    if *daemonMode {
+        c.Daemonize = true
+    }
+    if *profileMode {
+        c.Profiling = true
+    }
+    if *printDebug {
+        c.LogLevel = log.DebugLevel
+    }
+    if *printInfo {
+        c.LogLevel = log.InfoLevel
+    }
+    if *printTrace {
+        c.LogLevel = log.TraceLevel
+    }
+    if !*SQLiteMode {
+        c.SQLiteFile = ""
+    }
+    if *APIMode {
+        c.APIMode = true
+    }
+
+    if *restoreMode != "" {
+        c.RestoreMode = *restoreMode
+    }
+
+    return nil
+}
diff --git a/pkg/cwapi/auth.go b/pkg/cwapi/auth.go
new file mode 100644
index 000000000..77e3f2817
--- /dev/null
+++ b/pkg/cwapi/auth.go
@@ -0,0 +1,225 @@
+package cwapi
+
+import (
+    "encoding/json"
+    "fmt"
+    "io/ioutil"
+    "net/http"
+    "strings"
+    "time"
+
+    "github.com/crowdsecurity/crowdsec/pkg/cwversion"
+    "github.com/crowdsecurity/crowdsec/pkg/types"
+
+    log "github.com/sirupsen/logrus"
+    "gopkg.in/yaml.v2"
+
+    "github.com/dghubble/sling"
+)
+
+type ApiCtx struct {
+    /*config*/
+    ApiVersion   string   `yaml:"version"`
+    PullPath     string   `yaml:"pull_path"`
+    PushPath     string   `yaml:"push_path"`
+    SigninPath   string   `yaml:"signin_path"`
+    RegisterPath string   `yaml:"register_path"`
+    ResetPwdPath string   `yaml:"reset_pwd_path"`
+    EnrollPath   string   `yaml:"enroll_path"`
+    BaseURL      string   `yaml:"url"`
+    CfgUser      string   `yaml:"machine_id"`
+    CfgPassword  string   `yaml:"password"`
+    Creds        ApiCreds `yaml:"-"`
+    Muted        bool     `yaml:"muted"`
+    DebugDump    bool     `yaml:"debug_dump"`
+
+    /*runtime*/
+    tokenExpired bool          `yaml:"-"`
+    toPush       []types.Event `yaml:"-"`
+    Http         *sling.Sling  `yaml:"-"`
+}
+
+type ApiCreds struct {
+    User     string `json:"machine_id" yaml:"machine_id"`
+    Password string `json:"password" yaml:"password"`
+    Profile  string `json:"profile,omitempty" yaml:"profile,omitempty"`
+}
+
+type ApiResp struct {
+    StatusCode int    `json:"statusCode"`
+    Error      string `json:"error"`
+    Message    string `json:"message"`
+}
+
+type PullResp struct {
+    StatusCode int                 `json:"statusCode"`
+    Body       []map[string]string `json:"message"`
+}
+
+func (ctx *ApiCtx) WriteConfig(cfg string) error {
+    ret, err := yaml.Marshal(ctx)
+    if err != nil {
+        return fmt.Errorf("failed to marshal config : %s", err)
+    }
+    if err := ioutil.WriteFile(cfg, ret, 0600); err != nil {
+        return fmt.Errorf("failed to write api file %s : %s", cfg, err)
+    }
+    return nil
+}
+
+func (ctx *ApiCtx) LoadConfig(cfg string) error {
+    rcfg, err := ioutil.ReadFile(cfg)
+    if err != nil {
+        return fmt.Errorf("api load configuration: unable to read configuration file '%s' : %s", cfg, err)
+    }
+    if err := yaml.UnmarshalStrict(rcfg, &ctx); err != nil {
+        return fmt.Errorf("api load configuration: unable to unmarshal configuration file '%s' : %s", cfg, err)
+    }
+    if ctx.ApiVersion != cwversion.Constraint_api {
+        return fmt.Errorf("api load configuration: cscli version only supports %s api, not %s", cwversion.Constraint_api, ctx.ApiVersion)
+    }
+    ctx.Creds.User = ctx.CfgUser
+    ctx.Creds.Password = ctx.CfgPassword
+
+    /*
+        For sling, a path starting with '/' is absolute and discards the base
+        prefix, leading to bad urls
+    */
+    if strings.HasPrefix(ctx.PullPath, "/") ||
+        strings.HasPrefix(ctx.PushPath, "/") ||
+        strings.HasPrefix(ctx.SigninPath, "/") ||
+        strings.HasPrefix(ctx.RegisterPath, "/") ||
+        strings.HasPrefix(ctx.ResetPwdPath, "/") ||
+        strings.HasPrefix(ctx.EnrollPath, "/") {
+        log.Warningf("API paths must not be prefixed by '/'")
+    }
+
+    ctx.Http = sling.New().Base(ctx.BaseURL+"/"+ctx.ApiVersion+"/").Set("User-Agent", fmt.Sprintf("CrowdWatch/%s", cwversion.VersionStr()))
+    log.Printf("api load configuration: configuration loaded successfully (base:%s)", ctx.BaseURL+"/"+ctx.ApiVersion+"/")
+    return nil
+}
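+
+/*
+    Note on the warning above: sling resolves request paths against the base
+    URL with standard RFC 3986 reference resolution, so a path with a leading
+    '/' replaces the whole "/<version>/" suffix of the base instead of being
+    appended to it. A minimal net/url illustration of the same rule
+    (hypothetical host, not part of this package):
+
+        base, _ := url.Parse("https://api.example.com/v1/")
+        rel, _ := url.Parse("signin")
+        abs, _ := url.Parse("/signin")
+        base.ResolveReference(rel) // https://api.example.com/v1/signin
+        base.ResolveReference(abs) // https://api.example.com/signin
+*/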
+ +func (ctx *ApiCtx) Init(cfg string, profile string) error { + var err error + + err = ctx.LoadConfig(cfg) + if err != nil { + return err + } + ctx.Creds.Profile = profile + ctx.toPush = make([]types.Event, 0) + err = ctx.Signin() + if err != nil { + return err + } + //start the background go-routine + go ctx.pushLoop() + return nil +} + +func (ctx *ApiCtx) Signin() error { + if ctx.Creds.User == "" || ctx.Creds.Password == "" { + return fmt.Errorf("api signin: missing credentials in api.yaml") + } + + req, err := ctx.Http.New().Post(ctx.SigninPath).BodyJSON(ctx.Creds).Request() + if err != nil { + return fmt.Errorf("api signin: HTTP request creation failed: %s", err) + } + log.Debugf("api signin: URL: '%s'", req.URL) + httpClient := http.Client{Timeout: 20 * time.Second} + resp, err := httpClient.Do(req) + if err != nil { + return fmt.Errorf("api signin: API call failed : %s", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("api signin: unable to read API response body: '%s'", err) + } + + if resp.StatusCode != 200 { + return fmt.Errorf("api signin: return bad HTTP code (%d): %s", resp.StatusCode, string(body)) + } + + jsonResp := ApiResp{} + err = json.Unmarshal(body, &jsonResp) + if err != nil { + return fmt.Errorf("api signin: unable to unmarshall api response '%s': %s", string(body), err.Error()) + } + ctx.Http = ctx.Http.Set("Authorization", jsonResp.Message) + log.Printf("api signin: signed in successfuly") + return nil +} + +func (ctx *ApiCtx) RegisterMachine(machineID string, password string) error { + ctx.Creds.User = machineID + ctx.Creds.Password = password + + req, err := ctx.Http.New().Post(ctx.RegisterPath).BodyJSON(ctx.Creds).Request() + if err != nil { + return fmt.Errorf("api register machine: HTTP request creation failed: %s", err) + } + log.Debugf("api register: URL: '%s'", req.URL) + + httpClient := http.Client{Timeout: 20 * time.Second} + resp, err := httpClient.Do(req) + if err != nil { + return fmt.Errorf("api register machine: API call failed : %s", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("api register machine: unable to read API response body: %s", err.Error()) + } + + if resp.StatusCode != 200 { + return fmt.Errorf("api register machine: return bad HTTP code (%d): %s", resp.StatusCode, string(body)) + } + + jsonResp := ApiResp{} + err = json.Unmarshal(body, &jsonResp) + if err != nil { + return fmt.Errorf("api register machine: unable to unmarshall api response '%s': %s", string(body), err.Error()) + } + return nil +} + +func (ctx *ApiCtx) ResetPassword(machineID string, password string) error { + ctx.Creds.User = machineID + ctx.Creds.Password = password + + data := map[string]string{"machine_id": ctx.Creds.User, "password": ctx.Creds.Password} + req, err := ctx.Http.New().Post(ctx.ResetPwdPath).BodyJSON(data).Request() + if err != nil { + return fmt.Errorf("api reset password: HTTP request creation failed: %s", err) + } + log.Debugf("api reset: URL: '%s'", req.URL) + + httpClient := http.Client{Timeout: 20 * time.Second} + resp, err := httpClient.Do(req) + if err != nil { + return fmt.Errorf("api reset password: API call failed : %s", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("api reset password: unable to read API response body: %s", err.Error()) + } + + if resp.StatusCode != 200 { + return fmt.Errorf("api reset password: return bad HTTP code 
(%d): %s", resp.StatusCode, string(body)) + } + + jsonResp := ApiResp{} + err = json.Unmarshal(body, &jsonResp) + if err != nil { + return fmt.Errorf("api reset password: unable to unmarshall api response '%s': %s", string(body), err.Error()) + } + if jsonResp.StatusCode != 200 { + return fmt.Errorf("api reset password: return bad HTTP code (%d): %s", jsonResp.StatusCode, string(body)) + } + return nil +} diff --git a/pkg/cwapi/enroll.go b/pkg/cwapi/enroll.go new file mode 100644 index 000000000..f622762ee --- /dev/null +++ b/pkg/cwapi/enroll.go @@ -0,0 +1,36 @@ +package cwapi + +import ( + "fmt" + "io/ioutil" + "net/http" + "time" + + log "github.com/sirupsen/logrus" +) + +func (ctx *ApiCtx) Enroll(userID string) error { + toPush := map[string]string{"user_id": userID} + + req, err := ctx.Http.New().Post(ctx.EnrollPath).BodyJSON(&toPush).Request() + if err != nil { + return fmt.Errorf("api enroll: HTTP request creation failed: %s", err) + } + log.Debugf("api enroll: URL: '%s'", req.URL) + httpClient := http.Client{Timeout: 20 * time.Second} + resp, err := httpClient.Do(req) + if err != nil { + return fmt.Errorf("api enroll: API call failed : %s", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("api enroll: unable to read API response body: '%s'", err) + } + + if resp.StatusCode != 200 { + return fmt.Errorf("api enroll: user '%s' return bad HTTP code (%d): %s", userID, resp.StatusCode, string(body)) + } + log.Printf("user '%s' is enrolled successfully", string(userID)) + return nil +} diff --git a/pkg/cwapi/signals.go b/pkg/cwapi/signals.go new file mode 100644 index 000000000..4cdbd31c0 --- /dev/null +++ b/pkg/cwapi/signals.go @@ -0,0 +1,109 @@ +package cwapi + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/types" + + log "github.com/sirupsen/logrus" +) + +func (ctx *ApiCtx) AppendSignal(sig types.SignalOccurence) error { + ctx.toPush = append(ctx.toPush, types.Event{Overflow: sig}) + log.Debugf("api append signal: adding new signal (cache size : %d): %+v", len(ctx.toPush), sig) + return nil +} + +func (ctx *ApiCtx) pushSignals() error { + if len(ctx.toPush) == 0 { + return nil + } + + req, err := ctx.Http.New().Put(ctx.PushPath).BodyJSON(&ctx.toPush).Request() + if err != nil { + return fmt.Errorf("api push signal: HTTP request creation failed: %s", err) + } + log.Debugf("api push: URL: '%s'", req.URL) + + httpClient := http.Client{Timeout: 20 * time.Second} + resp, err := httpClient.Do(req) + if err != nil { + return fmt.Errorf("api push signal: API call failed : %s", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + + log.Debugf("api push signal: HTTP Code: %+v | Body: %s \n", resp.StatusCode, string(body)) + if resp.StatusCode != 200 { + if resp.StatusCode == 401 && !ctx.tokenExpired { + log.Printf("api push signal: expired token, resigning to API") + ctx.tokenExpired = true + err := ctx.Signin() + if err != nil { + return err + } + log.Printf("api push signal: token renewed. 
Pushing signals") + err = ctx.pushSignals() + if err != nil { + return fmt.Errorf("api push signal: unable to renew api session token: %s", err.Error()) + } + } else { + return fmt.Errorf("api push signal: return bad HTTP code (%d): %s", resp.StatusCode, string(body)) + } + } + if len(ctx.toPush) > 0 { + log.Infof("api push signal: pushed %d signals successfully", len(ctx.toPush)) + } + ctx.toPush = make([]types.Event, 0) + ctx.tokenExpired = false + return nil +} + +func (ctx *ApiCtx) Flush() error { + + /*flag can be activated to dump to local file*/ + if ctx.DebugDump { + log.Warningf("api flush: dumping api cache to ./api-dump.json") + x, err := json.MarshalIndent(ctx.toPush, "", " ") + if err != nil { + return fmt.Errorf("api flush: failed to marshal data: %s", err) + } + if err := ioutil.WriteFile("./api-dump.json", x, 0755); err != nil { + return fmt.Errorf("api flush: failed to write marshaled data : %s", err) + } + } + + //pretend we did stuff + if ctx.Muted { + return nil + } + if err := ctx.pushSignals(); err != nil { + log.Errorf("api flush: fail to push signals: %s", err) + } + return nil +} + +//This one is called on a regular basis (decided by init) and push stacked events to API +func (ctx *ApiCtx) pushLoop() error { + log.Debugf("api push loop: running with a ticker every 2 minutes") + ticker := time.NewTicker(2 * time.Minute) + + for { + select { + case <-ticker.C: //push data. + if len(ctx.toPush) == 0 { + log.Debugf("api push loop: nothing to push") + continue + } + err := ctx.Flush() + if err != nil { + log.Errorf("api push loop: %s", err.Error()) + } + } + } + +} diff --git a/pkg/cwapi/topx.go b/pkg/cwapi/topx.go new file mode 100644 index 000000000..51460949c --- /dev/null +++ b/pkg/cwapi/topx.go @@ -0,0 +1,43 @@ +package cwapi + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "time" + + log "github.com/sirupsen/logrus" +) + +func (ctx *ApiCtx) PullTop() ([]map[string]string, error) { + req, err := ctx.Http.New().Get(ctx.PullPath).Request() + if err != nil { + return nil, fmt.Errorf("api pull: HTTP request creation failed: %s", err) + } + log.Debugf("api pull: URL: '%s'", req.URL) + httpClient := http.Client{Timeout: 20 * time.Second} + resp, err := httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("api pull: API call failed : %s", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("api pull: unable to read API response body: '%s'", err) + } + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("api pull: return bad HTTP code (%d): %s", resp.StatusCode, string(body)) + } + + top := PullResp{} + err = json.Unmarshal([]byte(body), &top) + if err != nil { + return nil, fmt.Errorf("api pull: unable to unmarshall api response '%s': %s", string(body), err.Error()) + } + + log.Debugf("api pull: response : %+v", top.Body) + return top.Body, nil +} diff --git a/pkg/cwhub/.index.json b/pkg/cwhub/.index.json new file mode 100644 index 000000000..89d4174c1 --- /dev/null +++ b/pkg/cwhub/.index.json @@ -0,0 +1,498 @@ +{ + "collections" : { + "crowdsec/linux" : { + "path" : "collections/crowdsec/linux.yaml", + "version" : "0.1", + "versions" : { + "0.1" : { "digest" : "1fc917c7ad66487470e466c0ad40ddd45b9f7730a4b43e1b2542627f0596bbdc", "deprecated" : false } + }, + "description" : "generic linux : ssh/nginx/apache + ssh/http scenarios", + "author" : "crowdsec", + "tags" : null, + "parsers" : ["crowdsec/apache2-logs", "crowdsec/sshd-logs", "crowdsec/nginx-logs"], + "scenarios" : 
["crowdsec/ssh_enum"] + } + }, + "parsers": { + "crowdsec/apache2-logs": { + "path": "parsers/s01-parse/crowdsec/apache2-logs.yaml", + "stage": "s01-parse", + "version": "0.2", + "versions": { + "0.1": { + "digest": "e09bb847fb9a80aedaa4b682309b7e5876398a9a28c28911d969c5dd4aa2c0cf", + "deprecated": false + }, + "0.2": { + "digest": "809d2de8c0a9bb7faa69cd53fd2f78bb4fb67b8e85a61b7179243913073890b8", + "deprecated": false + } + }, + "description": "Parse Apache2 access and error logs", + "author": "crowdsec", + "tags": null + }, + "crowdsec/cowrie-logs": { + "path": "parsers/s01-parse/crowdsec/cowrie-logs.yaml", + "stage": "s01-parse", + "version": "0.2", + "versions": { + "0.1": { + "digest": "5914721479adf812e27fa7d8ef7d533698d773faa863e658c9a9a9b996a2008e", + "deprecated": false + }, + "0.2": { + "digest": "86240cc3887580304a1662213ba08e5993d790dcb14b3f08576cb988e449b8b2", + "deprecated": false + } + }, + "description": "Parse cowrie honeypots logs", + "author": "crowdsec", + "tags": null + }, + "crowdsec/geoip": { + "path": "parsers/s02-enrich/crowdsec/geoip.yaml", + "stage": "s02-enrich", + "version": "0.2", + "versions": { + "0.1": { + "digest": "a80dd157205988b209c95017af56adcd415f7d05e2106d255853016d3068d993", + "deprecated": false + }, + "0.2": { + "digest": "9546892698b3e52ee2ad835521093e11edef9c3bbd86a30c8a6b25bc2f732721", + "deprecated": false + } + }, + "description": "Enrich geolocalisation data associated to the source IP", + "author": "crowdsec", + "tags": null + }, + "crowdsec/http-logs": { + "path": "parsers/s02-enrich/crowdsec/http-logs.yaml", + "stage": "s02-enrich", + "version": "0.2", + "versions": { + "0.1": { + "digest": "17c20627760a32f372fabacc1933ed53ad533bc3cb6b36dc9f2237e768798abe", + "deprecated": false + }, + "0.2": { + "digest": "a9c76d274bf69c3e64c486a162f589355c3a53978c2bc2b34dbdaa8c5d65b73c", + "deprecated": false + } + }, + "description": "Parse more Specifically HTTP logs, such as HTTP Code, HTTP path, HTTP args and if its a static ressource", + "author": "crowdsec", + "tags": null + }, + "crowdsec/mysql-logs": { + "path": "parsers/s01-parse/crowdsec/mysql-logs.yaml", + "stage": "s01-parse", + "version": "0.2", + "versions": { + "0.1": { + "digest": "b5bf9052c14f6a5887804247f58088d9da364b923d61a14791722f7a695e99e4", + "deprecated": false + }, + "0.2": { + "digest": "f3975dea7bb749ee0e0bd8b8f444af2f5bb028afd5f78c4198daf2de8c17a9e8", + "deprecated": false + } + }, + "description": "Parse MySQL logs", + "author": "crowdsec", + "tags": null + }, + "crowdsec/naxsi-logs": { + "path": "parsers/s02-enrich/crowdsec/naxsi-logs.yaml", + "stage": "s02-enrich", + "version": "0.2", + "versions": { + "0.1": { + "digest": "75b0ef4d320aced623327bca496f77d606e2449990dd0f6ef849aa9aaf91aad2", + "deprecated": false + }, + "0.2": { + "digest": "a93b89b1cb2a9d61d61c50c6dd4c89707d770c7e9c504d8683d802bb1ec57d07", + "deprecated": false + } + }, + "description": "Enrich logs if its from NAXSI", + "author": "crowdsec", + "tags": null + }, + "crowdsec/nginx-logs": { + "path": "parsers/s01-parse/crowdsec/nginx-logs.yaml", + "stage": "s01-parse", + "version": "0.2", + "versions": { + "0.1": { + "digest": "86c5d6cb6671f1c233b06b0afbd43a33740dd55df004ae01ff43714d2ca822bf", + "deprecated": false + }, + "0.2": { + "digest": "36200096b897494563d31f38bee86c22868ac9bd54b74591398474547d968339", + "deprecated": false + } + }, + "description": "Parse nginx access and error logs", + "author": "crowdsec", + "tags": null + }, + "crowdsec/skip-pretag": { + "path": 
"parsers/s00-raw/crowdsec/skip-pretag.yaml", + "stage": "s00-raw", + "version": "0.1", + "versions": { + "0.1": { + "digest": "c43d625b9854a5d66a5227068e943a77d57111b3411262a856a4d3c9415dd6c4", + "deprecated": false + } + }, + "author": "crowdsec", + "tags": null + }, + "crowdsec/smb-logs": { + "path": "parsers/s01-parse/crowdsec/smb-logs.yaml", + "stage": "s01-parse", + "version": "0.2", + "versions": { + "0.1": { + "digest": "edba72ee6bdbfad7d453e8564de4c6cfbaa3f99c907f3ad9da3e8d499f6d264d", + "deprecated": false + }, + "0.2": { + "digest": "86a5cfaf053da6a820fb6f3679633dce76dc6b75a3f84cf18b1502d8c0d2a519", + "deprecated": false + } + }, + "description": "Parse SMB logs", + "author": "crowdsec", + "tags": null + }, + "crowdsec/sshd-logs": { + "path": "parsers/s01-parse/crowdsec/sshd-logs.yaml", + "stage": "s01-parse", + "version": "0.2", + "versions": { + "0.1": { + "digest": "ede920fb15f97c8fe559e2687d200232074ea2d76e57a80db147451e5fded359", + "deprecated": false + }, + "0.2": { + "digest": "43c2602153722d2bfc8f1851278469fa7838a82ce752ce1bbdde192299a93c6d", + "deprecated": false + } + }, + "description": "Parse openSSH logs", + "author": "crowdsec", + "tags": null + }, + "crowdsec/syslog-parse": { + "path": "parsers/s00-raw/crowdsec/syslog-parse.yaml", + "stage": "s00-raw", + "version": "0.2", + "versions": { + "0.1": { + "digest": "ea6d39fdfd9c73ece96bd57ecdff952e6db99e4d1652f3c1b74ed9d52d185846", + "deprecated": false + }, + "0.2": { + "digest": "98feb5259f175e0e17db44bc911ef458f9f55c5b524fa2e201847e16f4e83a1b", + "deprecated": false + } + }, + "author": "crowdsec", + "tags": null + }, + "crowdsec/tcpdump-logs": { + "path": "parsers/s01-parse/crowdsec/tcpdump-logs.yaml", + "stage": "s01-parse", + "version": "0.2", + "versions": { + "0.1": { + "digest": "f3a55b79061bc1dbfce85855363b73a09e7cce5c0ff9972bdb4f7ec7fabcd9f8", + "deprecated": false + }, + "0.2": { + "digest": "8d0dc2230eefc35d9c7aec97cbf95a824fbdd66582aa4e5ededf17131ecd6103", + "deprecated": false + } + }, + "description": "Parse tcpdump raw logs", + "author": "crowdsec", + "tags": null + }, + "crowdsec/timemachine": { + "path": "parsers/s02-enrich/crowdsec/timemachine.yaml", + "stage": "s02-enrich", + "version": "0.1", + "versions": { + "0.1": { + "digest": "cd9f202305b3210511bce32950e0e06ce416391ab53875cc17d5f6aecc8bbf19", + "deprecated": false + } + }, + "author": "crowdsec", + "tags": null + } + }, + "postoverflows": { + "crowdsec/rdns": { + "path": "postoverflows/s00-enrich/crowdsec/rdns.yaml", + "stage": "s00-enrich", + "version": "0.2", + "versions": { + "0.1": { + "digest": "d04e28fa2c74f4c1ba3f1daeeeaa8a95858f620e7587123cde224b6b376ad16a", + "deprecated": false + }, + "0.2": { + "digest": "e1f7905318e7d8c432e4cf1428e3e7c943aec7c625a5d598e5b26b36a6231f1e", + "deprecated": false + } + }, + "description": "Lookup the DNS assiocated to the source IP only for overflows", + "author": "crowdsec", + "tags": null + } + }, + "scenarios": { + "crowdsec/counters": { + "path": "scenarios/crowdsec/counters.yaml", + "version": "0.2", + "versions": { + "0.1": { + "digest": "edd898e179c89ddc85890e702dc2975ecf411546fa3082b8f190ccb5d7304aa8", + "deprecated": false + }, + "0.2": { + "digest": "04ef21d6f7f48d66119098e8ecd23b5c1107e8fdd274ffddb5f8309252c1dfd1", + "deprecated": false + } + }, + "description": "Count unique ssh bruteforces", + "author": "crowdsec", + "tags": [ + "ssh" + ] + }, + "crowdsec/double_drop": { + "path": "scenarios/crowdsec/double_drop.yaml", + "version": "0.1", + "versions": { + "0.1": { + "digest": 
"0f6bd279437d9ef8061d8b69c6567c0389101811cc741a2ad766ffee1f7a8dc6", + "deprecated": false + } + }, + "description": "Ban a range if more than 5 ips from it are banned at a time", + "author": "crowdsec", + "tags": null + }, + "crowdsec/http_404_scan": { + "path": "scenarios/crowdsec/http_404_scan.yaml", + "version": "0.3", + "versions": { + "0.1": { + "digest": "4224c98f088b553cf65db1608dc448ee5e679de31437bfe2f65352362c66b24f", + "deprecated": false + }, + "0.2": { + "digest": "62768595d349c174078057534ebc21de37560a258b98fbc63ddc5106edb4db40", + "deprecated": false + }, + "0.3": { + "digest": "9ec1df959e637d08d6fc969bbfa94deba72230cb1cb528ecba4180b62670032a", + "deprecated": false + } + }, + "description": "Detect multiple unique 404 from a single ip", + "author": "crowdsec", + "tags": [ + "http", + "scan" + ] + }, + "crowdsec/http_aggressive_crawl": { + "path": "scenarios/crowdsec/http_aggressive_crawl.yaml", + "version": "0.1", + "versions": { + "0.1": { + "digest": "e0b6a1c40f8009bec4698fb0562ad34d8159aa7e1006dedbd9d28c397ab4db1a", + "deprecated": false + } + }, + "description": "Detect aggressive crawl from multiple ips", + "author": "crowdsec", + "tags": [ + "http", + "distributed_crawl" + ] + }, + "crowdsec/http_distributed_crawl": { + "path": "scenarios/crowdsec/http_distributed_crawl.yaml", + "version": "0.2", + "versions": { + "0.1": { + "digest": "8eb442380f5a996a4ccba30b6dd39391ea021c0dead7cb3b7a7eea8f216a468f", + "deprecated": false + }, + "0.2": { + "digest": "bf778e2c091bb9099a019317311a191ece7b027389231f13a2c684f647e06a66", + "deprecated": false + } + }, + "description": "an aggressive crawl distributed amongst several ips", + "author": "crowdsec", + "tags": [ + "http", + "distributed_crawl" + ] + }, + "crowdsec/mysql_bf": { + "path": "scenarios/crowdsec/mysql_bf.yaml", + "version": "0.2", + "versions": { + "0.1": { + "digest": "058a37a9d144c25586c6cb6f5cd471436bd8adb87f54e66a0a7dfc3509bb20d0", + "deprecated": false + }, + "0.2": { + "digest": "74356430e1ff91b08b95e213e5fc8bb7b9894a3f131ffc31a6507cbfba7f2abb", + "deprecated": false + } + }, + "description": "Detect mysql bruteforce", + "author": "crowdsec", + "tags": [ + "mysql", + "bruteforce" + ] + }, + "crowdsec/naxsi": { + "path": "scenarios/crowdsec/naxsi.yaml", + "version": "0.2", + "versions": { + "0.1": { + "digest": "7004c206a2fc5e4f786ae226ebca142a5eb372bb22b56276811bf2b43b9e8c22", + "deprecated": false + }, + "0.2": { + "digest": "16838eae3b5515e732084e1508518ecdc8c35968631d617f10314e5d95950493", + "deprecated": false + } + }, + "description": "Detect custom blacklist triggered in naxsi", + "author": "crowdsec", + "tags": [ + "http", + "scan" + ] + }, + "crowdsec/smb_bf": { + "path": "scenarios/crowdsec/smb_bf.yaml", + "version": "0.1", + "versions": { + "0.1": { + "digest": "0078c276a111618d89203fac5e192d2564d186b9da7575e9cd75a186ca573e72", + "deprecated": false + } + }, + "description": "Detect smb bruteforce", + "author": "crowdsec", + "tags": [ + "smb", + "bruteforce" + ] + }, + "crowdsec/ssh_bf": { + "path": "scenarios/crowdsec/ssh_bf.yaml", + "version": "0.2", + "versions": { + "0.1": { + "digest": "252354885e933ed8f6fb255c764d15e529c285443eee5efac3bc3d801f2789fe", + "deprecated": false + }, + "0.2": { + "digest": "8e4bf46e185e8a0764535bf84ba5d8a5515e266272a363c8f8929fc85dbc4609", + "deprecated": false + } + }, + "description": "Detect ssh user enum bruteforce", + "author": "crowdsec", + "tags": [ + "ssh", + "bruteforce" + ] + }, + "crowdsec/ssh_enum": { + "path": "scenarios/crowdsec/ssh_enum.yaml", + 
"version": "0.1", + "versions": { + "0.1": { + "digest": "335776aafa070073abdc1c9cf333c5fd2513c982443a29476e0b31c339b6b17f", + "deprecated": false + } + }, + "description": "Detect ssh user enum bruteforce", + "author": "crowdsec", + "tags": [ + "ssh", + "bruteforce" + ] + }, + "crowdsec/tcpdump": { + "path": "scenarios/crowdsec/tcpdump.yaml", + "version": "0.2", + "versions": { + "0.1": { + "digest": "2fe9e4ce72a8552bfd65d2d28759e4724bd0a85c716685d9d9b992f9cecb5a1f", + "deprecated": false + }, + "0.2": { + "digest": "fe9392749ad32925ebd7a5c776bbde8527a1a02f8a531de04da51726bdb54bcb", + "deprecated": false + } + }, + "description": "Detect new connection with tcpdump", + "author": "crowdsec", + "tags": [ + "tcp" + ] + }, + "crowdsec/telnet_bf": { + "path": "scenarios/crowdsec/telnet_bf.yaml", + "version": "0.1", + "versions": { + "0.1": { + "digest": "c0dcbfcfc86f3f3ecbc4888e78e06f322ca7d4dc11fd6604893f76bb52ca6c9d", + "deprecated": false + } + }, + "description": "detect telnet bruteforce", + "author": "crowdsec", + "tags": [ + "telnet", + "bruteforce" + ] + }, + "crowdsec/wordpress_bf": { + "path": "scenarios/crowdsec/wordpress_bf.yaml", + "version": "0.1", + "versions": { + "0.1": { + "digest": "a89253d2f02f0dc0bfecd85998ba5dd45eecf94929c1fa058ef9fe1646b511d9", + "deprecated": false + } + }, + "description": "detect wordpress bruteforce", + "author": "crowdsec", + "tags": [ + "http", + "bruteforce" + ] + } + } +} diff --git a/pkg/cwhub/hubMgmt.go b/pkg/cwhub/hubMgmt.go new file mode 100644 index 000000000..7b40d6877 --- /dev/null +++ b/pkg/cwhub/hubMgmt.go @@ -0,0 +1,802 @@ +package cwhub + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "path" + + //"log" + "net/http" + "os" + "path/filepath" + "strings" + + "github.com/enescakir/emoji" + log "github.com/sirupsen/logrus" +) + +var PARSERS = "parsers" +var PARSERS_OVFLW = "postoverflows" +var SCENARIOS = "scenarios" +var COLLECTIONS = "collections" + +var ItemTypes = []string{PARSERS, PARSERS_OVFLW, SCENARIOS, COLLECTIONS} + +var HubIdx map[string]map[string]Item + +var Installdir = "/etc/crowdsec/" +var Hubdir = "/etc/crowdsec/cscli/hub/" +var Cfgdir = "/etc/crowdsec/cscli/" + +var RawFileURLTemplate = "https://raw.githubusercontent.com/crowdsecurity/hub/master/%s" +var HUB_INDEX_FILE = ".index.json" + +type ItemVersion struct { + Digest string + Deprecated bool +} + +//Item can be : parsed, scenario, collection +type Item struct { + /*descriptive info*/ + Type string `yaml:"type,omitempty"` //parser|postoverflows|scenario|collection(|enrich) + Stage string `json:"stage" yaml:"type,omitempty,omitempty"` //Stage for parser|postoverflow : s00-raw/s01-... + Name string //as seen in .config.json, usually "author/name" + FileName string //the filename, ie. apache2-logs.yaml + Description string `yaml:"description,omitempty"` //as seen in .config.json + Author string `json:"author"` //as seen in .config.json + References []string `yaml:"references,omitempty"` //as seen in .config.json + BelongsToCollections []string `yaml:"belongs_to_collections,omitempty"` /*if it's part of collections, track name here*/ + + /*remote (hub) infos*/ + RemoteURL string `yaml:"remoteURL,omitempty"` //the full remote uri of file in http + RemotePath string `json:"path" yaml:"remote_path,omitempty"` //the path relative to git ie. 
/parsers/stage/author/file.yaml + RemoteHash string `yaml:"hash,omitempty"` //the meow + Version string `json:"version"` //the last version + Versions map[string]ItemVersion `json:"versions" yaml:"-"` //the list of existing versions + + /*local (deployed) infos*/ + LocalPath string `yaml:"local_path,omitempty"` //the local path relative to ${CFG_DIR} + //LocalHubPath string + LocalVersion string + LocalHash string //the local meow + Installed bool + Downloaded bool + UpToDate bool + Tainted bool //has it been locally modified + Local bool //if it's a non versioned control one + + /*if it's a collection, it not a single file*/ + Parsers []string `yaml:"parsers,omitempty"` + PostOverflows []string `yaml:"postoverflows,omitempty"` + Scenarios []string `yaml:"scenarios,omitempty"` + Collections []string `yaml:"collections,omitempty"` +} + +// calculate sha256 of a file +func getSHA256(filepath string) (string, error) { + /* Digest of file */ + f, err := os.Open(filepath) + if err != nil { + return "", fmt.Errorf("unable to open '%s' : %s", filepath, err.Error()) + } + + defer f.Close() + + h := sha256.New() + if _, err := io.Copy(h, f); err != nil { + return "", fmt.Errorf("unable to calculate sha256 of '%s': %s", filepath, err.Error()) + } + + return fmt.Sprintf("%x", h.Sum(nil)), nil +} + +var skippedLocal = 0 +var skippedTainted = 0 + +func parser_visit(path string, f os.FileInfo, err error) error { + + var target Item + var local bool + var hubpath string + var inhub bool + var fname string + var ftype string + var fauthor string + var stage string + //we only care about files + if f == nil || f.IsDir() { + return nil + } + + subs := strings.Split(path, "/") + + log.Debugf("path:%s, hubdir:%s, installdir:%s", path, Hubdir, Installdir) + /*we're in hub (~/.cscli/hub/)*/ + if strings.HasPrefix(path, Hubdir) { + inhub = true + //~/.cscli/hub/parsers/s00-raw/crowdsec/skip-pretag.yaml + //~/.cscli/hub/scenarios/crowdsec/ssh_bf.yaml + //~/.cscli/hub/profiles/crowdsec/linux.yaml + if len(subs) < 4 { + log.Fatalf("path is too short : %s", path) + } + fname = subs[len(subs)-1] + fauthor = subs[len(subs)-2] + stage = subs[len(subs)-3] + ftype = subs[len(subs)-4] + log.Debugf("HUBB check [%s] by [%s] in stage [%s] of type [%s]", fname, fauthor, stage, ftype) + + } else if strings.HasPrefix(path, Installdir) { /*we're in install /etc/crowdsec//... 
*/ + if len(subs) < 3 { + log.Fatalf("path is too short : %s", path) + } + ///etc/.../parser/stage/file.yaml + ///etc/.../postoverflow/stage/file.yaml + ///etc/.../scenarios/scenar.yaml + ///etc/.../collections/linux.yaml //file is empty + fname = subs[len(subs)-1] + stage = subs[len(subs)-2] + ftype = subs[len(subs)-3] + fauthor = "" + log.Debugf("INSTALL check [%s] by [%s] in stage [%s] of type [%s]", fname, fauthor, stage, ftype) + } + + //log.Printf("%s -> name:%s stage:%s", path, fname, stage) + if stage == SCENARIOS { + ftype = SCENARIOS + stage = "" + } else if stage == COLLECTIONS { + ftype = COLLECTIONS + stage = "" + } else if ftype != PARSERS && ftype != PARSERS_OVFLW /*its a PARSER / PARSER_OVFLW with a stage */ { + return fmt.Errorf("Unknown prefix in %s : fname:%s, fauthor:%s, stage:%s, ftype:%s", path, fname, fauthor, stage, ftype) + } + + log.Debugf("CORRECTED [%s] by [%s] in stage [%s] of type [%s]", fname, fauthor, stage, ftype) + + /* + we can encounter 'collections' in the form of a symlink : + /etc/crowdsec/.../collections/linux.yaml -> ~/.cscli/hub/collections/.../linux.yaml + when the collection is installed, both files are created + */ + //non symlinks are local user files or hub files + if f.Mode()&os.ModeSymlink == 0 { + local = true + skippedLocal++ + log.Debugf("%s isn't a symlink", path) + } else { + hubpath, err = os.Readlink(path) + if err != nil { + return fmt.Errorf("unable to read symlink of %s", path) + } + //the symlink target doesn't exist, user might have remove ~/.cscli/hub/...yaml without deleting /etc/crowdsec/....yaml + _, err := os.Lstat(hubpath) + if os.IsNotExist(err) { + log.Infof("%s is a symlink to %s that doesn't exist, deleting symlink", path, hubpath) + //remove the symlink + if err = os.Remove(path); err != nil { + return fmt.Errorf("Failed to unlink %s: %+v", path, err) + } + return nil + } + log.Debugf("%s points to %s", path, hubpath) + } + + //if it's not a symlink and not in hub, it's a local file, don't bother + if local == true && inhub == false { + log.Debugf("%s is a local file, skip", path) + skippedLocal++ + // log.Printf("local scenario, skip.") + target.Name = fname + target.Stage = stage + target.Installed = true + target.Type = ftype + target.Local = true + target.LocalPath = path + target.UpToDate = true + x := strings.Split(path, "/") + target.FileName = x[len(x)-1] + + HubIdx[ftype][fname] = target + return nil + } + //try to find which configuration item it is + log.Debugf("check [%s] of %s", fname, ftype) + + match := false + for k, v := range HubIdx[ftype] { + log.Debugf("check [%s] vs [%s] : %s", fname, v.RemotePath, ftype+"/"+stage+"/"+fname+".yaml") + if fname != v.FileName { + log.Debugf("%s != %s (filename)", fname, v.FileName) + continue + } + //wrong stage + if v.Stage != stage { + continue + } + /*if we are walking hub dir, just mark present files as downloaded*/ + if inhub { + //wrong author + if fauthor != v.Author { + continue + } + //wrong file + if v.Name+".yaml" != fauthor+"/"+fname { + continue + } + if path == Hubdir+"/"+v.RemotePath { + log.Debugf("marking %s as downloaded", v.Name) + v.Downloaded = true + } + } else { + //wrong file + /////.yaml + if !strings.HasSuffix(hubpath, v.RemotePath) { + //log.Printf("wrong file %s %s", hubpath, spew.Sdump(v)) + + continue + } + } + //wrong hash + sha, err := getSHA256(path) + if err != nil { + log.Fatalf("Failed to get sha of %s : %v", path, err) + } + for version, val := range v.Versions { + if sha != val.Digest { + //log.Printf("matching filenames, wrong 
hash %s != %s -- %s", sha, val.Digest, spew.Sdump(v)) + continue + } else { + /*we got an exact match, update struct*/ + // log.Printf("got exact match") + if inhub == false { + log.Debugf("found exact match for %s, version is %s, latest is %s", v.Name, version, v.Version) + v.LocalPath = path + v.LocalVersion = version + v.Tainted = false + v.Downloaded = true + /*if we're walking the hub, present file doesn't means installed file*/ + v.Installed = true + v.LocalHash = sha + x := strings.Split(path, "/") + target.FileName = x[len(x)-1] + } + if version == v.Version { + log.Debugf("%s is up-to-date", v.Name) + v.UpToDate = true + } else { + log.Debugf("%s is outdated", v.Name) + } + match = true + + } + } + if match == false { + log.Debugf("got tainted match for %s : %s", v.Name, path) + skippedTainted += 1 + //the file and the stage is right, but the hash is wrong, it has been tainted by user + if inhub == false { + v.LocalPath = path + v.Installed = true + } + v.UpToDate = false + v.LocalVersion = "?" + v.Tainted = true + v.LocalHash = sha + x := strings.Split(path, "/") + target.FileName = x[len(x)-1] + + } + //update the entry + HubIdx[ftype][k] = v + return nil + } + log.Infof("Ignoring file %s of type %s", path, ftype) + return nil +} + +func CollecDepsCheck(v *Item) error { + /*if it's a collection, ensure all the items are installed, or tag it as tainted*/ + if v.Type == COLLECTIONS { + log.Debugf("checking submembers of %s installed:%t", v.Name, v.Installed) + var tmp = [][]string{v.Parsers, v.PostOverflows, v.Scenarios, v.Collections} + for idx, ptr := range tmp { + ptrtype := ItemTypes[idx] + for _, p := range ptr { + if val, ok := HubIdx[ptrtype][p]; ok { + log.Debugf("check %s installed:%t", val.Name, val.Installed) + if !v.Installed { + continue + } + if val.Type == COLLECTIONS { + log.Debugf("collec, recurse.") + if err := CollecDepsCheck(&val); err != nil { + return fmt.Errorf("sub collection %s is broken : %s", val.Name, err) + } + HubIdx[ptrtype][p] = val + } + + //propagate the state of sub-items to set + if val.Tainted == true { + v.Tainted = true + return fmt.Errorf("tainted %s %s, tainted.", ptrtype, p) + } else if val.Installed == false && v.Installed == true { + v.Tainted = true + return fmt.Errorf("missing %s %s, tainted.", ptrtype, p) + } else if val.UpToDate == false { + v.UpToDate = false + return fmt.Errorf("outdated %s %s", ptrtype, p) + } + val.BelongsToCollections = append(val.BelongsToCollections, v.Name) + HubIdx[ptrtype][p] = val + log.Debugf("checking for %s - tainted:%t uptodate:%t", p, v.Tainted, v.UpToDate) + } else { + log.Fatalf("Referred %s %s in collection %s doesn't exist.", ptrtype, p, v.Name) + } + } + } + } + return nil +} + +/* Updates the infos from HubInit() with the local state */ +func LocalSync() error { + skippedLocal = 0 + skippedTainted = 0 + /*For each, scan PARSERS, PARSERS_OVFLW, SCENARIOS and COLLECTIONS last*/ + for _, scan := range ItemTypes { + /*Scan install and Hubdir to get local status*/ + for _, dir := range []string{Installdir, Hubdir} { + //walk the user's directory + err := filepath.Walk(dir+"/"+scan, parser_visit) + if err != nil { + return err + } + } + } + + for k, v := range HubIdx[COLLECTIONS] { + if err := CollecDepsCheck(&v); err != nil { + log.Infof("dependency issue %s : %s", v.Name, err) + } + HubIdx[COLLECTIONS][k] = v + } + return nil +} + +func GetHubIdx() error { + + bidx, err := ioutil.ReadFile(Cfgdir + "/.index.json") + if err != nil { + log.Fatalf("Unable to read downloaded index : %v. 
Please run update", err) + } + ret, err := LoadPkgIndex(bidx) + if err != nil { + log.Fatalf("Unable to load existing index : %v.", err) + } + HubIdx = ret + if err := LocalSync(); err != nil { + log.Fatalf("Failed to sync Hub index with local deployment : %v", err) + } + return nil +} + +func UpdateHubIdx() error { + bidx, err := DownloadHubIdx() + if err != nil { + log.Fatalf("Unable to download index : %v.", err) + } + ret, err := LoadPkgIndex(bidx) + if err != nil { + log.Fatalf("Unable to load freshly downloaded index : %v.", err) + } + HubIdx = ret + if err := LocalSync(); err != nil { + log.Fatalf("Failed to sync Hub index with local deployment : %v", err) + } + return nil +} + +func DownloadHubIdx() ([]byte, error) { + req, err := http.NewRequest("GET", fmt.Sprintf(RawFileURLTemplate, HUB_INDEX_FILE), nil) + if err != nil { + log.Errorf("failed request : %s", err) + return nil, err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + log.Errorf("failed request Do : %s", err) + return nil, err + } + if resp.StatusCode != 200 { + log.Errorf("got code %d while requesting %s, abort", resp.StatusCode, + fmt.Sprintf(RawFileURLTemplate, HUB_INDEX_FILE)) + return nil, fmt.Errorf("bad http code") + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + log.Errorf("failed request reqd: %s", err) + return nil, err + } + //os.Remove(path.Join(configFolder, GitIndexFile)) + file, err := os.OpenFile(path.Join(Cfgdir, "/.index.json"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + + if err != nil { + log.Fatalf(err.Error()) + } + defer file.Close() + + wsize, err := file.WriteString(string(body)) + if err != nil { + log.Fatalf(err.Error()) + } + log.Infof("Wrote new %d bytes index to %s", wsize, path.Join(Cfgdir, "/.index.json")) + return body, nil +} + +func DisplaySummary() { + log.Printf("Loaded %d collecs, %d parsers, %d scenarios, %d post-overflow parsers", len(HubIdx[COLLECTIONS]), + len(HubIdx[PARSERS]), len(HubIdx[SCENARIOS]), len(HubIdx[PARSERS_OVFLW])) + if skippedLocal > 0 || skippedTainted > 0 { + log.Printf("unmanaged items : %d local, %d tainted", skippedLocal, skippedTainted) + } +} + +/*LoadPkgIndex loads a local .index.json file and returns the map of parsers/scenarios/collections associated*/ +func LoadPkgIndex(buff []byte) (map[string]map[string]Item, error) { + var err error + var RawIndex map[string]map[string]Item + + if err = json.Unmarshal(buff, &RawIndex); err != nil { + return nil, fmt.Errorf("Failed to unmarshal index : %v", err) + } + + /*Iterate over the different types to complete struct */ + for _, itemType := range ItemTypes { + /*complete struct*/ + for idx, item := range RawIndex[itemType] { + item.Name = idx + item.Type = itemType + x := strings.Split(item.RemotePath, "/") + item.FileName = x[len(x)-1] + RawIndex[itemType][idx] = item + /*if it's a collection, check its sub-items are present*/ + //XX should be done later + if itemType == COLLECTIONS { + var tmp = [][]string{item.Parsers, item.PostOverflows, item.Scenarios, item.Collections} + for idx, ptr := range tmp { + ptrtype := ItemTypes[idx] + for _, p := range ptr { + if _, ok := RawIndex[ptrtype][p]; !ok { + log.Errorf("Referred %s %s in collection %s doesn't exist.", ptrtype, p, item.Name) + } + } + } + } + } + } + + return RawIndex, nil +} + +//DisableItem to disable an item managed by the hub, removes the symlink +func DisableItem(target Item, tdir string, hdir string, purge bool) (Item, error) { + syml := tdir + "/" + target.Type + "/" + target.Stage + "/" 
+ target.FileName + if target.Local { + return target, fmt.Errorf("%s isn't managed by hub. Please delete manually", target.Name) + } + + var err error + /*for a COLLECTIONS, disable sub-items*/ + if target.Type == COLLECTIONS { + var tmp = [][]string{target.Parsers, target.PostOverflows, target.Scenarios, target.Collections} + for idx, ptr := range tmp { + ptrtype := ItemTypes[idx] + for _, p := range ptr { + if val, ok := HubIdx[ptrtype][p]; ok { + HubIdx[ptrtype][p], err = DisableItem(val, Installdir, Hubdir, false) + if err != nil { + log.Errorf("Encountered error while disabling %s %s : %s.", ptrtype, p, err) + } + } else { + log.Errorf("Referred %s %s in collection %s doesn't exist.", ptrtype, p, target.Name) + } + } + } + + } + + stat, err := os.Lstat(syml) + if os.IsNotExist(err) { + log.Warningf("%s (%s) doesn't exist, can't disable", target.Name, syml) + //return target, nil //fmt.Errorf("'%s' doesn't exist", syml) + } else { + //if it's managed by hub, it's a symlink to Hubdir / ... + if stat.Mode()&os.ModeSymlink == 0 { + log.Warningf("%s (%s) isn't a symlink, can't disable", target.Name, syml) + return target, fmt.Errorf("%s isn't managed by hub", target.Name) + } + hubpath, err := os.Readlink(syml) + if err != nil { + return target, fmt.Errorf("unable to read symlink of %s (%s)", target.Name, syml) + } + if hubpath != filepath.Clean(hdir+"/"+target.RemotePath) { + log.Warningf("%s (%s) isn't a symlink to %s", target.Name, syml, filepath.Clean(hdir+"/"+target.RemotePath)) + return target, fmt.Errorf("%s isn't managed by hub", target.Name) + } + + //remove the symlink + if err = os.Remove(syml); err != nil { + return target, fmt.Errorf("Failed to unlink %s: %+v", syml, err) + } + log.Infof("Removed symlink [%s] : %s", target.Name, syml) + } + target.Installed = false + + if purge { + hubpath := hdir + "/" + target.RemotePath + //if purge, disable hub file + if err = os.Remove(hubpath); err != nil { + return target, fmt.Errorf("Failed to purge hub file %s: %+v", hubpath, err) + } + target.Downloaded = false + log.Infof("Removed source file [%s] : %s", target.Name, hubpath) + } + return target, nil +} + +func EnableItem(target Item, tdir string, hdir string) (Item, error) { + parent_dir := filepath.Clean(tdir + "/" + target.Type + "/" + target.Stage + "/") + /*create directories if needed*/ + if target.Installed == true { + if target.Tainted == true { + return target, fmt.Errorf("%s is tainted, won't enable unless --force", target.Name) + } + if target.Local == true { + return target, fmt.Errorf("%s is local, won't enable", target.Name) + } + if target.UpToDate == true { + log.Debugf("%s is installed and up-to-date, skip.", target.Name) + return target, nil + } + } + if _, err := os.Stat(parent_dir); os.IsNotExist(err) { + log.Printf("%s doesn't exist, create", parent_dir) + if err := os.MkdirAll(parent_dir, os.ModePerm); err != nil { + return target, fmt.Errorf("Unable to create parent directories") + } + } + if _, err := os.Lstat(parent_dir + "/" + target.FileName); os.IsNotExist(err) { + /*install sub-items if it's a collection*/ + if target.Type == COLLECTIONS { + var tmp = [][]string{target.Parsers, target.PostOverflows, target.Scenarios, target.Collections} + for idx, ptr := range tmp { + ptrtype := ItemTypes[idx] + for _, p := range ptr { + if val, ok := HubIdx[ptrtype][p]; ok { + HubIdx[ptrtype][p], err = EnableItem(val, Installdir, Hubdir) + if err != nil { + log.Errorf("Encountered error while installing sub-item %s %s : %s.", ptrtype, p, err) + return target, 
fmt.Errorf("Encountered error while install %s for %s, abort.", val.Name, target.Name) + } + } else { + //log.Errorf("Referred %s %s in collection %s doesn't exist.", ptrtype, p, target.Name) + return target, fmt.Errorf("Required %s %s of %s doesn't exist, abort.", ptrtype, p, target.Name) + } + } + } + } + //tdir+target.RemotePath + srcPath, err := filepath.Abs(hdir + "/" + target.RemotePath) + if err != nil { + return target, fmt.Errorf("failed to resolve %s : %s", hdir+"/"+target.RemotePath, err) + } + dstPath, err := filepath.Abs(parent_dir + "/" + target.FileName) + if err != nil { + return target, fmt.Errorf("failed to resolve %s : %s", parent_dir+"/"+target.FileName, err) + } + err = os.Symlink(srcPath, dstPath) + if err != nil { + log.Fatalf("Failed to symlink %s to %s : %v", srcPath, dstPath, err) + return target, fmt.Errorf("Failed to symlink %s to %s", srcPath, dstPath) + } + log.Printf("Enabled %s : %s", target.Type, target.Name) + } else { + log.Printf("%s already exists.", parent_dir+"/"+target.FileName) + return target, nil + } + target.Installed = true + return target, nil +} + +func DownloadLatest(target Item, tdir string, overwrite bool) (Item, error) { + var err error + log.Debugf("Downloading %s %s", target.Type, target.Name) + if target.Type == COLLECTIONS { + var tmp = [][]string{target.Parsers, target.PostOverflows, target.Scenarios, target.Collections} + for idx, ptr := range tmp { + ptrtype := ItemTypes[idx] + for _, p := range ptr { + if val, ok := HubIdx[ptrtype][p]; ok { + log.Debugf("Download %s sub-item : %s %s", target.Name, ptrtype, p) + //recurse as it's a collection + if ptrtype == COLLECTIONS { + log.Debugf("collection, recurse") + HubIdx[ptrtype][p], err = DownloadLatest(val, tdir, overwrite) + if err != nil { + log.Errorf("Encountered error while downloading sub-item %s %s : %s.", ptrtype, p, err) + return target, fmt.Errorf("Encountered error while downloading %s for %s, abort.", val.Name, target.Name) + } + } + HubIdx[ptrtype][p], err = DownloadItem(val, tdir, overwrite) + if err != nil { + log.Errorf("Encountered error while downloading sub-item %s %s : %s.", ptrtype, p, err) + return target, fmt.Errorf("Encountered error while downloading %s for %s, abort.", val.Name, target.Name) + } + } else { + //log.Errorf("Referred %s %s in collection %s doesn't exist.", ptrtype, p, target.Name) + return target, fmt.Errorf("Required %s %s of %s doesn't exist, abort.", ptrtype, p, target.Name) + } + } + } + target, err = DownloadItem(target, tdir, overwrite) + } else { + return DownloadItem(target, tdir, overwrite) + } + return target, nil +} + +func DownloadItem(target Item, tdir string, overwrite bool) (Item, error) { + + /*if user didn't --force, don't overwrite local, tainted, up-to-date files*/ + if !overwrite { + if target.Tainted { + log.Debugf("%s : tainted, not updated", target.Name) + return target, nil + } + if target.UpToDate { + log.Debugf("%s : up-to-date, not updated", target.Name) + return target, nil + } + } + + //log.Infof("Downloading %s to %s", target.Name, tdir) + req, err := http.NewRequest("GET", fmt.Sprintf(RawFileURLTemplate, target.RemotePath), nil) + if err != nil { + log.Errorf("%s : request creation failed : %s", target.Name, err) + return target, err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + log.Errorf("%s : request failed : %s", target.Name, err) + return target, err + } + if resp.StatusCode != 200 { + log.Errorf("%s : non 200 response : %d", target.Name, resp.StatusCode) + return target, fmt.Errorf("bad 
http code") + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + log.Errorf("%s : failed request read: %s", target.Name, err) + return target, err + } + h := sha256.New() + h.Write([]byte(body)) + meow := fmt.Sprintf("%x", h.Sum(nil)) + if meow != target.Versions[target.Version].Digest { + log.Errorf("Downloaded version doesn't match index, please 'hub update'") + return target, fmt.Errorf("invalid download hash") + } + //all good, install + //check if parent dir exists + tmpdirs := strings.Split(tdir+"/"+target.RemotePath, "/") + parent_dir := strings.Join(tmpdirs[:len(tmpdirs)-1], "/") + + /*check dir*/ + if _, err = os.Stat(parent_dir); os.IsNotExist(err) { + log.Debugf("%s doesn't exist, create", parent_dir) + if err := os.MkdirAll(parent_dir, os.ModePerm); err != nil { + return target, fmt.Errorf("Unable to create parent directories") + } + } + /*check actual file*/ + if _, err = os.Stat(tdir + "/" + target.RemotePath); !os.IsNotExist(err) { + log.Warningf("%s : overwrite", target.Name) + log.Debugf("target: %s/%s", tdir, target.RemotePath) + } else { + log.Infof("%s : OK", target.Name) + } + + f, err := os.OpenFile(tdir+"/"+target.RemotePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return target, fmt.Errorf("Failed to open destination file %s : %v", tdir+"/"+target.RemotePath, err) + } + defer f.Close() + _, err = f.WriteString(string(body)) + if err != nil { + return target, fmt.Errorf("Failed to write destination file %s : %v", tdir+"/"+target.RemotePath, err) + } + target.Downloaded = true + target.Tainted = false + target.UpToDate = true + + return target, nil +} + +//returns: human-text, Enabled, Warning, Unmanaged +func ItemStatus(v Item) (string, bool, bool, bool) { + var Ok, Warning, Managed bool + var strret string + + if v.Installed == false { + strret = "disabled" + Ok = false + } else { + Ok = true + strret = "enabled" + } + + if v.Local == true { + Managed = false + strret += ",local" + } else { + Managed = true + } + + //tainted or out of date + if v.Tainted { + Warning = true + strret += ",tainted" + } else if !v.UpToDate { + strret += ",update-available" + Warning = true + } + return strret, Ok, Warning, Managed +} + +//Returns a list of entries for packages : name, status, local_path, local_version, utf8_status (fancy) +func HubStatus(itype string, name string, list_all bool) []map[string]string { + if _, ok := HubIdx[itype]; !ok { + log.Errorf("type %s doesn't exist", itype) + return nil + } + if list_all { + log.Printf("only enabled ones") + } + + var mli []map[string]string + /*remember, you do it for the user :)*/ + for _, v := range HubIdx[itype] { + if name != "" && name != v.Name { + //user has required a specific name + continue + } + //Only enabled items ? 
+ if !list_all && !v.Installed { + continue + } + //Check the item status + st, ok, warning, managed := ItemStatus(v) + tmp := make(map[string]string) + tmp["name"] = v.Name + tmp["status"] = st + tmp["local_version"] = v.LocalVersion + tmp["local_path"] = v.LocalPath + tmp["description"] = v.Description + if !managed || !v.Installed { + tmp["utf8_status"] = fmt.Sprintf("%v %s", emoji.Prohibited, st) + } else if warning { + tmp["utf8_status"] = fmt.Sprintf("%v %s", emoji.Warning, st) + } else if ok { + tmp["utf8_status"] = fmt.Sprintf("%v %s", emoji.CheckMark, st) + } + mli = append(mli, tmp) + } + return mli +} diff --git a/pkg/cwplugin/backend.go b/pkg/cwplugin/backend.go new file mode 100644 index 000000000..809e66503 --- /dev/null +++ b/pkg/cwplugin/backend.go @@ -0,0 +1,185 @@ +package cwplugin + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "plugin" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" +) + +// the structure returned by the function New() of the plugin must match this interface +type Backend interface { + Insert(types.SignalOccurence) error + ReadAT(time.Time) ([]map[string]string, error) + Delete(string) (int, error) + Init(map[string]string) error + Flush() error + DeleteAll() error +} + +type BackendPlugin struct { + Name string `yaml:"name"` + Path string `yaml:"path"` + ConfigFilePath string + Config map[string]string `yaml:"config"` + ID string + funcs Backend +} + +type BackendManager struct { + backendPlugins map[string]BackendPlugin +} + +func NewBackendPlugin(path string, isDaemon bool) (*BackendManager, error) { + var files []string + var backendManager = &BackendManager{} + err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error { + if filepath.Ext(path) == ".yaml" { + files = append(files, path) + } + return nil + }) + if err != nil { + panic(err) + } + + backendManager.backendPlugins = make(map[string]BackendPlugin, len(files)) + + for _, file := range files { + var newPlugin BackendPlugin + log.Debugf("opening plugin '%s'", file) + bConfig, err := ioutil.ReadFile(file) + if err != nil { + log.Errorf("unable to open file '%s' : %s, skipping", file, err) + continue + } + if err := yaml.UnmarshalStrict(bConfig, &newPlugin); err != nil { + log.Errorf("parsing '%s' yaml error : %s, skipping", file, err) + continue + } + plug, err := plugin.Open(newPlugin.Path) + if err != nil { + return nil, err + } + //Lookup a function called 'New' to get the plugin interface + symbol, err := plug.Lookup("New") + if err != nil { + return nil, fmt.Errorf("no 'New' function in plugin : %s", err) + } + symNew, ok := symbol.(func() interface{}) + if !ok { + log.Errorf("plugin '%s' do not implement a GetFunctions() that return a list of string, skipping", file) + continue + } + + // cast the return interface to Backend interface + plugNew := symNew() + bInterface, ok := plugNew.(Backend) + if !ok { + return nil, fmt.Errorf("unexpected '%s' type, skipping", newPlugin.Name) + } + + // Add the interface and Init() + newPlugin.funcs = bInterface + if isDaemon { + newPlugin.Config["flush"] = "true" + } else { + newPlugin.Config["flush"] = "false" + } + + err = newPlugin.funcs.Init(newPlugin.Config) + if err != nil { + return nil, fmt.Errorf("plugin '%s' init error : %s", newPlugin.Name, err) + } + log.Infof("backend plugin '%s' loaded", newPlugin.Name) + backendManager.backendPlugins[newPlugin.Name] = newPlugin + + } + log.Debugf("loaded %d backend plugins", 
len(backendManager.backendPlugins)) + if len(backendManager.backendPlugins) == 0 { + return nil, fmt.Errorf("no plugins loaded from %s", path) + } + return backendManager, nil +} + +func (b *BackendManager) Delete(target string) (int, error) { + var err error + var nbDel int + for _, plugin := range b.backendPlugins { + nbDel, err = plugin.funcs.Delete(target) + if err != nil { + return 0, fmt.Errorf("failed to delete : %s", err) + } + } + return nbDel, nil +} + +func (b *BackendManager) DeleteAll() error { + var err error + for _, plugin := range b.backendPlugins { + err = plugin.funcs.DeleteAll() + if err != nil { + return fmt.Errorf("failed to delete : %s", err) + } + } + return nil +} + +// Insert the signal for the plugin specified in the config["plugin"] parameter +func (b *BackendManager) InsertOnePlugin(sig types.SignalOccurence, pluginName string) error { + if val, ok := b.backendPlugins[pluginName]; ok { + val.funcs.Insert(sig) + } else { + return fmt.Errorf("plugin '%s' not loaded", pluginName) + } + return nil +} + +// Insert the signal for all the plugins +func (b *BackendManager) Insert(sig types.SignalOccurence) error { + var err error + for _, plugin := range b.backendPlugins { + err = plugin.funcs.Insert(sig) + if err != nil { + return fmt.Errorf("flushing backend plugin '%s' failed: %s", plugin.Name, err) + } + } + + return nil +} + +func (b *BackendManager) IsBackendPlugin(plugin string) bool { + if _, ok := b.backendPlugins[plugin]; ok { + return true + } + return false +} + +func (b *BackendManager) ReadAT(timeAT time.Time) ([]map[string]string, error) { + var ret []map[string]string + var err error + for _, plugin := range b.backendPlugins { + ret, err = plugin.funcs.ReadAT(timeAT) + if err != nil { + return nil, err + } + } + return ret, nil +} + +func (b *BackendManager) Flush() error { + var err error + for _, plugin := range b.backendPlugins { + err = plugin.funcs.Flush() + if err != nil { + return fmt.Errorf("flushing backend plugin '%s' failed: %s", plugin.Name, err) + } + } + return nil +} diff --git a/pkg/cwplugin/notification.go b/pkg/cwplugin/notification.go new file mode 100644 index 000000000..dc97f8a83 --- /dev/null +++ b/pkg/cwplugin/notification.go @@ -0,0 +1,4 @@ +package cwplugin + +type NotificationManager struct { +} diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go new file mode 100644 index 000000000..10fa70b43 --- /dev/null +++ b/pkg/cwversion/version.go @@ -0,0 +1,62 @@ +package cwversion + +import ( + "fmt" + "log" + + version "github.com/hashicorp/go-version" +) + +/* + +Given a version number MAJOR.MINOR.PATCH, increment the: + + MAJOR version when you make incompatible API changes, + MINOR version when you add functionality in a backwards compatible manner, and + PATCH version when you make backwards compatible bug fixes. + +Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format. 
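+
+For instance, with Constraint_parser = ">= 1.0, < 2.0" below, a parser
+declaring format version "1.5" satisfies the constraint, while "2.0"
+does not (see Statisfies()).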
+
+*/
+
+var (
+	Version             string // = "v0.0.0"
+	Codename            string // = "SoumSoum"
+	BuildDate           string // = "I don't remember exactly"
+	Tag                 string // = "dev"
+	GoVersion           string // = "1.13"
+	Constraint_parser   = ">= 1.0, < 2.0"
+	Constraint_scenario = ">= 1.0, < 2.0"
+	Constraint_api      = "v1"
+	Constraint_acquis   = ">= 1.0, < 2.0"
+)
+
+func Show() {
+	log.Printf("version: %s-%s", Version, Tag)
+	log.Printf("Codename: %s", Codename)
+	log.Printf("BuildDate: %s", BuildDate)
+	log.Printf("GoVersion: %s", GoVersion)
+	log.Printf("Constraint_parser: %s", Constraint_parser)
+	log.Printf("Constraint_scenario: %s", Constraint_scenario)
+	log.Printf("Constraint_api: %s", Constraint_api)
+	log.Printf("Constraint_acquis: %s", Constraint_acquis)
+}
+
+func VersionStr() string {
+	return fmt.Sprintf("%s-%s", Version, Tag)
+}
+
+func Statisfies(strvers string, constraint string) (bool, error) {
+	vers, err := version.NewVersion(strvers)
+	if err != nil {
+		return false, fmt.Errorf("Failed to parse '%s' : %v", strvers, err)
+	}
+	constraints, err := version.NewConstraint(constraint)
+	if err != nil {
+		return false, fmt.Errorf("Failed to parse constraint '%s'", constraint)
+	}
+	if !constraints.Check(vers) {
+		return false, nil
+	}
+	return true, nil
+}
diff --git a/pkg/exprhelpers/exprlib.go b/pkg/exprhelpers/exprlib.go
new file mode 100644
index 000000000..debb8a15d
--- /dev/null
+++ b/pkg/exprhelpers/exprlib.go
@@ -0,0 +1,25 @@
+package exprhelpers
+
+import (
+	"strconv"
+
+	log "github.com/sirupsen/logrus"
+)
+
+func Atof(x string) float64 {
+	log.Debugf("debug atof %s", x)
+	ret, err := strconv.ParseFloat(x, 64)
+	if err != nil {
+		log.Warningf("Atof : can't convert float '%s' : %v", x, err)
+	}
+	return ret
+}
+
+func GetExprEnv(ctx map[string]interface{}) map[string]interface{} {
+	var ExprLib = map[string]interface{}{"Atof": Atof}
+	for k, v := range ctx {
+		ExprLib[k] = v
+	}
+	return ExprLib
+}
diff --git a/pkg/leakybucket/README.md b/pkg/leakybucket/README.md
new file mode 100644
index 000000000..770ec8088
--- /dev/null
+++ b/pkg/leakybucket/README.md
@@ -0,0 +1,142 @@
+# Leakybuckets
+
+## Bucket concepts
+
+Leaky buckets are used for decision making. Under certain conditions,
+enriched events are poured into these buckets. When a bucket is full,
+we raise a new event. After this event is raised, the bucket is
+destroyed. There are many types of buckets, and we welcome any new
+useful bucket design.
+
+Usually a bucket configuration generates the creation of many
+buckets. They are differentiated by a field called stackkey. When two
+events arrive with the same stackkey, they go into the same matching
+bucket.
+
+The very purpose of these buckets is to detect clients that exceed a
+certain rate of attempts to do something (ssh connection, http
+authentication failure, etc...). Thus, the most used stackkey field is
+often the source_ip.
+
+## Standard leaky buckets
+
+Default buckets have two main configuration options:
+ * capacity: number of events the bucket can hold. When the capacity
+   is reached and a new event is poured, a new event is raised. We
+   call this type of event an overflow. This is an int.
+ * leakspeed: duration needed for an event to leak. When an event
+   leaks, it disappears from the bucket.
+
+## Trigger
+
+It's a special type of bucket with a zero capacity. Thus, when an
+event is poured into a trigger, it always raises an overflow.
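+
+As a minimal sketch (the scenario name here is illustrative, not a
+shipped configuration), a trigger can be declared like this:
+
+```
+# raise an overflow on the very first matching event
+- type: trigger
+  name: demo/new-connection
+  filter: "Meta.service == 'tcp' && Event.new_connection == 'true'"
+  on_overflow: Reprocess
+```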
+
+## Uniq
+
+It's a bucket working like the standard leaky bucket, except for one
+thing: a filter returns a property for each event, and only one
+occurrence of this property is allowed in the bucket; hence the bucket
+is called uniq.
+
+## Counter
+
+It's a special type of bucket with an infinite capacity and an
+infinite leakspeed (it never overflows, nor does it leak).
+Nevertheless, the event is raised after a fixed duration. The option
+is called duration.
+
+## Available configuration options for buckets
+
+### Fields for standard buckets
+
+* type: mandatory field. Must be one of "leaky", "trigger", "uniq" or
+  "counter"
+* name: mandatory field, but the value is totally open. Nevertheless,
+  this value will tag the events raised by the bucket.
+* filter: mandatory field. It's an expression that decides whether an
+  event matches the bucket or not. The filter has to return a
+  boolean. As a filter implementation we use
+  https://github.com/antonmedv/expr
+* capacity: [mandatory for now, shouldn't be mandatory in the final
+  version] it's the size of the bucket. When pouring into a bucket
+  that already holds capacity events, it overflows.
+* leakspeed: leakspeed is a time duration (it has to be parseable by
+  https://golang.org/pkg/time/#ParseDuration). After each interval, an
+  event is leaked from the bucket.
+* stackkey: mandatory field. This field determines into which bucket
+  incoming events will be poured. When an unknown stackkey is seen in
+  an event, a new bucket is created.
+* on_overflow: optional field that tells what to do when the bucket
+  returns the overflow event. As of today, the possibilities are:
+  "ban,1h", "Reprocess", "Delete".
+  Reprocess is used to send the raised event back into the event pool
+  to be matched against buckets
+
+### Fields for special buckets
+
+#### Uniq
+
+Uniq has an extra field, uniq_filter, which uses the filter
+implementation from https://github.com/antonmedv/expr. The filter must
+return a string. All strings returned by this filter in the same
+bucket have to be different. Thus, if a string is seen twice, it is
+dismissed.
+
+#### Trigger
+
+Capacity and leakspeed are not relevant for this kind of bucket.
+
+#### Counter
+
+It's a special kind of bucket that raises an event and is destroyed
+after a fixed duration. The configuration field used is duration, and
+it must be parseable by https://golang.org/pkg/time/#ParseDuration.
+Nevertheless, this kind of bucket is often used with an infinite
+leakspeed and an infinite capacity [capacity set to -1 for now].
+
+
+## Examples
+
+```
+# ssh bruteforce
+- type: leaky
+  name: ssh_bruteforce
+  filter: "Meta.log_type == 'ssh_failed-auth'"
+  leakspeed: "10s"
+  capacity: 5
+  stackkey: "source_ip"
+  on_overflow: ban,1h
+
+# reporting of src_ip,dest_port seen
+- type: counter
+  name: counter
+  filter: "Meta.service == 'tcp' && Event.new_connection == 'true'"
+  distinct: "Meta.source_ip + ':' + Meta.dest_port"
+  duration: 5m
+  capacity: -1
+
+- type: trigger
+  name: "New connection"
+  filter: "Meta.service == 'tcp' && Event.new_connection == 'true'"
+  on_overflow: Reprocess
+```
+
+# Note on leakybuckets implementation
+
+[This is not dry enough to have many details here, but:]
+
+The bucket code is triggered by `InfiniBucketify` in main.go.
+There's one struct called Buckets, which wraps a `sync.Map` holding
+all buckets. The key of this map is derived (via a sha1, see `GetKey`
+in buckets.go) from the bucket's filter, its name, and the incoming
+stackkey.
+This may look complicated, but it allows us to manage everything
+through a single structure. This is done in buckets.go.
+
+On top of that, the implementation defines only the standard leaky
+bucket. A goroutine is launched for every bucket (bucket.go). This
+goroutine manages the life of the bucket.
+
+For special buckets, hooks are defined at initialization time in
+manager.go. Hooks are called when relevant by the bucket goroutine
+when events are poured and/or when the bucket overflows.
\ No newline at end of file
diff --git a/pkg/leakybucket/blackhole.go b/pkg/leakybucket/blackhole.go
new file mode 100644
index 000000000..2c93ff5c2
--- /dev/null
+++ b/pkg/leakybucket/blackhole.go
@@ -0,0 +1,72 @@
+package leakybucket
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+)
+
+type HiddenKey struct {
+	key        string
+	expiration time.Time
+}
+
+type Blackhole struct {
+	duration   time.Duration
+	hiddenKeys []HiddenKey
+	DumbProcessor
+}
+
+func NewBlackhole(g *BucketFactory) (*Blackhole, error) {
+	duration, err := time.ParseDuration(g.Blackhole)
+	if err != nil {
+		return nil, fmt.Errorf("Blackhole duration not valid '%s'", g.Blackhole)
+	}
+	return &Blackhole{
+		duration:      duration,
+		hiddenKeys:    []HiddenKey{},
+		DumbProcessor: DumbProcessor{},
+	}, nil
+}
+
+func (bl *Blackhole) OnBucketOverflow(b *BucketFactory) func(*Leaky, types.SignalOccurence, *Queue) (types.SignalOccurence, *Queue) {
+	return func(l *Leaky, s types.SignalOccurence, q *Queue) (types.SignalOccurence, *Queue) {
+		var blackholed bool = false
+		var tmp []HiddenKey
+		// search if we are blackholed and refresh the slice
+		for _, element := range bl.hiddenKeys {
+			if element.key == l.Mapkey {
+				if element.expiration.After(l.Ovflw_ts) {
+					l.logger.Debugf("Overflow discarded, still blackholed for %s", element.expiration.Sub(l.Ovflw_ts))
+					blackholed = true
+				}
+			}
+			//only keep entries that haven't expired yet
+			if element.expiration.After(l.Ovflw_ts) {
+				tmp = append(tmp, element)
+			} else {
+				l.logger.Debugf("%s left blackhole %s ago", element.key, l.Ovflw_ts.Sub(element.expiration))
+			}
+		}
+		bl.hiddenKeys = tmp
+
+		if blackholed {
+			l.logger.Tracef("Event is blackholed (%s)", l.First_ts)
+			return types.SignalOccurence{
+				MapKey: l.Mapkey,
+				// BucketConfiguration: bcfg,
+			}, nil
+		}
+		bl.hiddenKeys = append(bl.hiddenKeys, HiddenKey{l.Mapkey, l.Ovflw_ts.Add(bl.duration)})
+		l.logger.Debugf("Adding overflow to blackhole (%s)", l.First_ts)
+		return s, q
+	}
+
+}
diff --git a/pkg/leakybucket/bucket.go b/pkg/leakybucket/bucket.go
new file mode 100644
index 000000000..ee1f98714
--- /dev/null
+++ b/pkg/leakybucket/bucket.go
@@ -0,0 +1,419 @@
+package leakybucket
+
+import (
+	"encoding/json"
+	"fmt"
+	"net"
+	"strconv"
+	"sync/atomic"
+	"time"
+
+	//"log"
+	"github.com/crowdsecurity/crowdsec/pkg/time/rate"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+
+	//rate "time/rate"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/goombaio/namegenerator"
+	"github.com/prometheus/client_golang/prometheus"
+	log "github.com/sirupsen/logrus"
+	//"golang.org/x/time/rate"
+)
+
+const (
+	LIVE = iota
+	TIMEMACHINE
+)
+
+//the bucket itself
+type Leaky struct {
+	//action_overflow
+	//OverflowAction string
+	//bucket actions
+	//Actions []string
+	Name string
+	Mode int //LIVE or TIMEMACHINE
+	//the limiter is what holds the proper "leaky aspect", it determines when/if we can pour objects
+	Limiter         rate.RateLimiter `json:"-"`
+	SerializedState rate.Lstate
+	
//Queue is used to held the cache of objects in the bucket, it is used to know 'how many' objects we have in buffer. + Queue *Queue + //Leaky buckets are receiving message through a chan + In chan types.Event `json:"-"` + //Leaky buckets are pushing their overflows through a chan + Out chan *Queue `json:"-"` + // shared for all buckets (the idea is to kill this afterwards) + AllOut chan types.Event `json:"-"` + KillSwitch chan bool `json:"-"` + //max capacity (for burst) + Capacity int + //CacheRatio is the number of elements that should be kept in memory (compared to capacity) + CacheSize int + //the unique identifier of the bucket (a hash) + Mapkey string + // chan for signaling + Signal chan bool `json:"-"` + Reprocess bool + Uuid string + First_ts time.Time + Last_ts time.Time + Ovflw_ts time.Time + Total_count int + Leakspeed time.Duration + BucketConfig *BucketFactory + Duration time.Duration + Pour func(*Leaky, types.Event) `json:"-"` + //Profiling when set to true enables profiling of bucket + Profiling bool + timedOverflow bool + logger *log.Entry + //as the rate-limiter is intended for http or such, we need to have a separate mechanism to track 'empty' bucket. + //we use a go-routine that use waitN to know when the bucket is empty (N would be equal to bucket capacity) + //as it try to reserves the capacity, we need to cancel it before we can pour in the bucket + //reservation *rate.Reservation +} + +var BucketsPour = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_bucket_pour", + Help: "How many time an event was poured in this bucket.", + }, + []string{"source", "name"}, +) + +var BucketsOverflow = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_bucket_overflow", + Help: "How many time this bucket overflowed.", + }, + []string{"name"}, +) + +var BucketsUnderflow = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_bucket_underflow", + Help: "How many time this bucket has underflowed.", + }, + []string{"name"}, +) + +var BucketsInstanciation = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_bucket_create", + Help: "How many time this bucket was instanciated.", + }, + []string{"name"}, +) + +func NewLeaky(g BucketFactory) *Leaky { + g.logger.Tracef("Instantiating live bucket %s", g.Name) + return FromFactory(g) +} + +// Newleaky creates a new leaky bucket from a BucketFactory +// Events created by the bucket (overflow, bucket empty) are sent to a chan defined by BucketFactory +// The leaky bucket implementation is based on rate limiter (see https://godoc.org/golang.org/x/time/rate) +// There's a trick to have an event said when the bucket gets empty to allow its destruction +func FromFactory(g BucketFactory) *Leaky { + var limiter rate.RateLimiter + //golang rate limiter. It's mainly intended for http rate limiter + Qsize := g.Capacity + if g.CacheSize > 0 { + //cache is smaller than actual capacity + if g.CacheSize <= g.Capacity { + Qsize = g.CacheSize + //bucket might be counter (infinite size), allow cache limitation + } else if g.Capacity == -1 { + Qsize = g.CacheSize + } + } + if g.Capacity == -1 { + //In this case we allow all events to pass. 
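+		//(rate.AlwaysFull's Allow() always succeeds, so every event lands in
+		//the queue and the limiter never triggers an overflow)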
+ //maybe in the future we could avoid using a limiter + limiter = &rate.AlwaysFull{} + } else { + limiter = rate.NewLimiter(rate.Every(g.leakspeed), g.Capacity) + } + if g.Profiling == true { + BucketsInstanciation.With(prometheus.Labels{"name": g.Name}).Inc() + } + //create the leaky bucket per se + l := &Leaky{ + Name: g.Name, + Limiter: limiter, + Uuid: namegenerator.NewNameGenerator(time.Now().UTC().UnixNano()).Generate(), + Queue: NewQueue(Qsize), + CacheSize: g.CacheSize, + Out: make(chan *Queue, 1), + AllOut: g.ret, + Capacity: g.Capacity, + Leakspeed: g.leakspeed, + BucketConfig: &g, + Pour: Pour, + Reprocess: g.Reprocess, + Profiling: g.Profiling, + Mode: LIVE, + } + if l.BucketConfig.Capacity > 0 && l.BucketConfig.leakspeed != time.Duration(0) { + l.Duration = time.Duration(l.BucketConfig.Capacity+1) * l.BucketConfig.leakspeed + } + if l.BucketConfig.duration != time.Duration(0) { + l.Duration = l.BucketConfig.duration + l.timedOverflow = true + } + + return l +} + +var LeakyRoutineCount int64 + +/* for now mimic a leak routine */ +func LeakRoutine(l *Leaky) { + + var ( + durationTicker <-chan time.Time = make(<-chan time.Time) + ) + + /*todo : we create a logger at runtime while we want leakroutine to be up asap, might not be a good idea*/ + l.logger = l.BucketConfig.logger.WithFields(log.Fields{"capacity": l.Capacity, "partition": l.Mapkey, "bucket_id": l.Uuid}) + + l.Signal <- true + atomic.AddInt64(&LeakyRoutineCount, 1) + defer atomic.AddInt64(&LeakyRoutineCount, -1) + + for _, f := range l.BucketConfig.processors { + err := f.OnBucketInit(l.BucketConfig) + if err != nil { + l.logger.Errorf("Problem at bucket initializiation. Bail out %T : %v", f, err) + close(l.Signal) + return + } + } + + l.logger.Debugf("Leaky routine starting, lifetime : %s", l.Duration) + defer l.logger.Debugf("Leaky routine exiting") + for { + select { + /*receiving an event*/ + case msg := <-l.In: + /*the msg var use is confusing and is redeclared in a different type :/*/ + for _, f := range l.BucketConfig.processors { + msg := f.OnBucketPour(l.BucketConfig)(msg, l) + // if &msg == nil we stop processing + if msg == nil { + goto End + } + } + l.logger.Tracef("Pour event: %s", spew.Sdump(msg)) + l.logger.Debugf("Pouring event.") + + if l.Profiling == true { + BucketsPour.With(prometheus.Labels{"name": l.Name, "source": msg.Line.Src}).Inc() + } + l.Pour(l, msg) // glue for now + //Clear cache on behalf of pour + tmp := time.NewTicker(l.Duration) + durationTicker = tmp.C + l.Signal <- true + defer tmp.Stop() + /*a kill chan to allow externally killing the leaky routines*/ + case <-l.KillSwitch: + close(l.Signal) + return + /*we overflowed*/ + case ofw := <-l.Out: + close(l.Signal) + sig := FormatOverflow(l, ofw) + l.logger.Tracef("Overflow hooks time : %v", l.BucketConfig.processors) + for _, f := range l.BucketConfig.processors { + sig, ofw = f.OnBucketOverflow(l.BucketConfig)(l, sig, ofw) + if ofw == nil { + l.logger.Debugf("Overflow has been discard (%T)", f) + break + } + } + l.logger.Tracef("Overflow event: %s", spew.Sdump(types.Event{Overflow: sig})) + mt, _ := l.Ovflw_ts.MarshalText() + l.logger.Tracef("overflow time : %s", mt) + if l.Profiling == true { + BucketsOverflow.With(prometheus.Labels{"name": l.Name}).Inc() + } + l.AllOut <- types.Event{Overflow: sig, Type: types.OVFLW, MarshaledTime: string(mt)} + return + /*we underflow or reach bucket deadline (timers)*/ + case <-durationTicker: + l.Ovflw_ts = time.Now() + close(l.Signal) + ofw := l.Queue + sig := types.SignalOccurence{MapKey: 
l.Mapkey} + + if l.timedOverflow { + if l.Profiling == true { + BucketsOverflow.With(prometheus.Labels{"name": l.Name}).Inc() + } + sig = FormatOverflow(l, ofw) + for _, f := range l.BucketConfig.processors { + sig, ofw = f.OnBucketOverflow(l.BucketConfig)(l, sig, ofw) + if ofw == nil { + l.logger.Debugf("Overflow has been discard (%T)", f) + break + } + } + l.logger.Infof("Timed Overflow") + } else { + l.logger.Debugf("bucket underflow, destroy") + BucketsUnderflow.With(prometheus.Labels{"name": l.Name}).Inc() + + } + l.logger.Tracef("Overflow event: %s", spew.Sdump(types.Event{Overflow: sig})) + + l.AllOut <- types.Event{Overflow: sig, Type: types.OVFLW} + l.logger.Tracef("Returning from leaky routine.") + return + } + End: + } +} + +func Pour(l *Leaky, msg types.Event) { + + l.Total_count += 1 + if l.First_ts.IsZero() { + l.First_ts = time.Now() + } + l.Last_ts = time.Now() + if l.Limiter.Allow() { + l.Queue.Add(msg) + } else { + l.Ovflw_ts = time.Now() + l.logger.Debugf("Last event to be poured, bucket overflow.") + l.Queue.Add(msg) + l.Out <- l.Queue + } +} + +func FormatOverflow(l *Leaky, queue *Queue) types.SignalOccurence { + var am string + + l.logger.Debugf("Overflow (start: %s, end: %s)", l.First_ts, l.Ovflw_ts) + + sig := types.SignalOccurence{ + Scenario: l.Name, + Bucket_id: l.Uuid, + Alert_message: am, + Start_at: l.First_ts, + Stop_at: l.Ovflw_ts, + Events_count: l.Total_count, + Capacity: l.Capacity, + Reprocess: l.Reprocess, + Leak_speed: l.Leakspeed, + MapKey: l.Mapkey, + Sources: make(map[string]types.Source), + Labels: l.BucketConfig.Labels, + } + + for _, evt := range queue.Queue { + //either it's a collection of logs, or a collection of past overflows being reprocessed. + //one overflow can have multiple sources for example + if evt.Type == types.LOG { + if _, ok := evt.Meta["source_ip"]; !ok { + continue + } + source_ip := evt.Meta["source_ip"] + if _, ok := sig.Sources[source_ip]; !ok { + src := types.Source{} + src.Ip = net.ParseIP(source_ip) + if v, ok := evt.Enriched["ASNNumber"]; ok { + src.AutonomousSystemNumber = v + } + if v, ok := evt.Enriched["IsoCode"]; ok { + src.Country = v + } + if v, ok := evt.Enriched["ASNOrg"]; ok { + src.AutonomousSystemOrganization = v + } + if v, ok := evt.Enriched["Latitude"]; ok { + src.Latitude, _ = strconv.ParseFloat(v, 32) + } + if v, ok := evt.Enriched["Longitude"]; ok { + src.Longitude, _ = strconv.ParseFloat(v, 32) + } + if v, ok := evt.Meta["SourceRange"]; ok { + _, ipNet, err := net.ParseCIDR(v) + if err != nil { + l.logger.Errorf("Declared range %s of %s can't be parsed", v, src.Ip.String()) + } else if ipNet != nil { + src.Range = *ipNet + l.logger.Tracef("Valid range from %s : %s", src.Ip.String(), src.Range.String()) + } + } + sig.Sources[source_ip] = src + if sig.Source == nil { + sig.Source = &src + sig.Source_ip = src.Ip.String() + sig.Source_AutonomousSystemNumber = src.AutonomousSystemNumber + sig.Source_AutonomousSystemOrganization = src.AutonomousSystemOrganization + sig.Source_Country = src.Country + sig.Source_range = src.Range.String() + sig.Source_Latitude = src.Latitude + sig.Source_Longitude = src.Longitude + } + } + } else if evt.Type == types.OVFLW { + for _, src := range evt.Overflow.Sources { + if _, ok := sig.Sources[src.Ip.String()]; !ok { + sig.Sources[src.Ip.String()] = src + if sig.Source == nil { + l.logger.Tracef("populating overflow with source : %+v", src) + src := src //src will be reused, copy before giving pointer + sig.Source = &src + sig.Source_ip = src.Ip.String() + 
sig.Source_AutonomousSystemNumber = src.AutonomousSystemNumber + sig.Source_AutonomousSystemOrganization = src.AutonomousSystemOrganization + sig.Source_Country = src.Country + sig.Source_range = src.Range.String() + sig.Source_Latitude = src.Latitude + sig.Source_Longitude = src.Longitude + } + } + + } + + } + + strret, err := json.Marshal(evt.Meta) + if err != nil { + l.logger.Errorf("failed to marshal ret : %v", err) + continue + } + if sig.Source != nil { + sig.Events_sequence = append(sig.Events_sequence, types.EventSequence{ + Source: *sig.Source, + Source_ip: sig.Source_ip, + Source_AutonomousSystemNumber: sig.Source.AutonomousSystemNumber, + Source_AutonomousSystemOrganization: sig.Source.AutonomousSystemOrganization, + Source_Country: sig.Source.Country, + Serialized: string(strret), + Time: l.First_ts}) + } else { + l.logger.Warningf("Event without source ?!") + } + } + + if len(sig.Sources) > 1 { + am = fmt.Sprintf("%d IPs", len(sig.Sources)) + } else if len(sig.Sources) == 1 { + if sig.Source != nil { + am = fmt.Sprintf("%s", sig.Source.Ip.String()) + } else { + am = fmt.Sprintf("??") + } + } else { + am = "UNKNOWN" + } + + am += fmt.Sprintf(" performed '%s' (%d events over %s) at %s", l.Name, l.Total_count, l.Ovflw_ts.Sub(l.First_ts), l.Ovflw_ts) + sig.Alert_message = am + return sig +} diff --git a/pkg/leakybucket/buckets.go b/pkg/leakybucket/buckets.go new file mode 100644 index 000000000..ce25f43b2 --- /dev/null +++ b/pkg/leakybucket/buckets.go @@ -0,0 +1,25 @@ +package leakybucket + +import ( + "crypto/sha1" + "fmt" + "sync" +) + +// Buckets is the struct used to hold buckets in the context of +// main.go the idea is to have one struct to rule them all +type Buckets struct { + Bucket_map sync.Map +} + +// NewBuckets create the Buckets struct +func NewBuckets() *Buckets { + return &Buckets{ + Bucket_map: sync.Map{}, + } +} + +func GetKey(bucketCfg BucketFactory, stackkey string) string { + return fmt.Sprintf("%x", sha1.Sum([]byte(bucketCfg.Filter+stackkey+bucketCfg.Name))) + +} diff --git a/pkg/leakybucket/buckets_test.go b/pkg/leakybucket/buckets_test.go new file mode 100644 index 000000000..368164716 --- /dev/null +++ b/pkg/leakybucket/buckets_test.go @@ -0,0 +1,264 @@ +package leakybucket + +import ( + "bytes" + "fmt" + "html/template" + "io" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/parser" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/davecgh/go-spew/spew" + log "github.com/sirupsen/logrus" + yaml "gopkg.in/yaml.v2" +) + +type TestFile struct { + Lines []types.Event `yaml:"lines,omitempty"` + Results []types.Event `yaml:"results,omitempty"` +} + +func testBucketStates() { + //same as a scenario, but load a bucket state first ? 
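+	//(a sketch of what this could look like, mirroring testFile below:
+	// restore a state with LoadBucketsState(), pour the events from the
+	// test file, then compare the resulting overflows with the expected
+	// results)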
+ +} + +func TestBucket(t *testing.T) { + + var envSetting = os.Getenv("TEST_ONLY") + + if envSetting != "" { + if err := testOneBucket(t, envSetting); err != nil { + t.Fatalf("Test '%s' failed : %s", envSetting, err) + } + } else { + fds, err := ioutil.ReadDir("./tests/") + if err != nil { + t.Fatalf("Unable to read test directory : %s", err) + } + for _, fd := range fds { + fname := "./tests/" + fd.Name() + log.Infof("Running test on %s", fname) + if err := testOneBucket(t, fname); err != nil { + t.Fatalf("Test '%s' failed : %s", fname, err) + } + } + } +} + +func testOneBucket(t *testing.T, dir string) error { + + var holders []BucketFactory + + var stagefiles []byte + var stagecfg string + var stages []parser.Stagefile + var err error + + /*load the scenarios*/ + stagecfg = dir + "/scenarios.yaml" + if stagefiles, err = ioutil.ReadFile(stagecfg); err != nil { + t.Fatalf("Failed to load stage file %s : %s", stagecfg, err) + } + + tmpl, err := template.New("test").Parse(string(stagefiles)) + if err != nil { + return fmt.Errorf("failed to parse template %s : %s", stagefiles, err) + } + var out bytes.Buffer + err = tmpl.Execute(&out, map[string]string{"TestDirectory": dir}) + if err != nil { + panic(err) + } + if err := yaml.UnmarshalStrict(out.Bytes(), &stages); err != nil { + log.Fatalf("failed unmarshaling %s : %s", stagecfg, err) + } + files := []string{} + for _, x := range stages { + files = append(files, x.Filename) + } + holders, response, err := LoadBuckets(files) + if testFile(t, dir+"/test.yaml", dir+"/in-buckets_state.json", holders, response) == false { + t.Fatalf("the test failed") + } + return nil +} + +func testFile(t *testing.T, file string, bs string, holders []BucketFactory, response chan types.Event) bool { + + var results []types.Event + var buckets *Buckets + var dump bool + + buckets = NewBuckets() + //should we restore + if _, err := os.Stat(bs); err == nil { + dump = true + if err := LoadBucketsState(bs, buckets, holders); err != nil { + t.Fatalf("Failed to load bucket state : %s", err) + } + } + + /* now we can load the test files */ + //process the yaml + yamlFile, err := os.Open(file) + if err != nil { + t.Errorf("yamlFile.Get err #%v ", err) + } + dec := yaml.NewDecoder(yamlFile) + dec.SetStrict(true) + tf := TestFile{} + err = dec.Decode(&tf) + if err != nil { + if err == io.EOF { + log.Warningf("end of test file") + } else { + t.Errorf("Failed to load testfile '%s' yaml error : %v", file, err) + return false + } + } + var latest_ts time.Time + for _, in := range tf.Lines { + //just to avoid any race during ingestion of funny scenarios + time.Sleep(50 * time.Millisecond) + var ts time.Time + if err := ts.UnmarshalText([]byte(in.MarshaledTime)); err != nil { + t.Fatalf("Failed to unmarshal time from input event : %s", err) + } + if latest_ts.IsZero() { + latest_ts = ts + } else if ts.After(latest_ts) { + latest_ts = ts + } + + in.ExpectMode = TIMEMACHINE + log.Debugf("Buckets input : %s", spew.Sdump(in)) + ok, err := PourItemToHolders(in, holders, buckets) + if err != nil { + t.Fatalf("Failed to pour : %s", err) + } + if !ok { + log.Warningf("Event wasn't poured") + } + } + log.Warningf("Done pouring !") + + time.Sleep(1 * time.Second) + + //Read results from chan +POLL_AGAIN: + fails := 0 + for fails < 2 { + select { + case ret := <-response: + log.Warningf("got one result") + results = append(results, ret) + if ret.Overflow.Reprocess { + log.Debugf("Overflow being reprocessed.") + ok, err := PourItemToHolders(ret, holders, buckets) + if err != nil { + 
t.Fatalf("Failed to pour : %s", err) + } + if !ok { + log.Warningf("Event wasn't poured") + } + goto POLL_AGAIN + } + fails = 0 + default: + log.Warningf("no more results") + time.Sleep(1 * time.Second) + fails += 1 + } + } + log.Warningf("Got %d overflows from run", len(results)) + /* + check the results we got against the expected ones + only the keys of the expected part are checked against result + */ + + for { + if len(tf.Results) == 0 && len(results) == 0 { + log.Warningf("Test is successfull") + if dump { + if err := DumpBucketsStateAt(bs+".new", latest_ts, buckets); err != nil { + t.Fatalf("Failed dumping bucket state : %s", err) + } + } + return true + } else { + log.Warningf("%d results to check against %d expected results", len(results), len(tf.Results)) + if len(tf.Results) != len(results) { + if dump { + if err := DumpBucketsStateAt(bs+".new", latest_ts, buckets); err != nil { + t.Fatalf("Failed dumping bucket state : %s", err) + } + } + log.Errorf("results / expected count doesn't match results = %d / expected = %d", len(results), len(tf.Results)) + return false + } + } + var valid bool + checkresultsloop: + for eidx, out := range results { + for ridx, expected := range tf.Results { + + log.Debugf("Checking next expected result.") + valid = true + + log.Infof("go %s", spew.Sdump(out)) + //Scenario + if out.Overflow.Scenario != expected.Overflow.Scenario { + log.Errorf("(scenario) %s != %s", out.Overflow.Scenario, expected.Overflow.Scenario) + valid = false + continue + } else { + log.Infof("(scenario) %s == %s", out.Overflow.Scenario, expected.Overflow.Scenario) + valid = true + } + //Events_count + if out.Overflow.Events_count != expected.Overflow.Events_count { + log.Errorf("(Events_count) %d != %d", out.Overflow.Events_count, expected.Overflow.Events_count) + valid = false + continue + } else { + log.Infof("(Events_count) %d == %d", out.Overflow.Events_count, expected.Overflow.Events_count) + valid = true + } + //Source_ip + if out.Overflow.Source_ip != expected.Overflow.Source_ip { + log.Errorf("(Source_ip) %s != %s", out.Overflow.Source_ip, expected.Overflow.Source_ip) + valid = false + continue + } else { + log.Infof("(Source_ip) %s == %s", out.Overflow.Source_ip, expected.Overflow.Source_ip) + valid = true + } + + //CheckFailed: + + if valid == true { + log.Warningf("The test is valid, remove entry %d from expects, and %d from t.Results", eidx, ridx) + //don't do this at home : delete current element from list and redo + results[eidx] = results[len(results)-1] + results = results[:len(results)-1] + tf.Results[ridx] = tf.Results[len(tf.Results)-1] + tf.Results = tf.Results[:len(tf.Results)-1] + break checkresultsloop + } + } + } + if valid == false { + t.Fatalf("mismatching entries left") + } else { + log.Warningf("entry valid at end of loop") + } + } + + t.Errorf("failed test") + return false +} diff --git a/pkg/leakybucket/manager.go b/pkg/leakybucket/manager.go new file mode 100644 index 000000000..fbc87b80c --- /dev/null +++ b/pkg/leakybucket/manager.go @@ -0,0 +1,587 @@ +package leakybucket + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "path/filepath" + "strings" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/types" + + "github.com/davecgh/go-spew/spew" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + log "github.com/sirupsen/logrus" + + "github.com/antonmedv/expr" + "github.com/antonmedv/expr/vm" + 
"github.com/goombaio/namegenerator" + yaml "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" +) + +// BucketFactory struct holds all fields for any bucket configuration. This is to have a +// generic struct for buckets. This can be seen as a bucket factory. +type BucketFactory struct { + FormatVersion string `yaml:"format"` + Author string `yaml:"author"` + Description string `yaml:"description"` + References []string `yaml:"references"` + Type string `yaml:"type"` //Type can be : leaky, counter, trigger. It determines the main bucket characteristics + Name string `yaml:"name"` //Name of the bucket, used later in log and user-messages. Should be unique + Capacity int `yaml:"capacity"` //Capacity is applicable to leaky buckets and determines the "burst" capacity + LeakSpeed string `yaml:"leakspeed"` //Leakspeed is a float representing how many events per second leak out of the bucket + Duration string `yaml:"duration"` //Duration allows 'counter' buckets to have a fixed life-time + Filter string `yaml:"filter"` //Filter is an expr that determines if an event is elligible for said bucket. Filter is evaluated against the Event struct + GroupBy string `yaml:"groupby,omitempty"` //groupy is an expr that allows to determine the partitions of the bucket. A common example is the source_ip + Distinct string `yaml:"distinct"` //Distinct, when present, adds a `Pour()` processor that will only pour uniq items (based on uniq_filter expr result) + Debug bool `yaml:"debug"` //Debug, when set to true, will enable debugging for _this_ scenario specifically + Labels map[string]string `yaml:"labels"` //Labels is K:V list aiming at providing context the overflow + Blackhole string `yaml:"blackhole,omitempty"` //Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration + logger *log.Entry `yaml:"-"` //logger is bucket-specific logger (used by Debug as well) + Reprocess bool `yaml:"reprocess"` //Reprocess, if true, will for the bucket to be re-injected into processing chain + CacheSize int `yaml:"cache_size"` //CacheSize, if > 0, limits the size of in-memory cache of the bucket + Profiling bool `yaml:"profiling"` //Profiling, if true, will make the bucket record pours/overflows/etc. + OverflowFilter string `yaml:"overflow_filter"` //OverflowFilter if present, is a filter that must return true for the overflow to go through + BucketName string `yaml:"-"` + Filename string `yaml:"-"` + RunTimeFilter *vm.Program `json:"-"` + RunTimeGroupBy *vm.Program `json:"-"` + leakspeed time.Duration //internal representation of `Leakspeed` + duration time.Duration //internal representation of `Duration` + ret chan types.Event //the bucket-specific output chan for overflows + processors []Processor //processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.) + output bool //?? 
+} + +func ValidateFactory(b *BucketFactory) error { + if b.Name == "" { + return fmt.Errorf("A bucket must have name") + } + if b.Description == "" { + return fmt.Errorf("Description is mandatory") + } + if b.Type == "leaky" { + if b.Capacity <= 0 { //capacity must be a positive int + return fmt.Errorf("Bad capacity for leaky '%d'", b.Capacity) + } + if b.LeakSpeed == "" { + return fmt.Errorf("leakspeed can't be empty for leaky") + } + if b.leakspeed == 0 { + return fmt.Errorf("Bad leakspeed for leaky '%s'", b.LeakSpeed) + } + } else if b.Type == "counter" { + if b.Duration == "" { + return fmt.Errorf("Duration ca't be empty for counter") + } + if b.duration == 0 { + return fmt.Errorf("Bad duration for counter bucket '%d'", b.duration) + } + if b.Capacity != -1 { + return fmt.Errorf("Counter bucket must have -1 capacity") + } + } else if b.Type == "trigger" { + if b.Capacity != 0 { + return fmt.Errorf("Trigger bucket must have 0 capacity") + } + } else { + return fmt.Errorf("Unknown bucket type '%s'", b.Type) + } + return nil +} + +/* Init recursively process yaml files from a directory and loads them as BucketFactory */ +func Init(cfg map[string]string) ([]BucketFactory, chan types.Event, error) { + return LoadBucketDir(cfg["patterns"]) +} + +func LoadBuckets(files []string) ([]BucketFactory, chan types.Event, error) { + var ( + ret []BucketFactory = []BucketFactory{} + response chan types.Event + ) + + var seed namegenerator.Generator = namegenerator.NewNameGenerator(time.Now().UTC().UnixNano()) + response = make(chan types.Event, 1) + for _, f := range files { + log.Debugf("Loading %s", f) + if !strings.HasSuffix(f, ".yaml") { + log.Debugf("Skipping %s : not a yaml file", f) + continue + } + + //process the yaml + bucketConfigurationFile, err := os.Open(f) + if err != nil { + log.Errorf("Can't access leaky configuration file %s", f) + return nil, nil, err + } + dec := yaml.NewDecoder(bucketConfigurationFile) + dec.SetStrict(true) + for { + g := BucketFactory{} + err = dec.Decode(&g) + if err != nil { + if err == io.EOF { + log.Tracef("End of yaml file") + break + } else { + log.Errorf("Bad yaml in %s : %v", f, err) + return nil, nil, fmt.Errorf("Bad yaml in %s : %v", f, err) + } + } + //check empty + if g.Name == "" { + log.Errorf("Won't load nameless bucket") + return nil, nil, fmt.Errorf("Nameless bucket") + } + //check compat + if g.FormatVersion == "" { + log.Warningf("no version in %s : %s, assuming '1.0'", g.Name, f) + g.FormatVersion = "1.0" + } + ok, err := cwversion.Statisfies(g.FormatVersion, cwversion.Constraint_scenario) + if err != nil { + log.Fatalf("Failed to check version : %s", err) + } + if !ok { + log.Errorf("can't load %s : %s doesn't satisfy scenario format %s, skip", g.Name, g.FormatVersion, cwversion.Constraint_scenario) + continue + } + g.Filename = filepath.Clean(f) + g.BucketName = seed.Generate() + g.ret = response + err = LoadBucket(&g) + if err != nil { + log.Errorf("Failed to load bucket : %v", err) + return nil, nil, fmt.Errorf("LoadBucket failed : %v", err) + } + ret = append(ret, g) + } + } + log.Warningf("Loaded %d scenarios", len(ret)) + return ret, response, nil +} + +func LoadBucketDir(dir string) ([]BucketFactory, chan types.Event, error) { + var ( + filenames []string + ) + files, err := ioutil.ReadDir(dir) + if err != nil { + return nil, nil, err + } + for _, f := range files { + filenames = append(filenames, dir+f.Name()) + } + return LoadBuckets(filenames) +} + +/* Init recursively process yaml files from a directory and loads them as 
BucketFactory */ +func LoadBucket(g *BucketFactory) error { + var err error + if g.Debug == true { + var clog = logrus.New() + clog.SetFormatter(&log.TextFormatter{FullTimestamp: true}) + clog.SetLevel(log.DebugLevel) + g.logger = clog.WithFields(log.Fields{ + "cfg": g.BucketName, + "name": g.Name, + "file": g.Filename, + }) + } else { + /* else bind it to the default one (might find something more elegant here)*/ + g.logger = log.WithFields(log.Fields{ + "cfg": g.BucketName, + "name": g.Name, + "file": g.Filename, + }) + } + + if g.LeakSpeed != "" { + if g.leakspeed, err = time.ParseDuration(g.LeakSpeed); err != nil { + return fmt.Errorf("Bad leakspeed '%s' in %s : %v", g.LeakSpeed, g.Filename, err) + } + } else { + g.leakspeed = time.Duration(0) + } + if g.Duration != "" { + if g.duration, err = time.ParseDuration(g.Duration); err != nil { + return fmt.Errorf("Invalid Duration '%s' in %s : %v", g.Duration, g.Filename, err) + } + } + + if g.Filter == "" { + g.logger.Warningf("Bucket without filter, abort.") + return fmt.Errorf("Bucket without filter directive.") + } + g.RunTimeFilter, err = expr.Compile(g.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if err != nil { + return fmt.Errorf("Invalid filter '%s' in %s : %v", g.Filter, g.Filename, err) + } + + if g.GroupBy != "" { + g.RunTimeGroupBy, err = expr.Compile(g.GroupBy, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if err != nil { + return fmt.Errorf("Invalid groupby '%s' in %s : %v", g.GroupBy, g.Filename, err) + } + } + + g.logger.Infof("Adding %s bucket", g.Type) + //return the Holder correponding to the type of bucket + g.processors = []Processor{} + switch g.Type { + case "leaky": + g.processors = append(g.processors, &DumbProcessor{}) + case "trigger": + g.processors = append(g.processors, &Trigger{}) + case "counter": + g.processors = append(g.processors, &DumbProcessor{}) + default: + return fmt.Errorf("Invalid type '%s' in %s : %v", g.Type, g.Filename, err) + } + + if g.Distinct != "" { + g.logger.Debugf("Adding a non duplicate filter on %s.", g.Name) + g.processors = append(g.processors, &Uniq{}) + } + + if g.OverflowFilter != "" { + g.logger.Debugf("Adding an overflow filter") + filovflw, err := NewOverflowFilter(g) + if err != nil { + g.logger.Errorf("Error creating overflow_filter : %s", err) + return fmt.Errorf("Error creating overflow_filter : %s", err) + } + g.processors = append(g.processors, filovflw) + } + + if g.Blackhole != "" { + g.logger.Debugf("Adding blackhole.") + blackhole, err := NewBlackhole(g) + if err != nil { + g.logger.Errorf("Error creating blackhole : %s", err) + return fmt.Errorf("Error creating blackhole : %s", err) + } + g.processors = append(g.processors, blackhole) + } + + g.output = false + if err := ValidateFactory(g); err != nil { + return fmt.Errorf("Invalid bucket from %s : %v", g.Filename, err) + } + return nil + +} + +func LoadBucketsState(file string, buckets *Buckets, holders []BucketFactory) error { + var state map[string]Leaky + body, err := ioutil.ReadFile(file) + if err != nil { + return fmt.Errorf("can't state file %s : %s", file, err) + } + if err := json.Unmarshal(body, &state); err != nil { + return fmt.Errorf("can't unmarshal state file %s : %s", file, err) + } + for k, v := range state { + var tbucket *Leaky + log.Debugf("Reloading bucket %s", k) + val, ok := buckets.Bucket_map.Load(k) + if ok { + log.Fatalf("key %s already exists : %+v", k, val) + } + //find back our holder + found := false + for 
_, h := range holders { + if h.Name == v.Name { + log.Debugf("found factory %s/%s -> %s", h.Author, h.Name, h.Description) + //check in which mode the bucket was + if v.Mode == TIMEMACHINE { + tbucket = NewTimeMachine(h) + } else if v.Mode == LIVE { + tbucket = NewLeaky(h) + } else { + log.Errorf("Unknown bucket type : %d", v.Mode) + } + /*Trying to restore queue state*/ + tbucket.Queue = v.Queue + /*Trying to set the limiter to the saved values*/ + tbucket.Limiter.Load(v.SerializedState) + tbucket.In = make(chan types.Event) + tbucket.Mapkey = k + tbucket.Signal = make(chan bool, 1) + tbucket.KillSwitch = make(chan bool, 1) + tbucket.First_ts = v.First_ts + tbucket.Last_ts = v.Last_ts + tbucket.Ovflw_ts = v.Ovflw_ts + tbucket.Total_count = v.Total_count + buckets.Bucket_map.Store(k, tbucket) + go LeakRoutine(tbucket) + _ = <-tbucket.Signal + found = true + break + } + } + if found == false { + log.Fatalf("Unable to find holder for bucket %s : %s", k, spew.Sdump(v)) + } + } + + log.Infof("Restored %d buckets from dump", len(state)) + return nil + +} + +var serialized map[string]Leaky + +/*The leaky routines lifecycle are based on "real" time. +But when we are running in time-machine mode, the reference time is in logs and not "real" time. +Thus we need to garbage collect them to avoid a skyrocketing memory usage.*/ +func GarbageCollectBuckets(deadline time.Time, buckets *Buckets) error { + total := 0 + discard := 0 + toflush := []string{} + buckets.Bucket_map.Range(func(rkey, rvalue interface{}) bool { + key := rkey.(string) + val := rvalue.(*Leaky) + total += 1 + if !val.Ovflw_ts.IsZero() { + discard += 1 + val.logger.Debugf("overflowed at %s.", val.Ovflw_ts) + toflush = append(toflush, key) + val.KillSwitch <- true + return true + } + /*FIXME : sometimes the gettokenscountat has some rounding issues when we try to + match it with bucket capacity, even if the bucket has long due underflow. Round to 2 decimals*/ + tokat := val.Limiter.GetTokensCountAt(deadline) + tokcapa := float64(val.Capacity) + tokat = math.Round(tokat*100) / 100 + tokcapa = math.Round(tokcapa*100) / 100 + if tokat >= tokcapa { + BucketsUnderflow.With(prometheus.Labels{"name": val.Name}).Inc() + val.logger.Debugf("UNDERFLOW : first_ts:%s tokens_at:%f capcity:%f", val.First_ts, tokat, tokcapa) + toflush = append(toflush, key) + val.KillSwitch <- true + return true + } else { + val.logger.Debugf("(%s) not dead, count:%f capacity:%f", val.First_ts, tokat, tokcapa) + } + if _, ok := serialized[key]; ok { + log.Errorf("entry %s already exists", key) + return false + } else { + log.Debugf("serialize %s of %s : %s", val.Name, val.Uuid, val.Mapkey) + } + return true + }) + log.Infof("Cleaned %d buckets", len(toflush)) + for _, flushkey := range toflush { + buckets.Bucket_map.Delete(flushkey) + } + return nil +} + +func DumpBucketsStateAt(file string, deadline time.Time, buckets *Buckets) error { + serialized = make(map[string]Leaky) + log.Printf("Dumping buckets state at %s", deadline) + total := 0 + discard := 0 + buckets.Bucket_map.Range(func(rkey, rvalue interface{}) bool { + key := rkey.(string) + val := rvalue.(*Leaky) + total += 1 + if !val.Ovflw_ts.IsZero() { + discard += 1 + val.logger.Debugf("overflowed at %s.", val.Ovflw_ts) + return true + } + /*FIXME : sometimes the gettokenscountat has some rounding issues when we try to + match it with bucket capacity, even if the bucket has long due underflow. 
Round to 2 decimals*/ + tokat := val.Limiter.GetTokensCountAt(deadline) + tokcapa := float64(val.Capacity) + tokat = math.Round(tokat*100) / 100 + tokcapa = math.Round(tokcapa*100) / 100 + + if tokat >= tokcapa { + BucketsUnderflow.With(prometheus.Labels{"name": val.Name}).Inc() + val.logger.Debugf("UNDERFLOW : first_ts:%s tokens_at:%f capcity:%f", val.First_ts, tokat, tokcapa) + discard += 1 + return true + } else { + val.logger.Debugf("(%s) not dead, count:%f capacity:%f", val.First_ts, tokat, tokcapa) + } + if _, ok := serialized[key]; ok { + log.Errorf("entry %s already exists", key) + return false + } else { + log.Debugf("serialize %s of %s : %s", val.Name, val.Uuid, val.Mapkey) + } + val.SerializedState = val.Limiter.Dump() + serialized[key] = *val + return true + }) + bbuckets, err := json.MarshalIndent(serialized, "", " ") + if err != nil { + log.Fatalf("Failed to unmarshal buckets : %s", err) + } + err = ioutil.WriteFile(file, bbuckets, 0644) + if err != nil { + log.Fatalf("Failed to write buckets state %s", err) + } + log.Warningf("Serialized %d live buckets state, %d total with %d expired to %s", len(serialized), total, discard, file) + return nil +} + +func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buckets) (bool, error) { + var ( + ok, condition, sent bool + err error + ) + + for idx, holder := range holders { + + if holder.RunTimeFilter != nil { + log.Debugf("event against holder %d/%d", idx, len(holders)) + output, err := expr.Run(holder.RunTimeFilter, exprhelpers.GetExprEnv(map[string]interface{}{"evt": &parsed})) + if err != nil { + holder.logger.Errorf("failed parsing : %v", err) + return false, fmt.Errorf("leaky failed : %s", err) + } + // we assume we a bool should add type check here + if condition, ok = output.(bool); !ok { + holder.logger.Errorf("unexpected non-bool return : %T", output) + log.Fatalf("Filter issue") + } + if !condition { + holder.logger.Debugf("eval(FALSE) %s", holder.Filter) + //log.Debugf("%s -> FALSE", holder.Filter) + //holder.logger.Debugf("Filter eval failed") + continue + } else { + holder.logger.Debugf("eval(TRUE) %s", holder.Filter) + } + } + + sent = false + var groupby string + if holder.RunTimeGroupBy != nil { + tmpGroupBy, err := expr.Run(holder.RunTimeGroupBy, exprhelpers.GetExprEnv(map[string]interface{}{"evt": &parsed})) + if err != nil { + log.Errorf("failed groupby : %v", err) + return false, errors.New("leaky failed :/") + } + + if groupby, ok = tmpGroupBy.(string); !ok { + log.Fatalf("failed groupby type : %v", err) + return false, errors.New("groupby wrong type") + } + } + buckey := GetKey(holder, groupby) + + sigclosed := 0 + keymiss := 0 + failed_sent := 0 + attempts := 0 + start := time.Now() + for !sent { + attempts += 1 + /* Warn the user if we used more than a 100 ms to pour an event, it's at least an half lock*/ + if attempts%100000 == 0 && start.Add(100*time.Millisecond).Before(time.Now()) { + log.Warningf("stuck for %s sending event to %s (sigclosed:%d keymiss:%d failed_sent:%d attempts:%d)", time.Since(start), + buckey, sigclosed, keymiss, failed_sent, attempts) + } + biface, ok := buckets.Bucket_map.Load(buckey) + //biface, bigout + /* the bucket doesn't exist, create it !*/ + if !ok { + /* + not found in map + */ + + log.Debugf("Creating bucket %s", buckey) + keymiss += 1 + var fresh_bucket *Leaky + + switch parsed.ExpectMode { + case TIMEMACHINE: + fresh_bucket = NewTimeMachine(holder) + holder.logger.Debugf("Creating TimeMachine bucket") + case LIVE: + fresh_bucket = NewLeaky(holder) + 
holder.logger.Debugf("Creating Live bucket") + default: + log.Fatalf("input event has no expected mode, malformed : %+v", parsed) + } + fresh_bucket.In = make(chan types.Event) + fresh_bucket.Mapkey = buckey + fresh_bucket.Signal = make(chan bool, 1) + fresh_bucket.KillSwitch = make(chan bool, 1) + buckets.Bucket_map.Store(buckey, fresh_bucket) + go LeakRoutine(fresh_bucket) + log.Debugf("Created new bucket %s", buckey) + //wait for signal to be opened + _ = <-fresh_bucket.Signal + continue + } + + bucket := biface.(*Leaky) + /* check if leak routine is up */ + select { + case _, ok := <-bucket.Signal: + if !ok { + //it's closed, delete it + bucket.logger.Debugf("Bucket %s found dead, cleanup the body", buckey) + buckets.Bucket_map.Delete(buckey) + sigclosed += 1 + continue + } + log.Debugf("Signal exists, try to pour :)") + + default: + /*nothing to read, but not closed, try to pour */ + log.Debugf("Signal exists but empty, try to pour :)") + + } + /*let's see if this time-bucket should have expired */ + if bucket.Mode == TIMEMACHINE && !bucket.First_ts.IsZero() { + var d time.Time + err = d.UnmarshalText([]byte(parsed.MarshaledTime)) + if err != nil { + log.Warningf("Failed unmarshaling event time (%s) : %v", parsed.MarshaledTime, err) + } + if d.After(bucket.Last_ts.Add(bucket.Duration)) { + bucket.logger.Debugf("bucket is expired (curr event: %s, bucket deadline: %s), kill", d, bucket.Last_ts.Add(bucket.Duration)) + buckets.Bucket_map.Delete(buckey) + continue + } + } + /*if we're here, let's try to pour */ + + select { + case bucket.In <- parsed: + log.Debugf("Successfully sent !") + //sent was successful ! + sent = true + continue + default: + failed_sent += 1 + log.Debugf("Failed to send, try again") + continue + + } + } + + log.Debugf("bucket '%s' is poured", holder.Name) + } + return sent, nil +} diff --git a/pkg/leakybucket/overflow_filter.go b/pkg/leakybucket/overflow_filter.go new file mode 100644 index 000000000..6525eecca --- /dev/null +++ b/pkg/leakybucket/overflow_filter.go @@ -0,0 +1,64 @@ +package leakybucket + +import ( + "fmt" + + "github.com/antonmedv/expr" + "github.com/antonmedv/expr/vm" + + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +// Uniq creates three new functions that share the same initialisation and the same scope. 
diff --git a/pkg/leakybucket/overflow_filter.go b/pkg/leakybucket/overflow_filter.go new file mode 100644 index 000000000..6525eecca --- /dev/null +++ b/pkg/leakybucket/overflow_filter.go @@ -0,0 +1,64 @@ +package leakybucket + +import ( + "fmt" + + "github.com/antonmedv/expr" + "github.com/antonmedv/expr/vm" + + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +// OverflowFilter is a processor that runs a filter expression when the bucket overflows : +// if the filter returns false, the overflow is discarded (blackholed). + +type OverflowFilter struct { + Filter string + FilterRuntime *vm.Program + DumbProcessor +} + +func NewOverflowFilter(g *BucketFactory) (*OverflowFilter, error) { + var err error + + u := OverflowFilter{} + u.Filter = g.OverflowFilter + u.FilterRuntime, err = expr.Compile(u.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{ + "queue": &Queue{}, "signal": &types.SignalOccurence{}, "leaky": &Leaky{}}))) + if err != nil { + g.logger.Errorf("Unable to compile filter : %v", err) + return nil, fmt.Errorf("Unable to compile filter : %v", err) + } + return &u, nil +} + +func (u *OverflowFilter) OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, types.SignalOccurence, *Queue) (types.SignalOccurence, *Queue) { + return func(l *Leaky, s types.SignalOccurence, q *Queue) (types.SignalOccurence, *Queue) { + el, err := expr.Run(u.FilterRuntime, exprhelpers.GetExprEnv(map[string]interface{}{ + "queue": q, "signal": s, "leaky": l})) + if err != nil { + l.logger.Errorf("Failed running overflow filter: %s", err) + return s, q + } + element, ok := el.(bool) + if !ok { + l.logger.Errorf("Overflow filter didn't return a bool but %T", el) + return s, q + } + /*filter returned false, event is blackholed*/ + if !element { + l.logger.Infof("Event is discarded by overflow filter (%s)", u.Filter) + return types.SignalOccurence{ + MapKey: l.Mapkey, + }, nil + } else { + l.logger.Debugf("Event is not discarded by overflow filter (%s)", u.Filter) + } + return s, q + } +} diff --git a/pkg/leakybucket/processor.go b/pkg/leakybucket/processor.go new file mode 100644 index 000000000..16f7ac800 --- /dev/null +++ b/pkg/leakybucket/processor.go @@ -0,0 +1,29 @@ +package leakybucket + +import "github.com/crowdsecurity/crowdsec/pkg/types" + +type Processor interface { + OnBucketInit(Bucket *BucketFactory) error + OnBucketPour(Bucket *BucketFactory) func(types.Event, *Leaky) *types.Event + OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, types.SignalOccurence, *Queue) (types.SignalOccurence, *Queue) +} + +type DumbProcessor struct { +} + +func (d *DumbProcessor) OnBucketInit(b *BucketFactory) error { + return nil +} + +func (d *DumbProcessor) OnBucketPour(b *BucketFactory) func(types.Event, *Leaky) *types.Event { + return func(msg types.Event, l *Leaky) *types.Event { + return &msg + } +} + +func (d *DumbProcessor) OnBucketOverflow(b *BucketFactory) func(*Leaky, types.SignalOccurence, *Queue) (types.SignalOccurence, *Queue) { + return func(l *Leaky, s types.SignalOccurence, q *Queue) (types.SignalOccurence, *Queue) { + return s, q + } + +}
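`Processor` is the extension point that `Uniq`, `OverflowFilter` and `Trigger` (later in this diff) implement, with `DumbProcessor` as the no-op base to embed. A hypothetical processor that merely counts poured events shows the minimal shape; it is a sketch assumed to live in the `leakybucket` package, not part of this change:

```go
// CountingProcessor embeds DumbProcessor to inherit no-op
// OnBucketInit/OnBucketOverflow, and overrides OnBucketPour only.
type CountingProcessor struct {
	DumbProcessor
	count int
}

func (c *CountingProcessor) OnBucketPour(b *BucketFactory) func(types.Event, *Leaky) *types.Event {
	return func(msg types.Event, l *Leaky) *types.Event {
		c.count++ // side effect only: the event itself passes through untouched
		l.logger.Debugf("poured %d events so far", c.count)
		return &msg // returning nil instead would discard the event, as Uniq does
	}
}
```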
diff --git a/pkg/leakybucket/queue.go b/pkg/leakybucket/queue.go new file mode 100644 index 000000000..9bbe04462 --- /dev/null +++ b/pkg/leakybucket/queue.go @@ -0,0 +1,76 @@ +package leakybucket + +import ( + "reflect" + + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" +) + +//A very simple queue mechanism to keep track of the objects in the bucket + +// Queue is a simple struct that holds a limited size queue +type Queue struct { + Queue []types.Event + L int //capacity +} + +// NewQueue creates a new queue with a size of l +func NewQueue(l int) *Queue { + if l == -1 { + return &Queue{ + Queue: make([]types.Event, 0), + L: int(^uint(0) >> 1), // max integer value, architecture independent + } + } + q := &Queue{ + Queue: make([]types.Event, 0, l), + L: l, + } + log.WithFields(log.Fields{"Capacity": q.L}).Debugf("Creating queue") + return q +} + +// Add appends the event m to the queue. If the queue already holds l elements, +// the oldest one is dropped before adding m +func (q *Queue) Add(m types.Event) { + for len(q.Queue) > q.L { //we allow to add one element more than the true capacity + q.Queue = q.Queue[1:] + } + q.Queue = append(q.Queue, m) +} + +//Remove removes and returns the first (oldest) element of the queue +func (q *Queue) Remove() *types.Event { + if len(q.Queue) > 0 { + var dropped types.Event = q.Queue[0] + q.Queue = q.Queue[1:] + return &dropped + } + return nil +} + +// GetQueue returns the entire queue +func (q *Queue) GetQueue() []types.Event { + return q.Queue +} + +// In reports whether evt is already in the queue +func (q *Queue) In(evt types.Event) bool { + for _, element := range q.Queue { + if reflect.DeepEqual(element, evt) { + return true + } + } + return false +} + +// Len returns the number of elements in the queue +func (q *Queue) Len() int { + return len(q.Queue) +} + +// Size returns the capacity of the queue +func (q *Queue) Size() int { + return q.L +} diff --git a/pkg/leakybucket/tests/simple-leaky-blackhole/bucket.yaml b/pkg/leakybucket/tests/simple-leaky-blackhole/bucket.yaml new file mode 100644 index 000000000..ed199b2a0 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-blackhole/bucket.yaml @@ -0,0 +1,13 @@ +# ssh bruteforce +type: leaky +debug: true +name: test/simple-leaky +description: "Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +leakspeed: "10s" +capacity: 1 +blackhole: 1m +groupby: evt.Meta.source_ip +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-leaky-blackhole/scenarios.yaml b/pkg/leakybucket/tests/simple-leaky-blackhole/scenarios.yaml new file mode 100644 index 000000000..f45f7be12 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-blackhole/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-leaky-blackhole/test.yaml b/pkg/leakybucket/tests/simple-leaky-blackhole/test.yaml new file mode 100644 index 000000000..8d897e997 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-blackhole/test.yaml @@ -0,0 +1,66 @@ +lines: +#the first two will trigger overflow + - Line: + Labels: + type: testlog + Raw: xxheader VALUE1 trailing stuff + MarshaledTime: 2020-01-01T10:00:00Z + Meta: + source_ip: 1.2.3.4 + entry: 1 + - Line: + Labels: + type: testlog + Raw: xxheader VALUE2 trailing stuff + MarshaledTime: 2020-01-01T10:00:04Z + Meta: + source_ip: 1.2.3.4 + entry: 2 +#the next overflow will be blackholed + - Line: + Labels: + type: testlog + Raw: xxheader VALUE3 trailing stuff + MarshaledTime: 2020-01-01T10:00:15Z + Meta: + source_ip: 1.2.3.4 + entry: 3 + - Line: + Labels: + type: testlog + Raw: xxheader VALUE4 trailing stuff + MarshaledTime: 2020-01-01T10:00:16Z + Meta: + source_ip: 1.2.3.4 + entry: 4 +#but this one won't + - Line: + Labels: + type: testlog + Raw: xxheader VALUE5 trailing stuff + MarshaledTime: 2020-01-01T10:01:15Z + Meta: + source_ip: 1.2.3.4 + entry: 5 + - Line: + Labels: + type: testlog + Raw: xxheader VALUE6 trailing stuff + MarshaledTime: 2020-01-01T10:01:16Z + Meta: + source_ip: 1.2.3.4 + entry: 6 +results: +#the first overflow + - Overflow: + scenario: test/simple-leaky + Source_ip: 1.2.3.4 + Events_count: 2 +#the blackholed one + - Overflow: + scenario: +#the second one + - Overflow: + scenario: test/simple-leaky + Source_ip: 1.2.3.4 + Events_count: 2 diff --git a/pkg/leakybucket/tests/simple-leaky-overflow/bucket.yaml b/pkg/leakybucket/tests/simple-leaky-overflow/bucket.yaml new file mode
100644 index 000000000..8c94d2c2e --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-overflow/bucket.yaml @@ -0,0 +1,11 @@ +type: leaky +debug: true +name: test/simple-leaky +description: "Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +leakspeed: "10s" +capacity: 1 +groupby: evt.Meta.source_ip +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-leaky-overflow/scenarios.yaml b/pkg/leakybucket/tests/simple-leaky-overflow/scenarios.yaml new file mode 100644 index 000000000..f45f7be12 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-overflow/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-leaky-overflow/test.yaml b/pkg/leakybucket/tests/simple-leaky-overflow/test.yaml new file mode 100644 index 000000000..7fc1bb83b --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-overflow/test.yaml @@ -0,0 +1,23 @@ +#this one will trigger a simple overflow +lines: + - Line: + Labels: + type: testlog + Raw: xxheader VALUE1 trailing stuff + MarshaledTime: 2020-01-01T10:00:00Z + Meta: + source_ip: 1.2.3.4 + - Line: + Labels: + type: testlog + Raw: xxheader VALUE2 trailing stuff + MarshaledTime: 2020-01-01T10:00:05Z + Meta: + source_ip: 1.2.3.4 +results: + - Overflow: + scenario: test/simple-leaky + Source_ip: 1.2.3.4 + Events_count: 2 + + diff --git a/pkg/leakybucket/tests/simple-leaky-ovflwfilter/bucket.yaml b/pkg/leakybucket/tests/simple-leaky-ovflwfilter/bucket.yaml new file mode 100644 index 000000000..f340c787f --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-ovflwfilter/bucket.yaml @@ -0,0 +1,27 @@ +# ssh bruteforce +type: leaky +debug: true +name: test/filter-discard +description: "ko" +filter: "evt.Line.Labels.type =='testlog'" +leakspeed: "10s" +capacity: 1 +overflow_filter: any(queue.Queue, { Atof(.Meta.specvalue) > 3}) +#overflow_filter: Atof() +groupby: evt.Meta.source_ip +labels: + type: overflow_1 +--- +# ssh bruteforce +type: leaky +debug: true +name: test/filter-ok +description: "ok" +filter: "evt.Line.Labels.type =='testlog'" +leakspeed: "10s" +capacity: 1 +overflow_filter: any(queue.Queue, { Atof(.Meta.specvalue) > 1}) +#overflow_filter: Atof() +groupby: evt.Meta.source_ip +labels: + type: overflow_2 diff --git a/pkg/leakybucket/tests/simple-leaky-ovflwfilter/scenarios.yaml b/pkg/leakybucket/tests/simple-leaky-ovflwfilter/scenarios.yaml new file mode 100644 index 000000000..f45f7be12 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-ovflwfilter/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-leaky-ovflwfilter/test.yaml b/pkg/leakybucket/tests/simple-leaky-ovflwfilter/test.yaml new file mode 100644 index 000000000..eee84b4dc --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-ovflwfilter/test.yaml @@ -0,0 +1,28 @@ +#this one won't due to leakspeed / delay +lines: + - Line: + Labels: + type: testlog + Raw: xxheader VALUE1 trailing stuff + MarshaledTime: 2020-01-01T10:00:00Z + Meta: + source_ip: 1.2.3.4 + specvalue: 1 + - Line: + Labels: + type: testlog + Raw: xxheader VALUE2 trailing stuff + MarshaledTime: 2020-01-01T10:00:01Z + Meta: + source_ip: 1.2.3.4 + specvalue: 2 +results: + - Overflow: + scenario: + - Overflow: + scenario: test/filter-ok + Events_count: 2 + Source_ip: 1.2.3.4 + + + diff --git a/pkg/leakybucket/tests/simple-leaky-underflow/bucket.yaml b/pkg/leakybucket/tests/simple-leaky-underflow/bucket.yaml new file mode 100644 index 000000000..e02e1c0a9 --- /dev/null +++ 
b/pkg/leakybucket/tests/simple-leaky-underflow/bucket.yaml @@ -0,0 +1,12 @@ +# ssh bruteforce +type: leaky +debug: true +name: test/simple-leaky +description: "Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +leakspeed: "10s" +capacity: 1 +groupby: evt.Meta.source_ip +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-leaky-underflow/scenarios.yaml b/pkg/leakybucket/tests/simple-leaky-underflow/scenarios.yaml new file mode 100644 index 000000000..f45f7be12 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-underflow/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-leaky-underflow/test.yaml b/pkg/leakybucket/tests/simple-leaky-underflow/test.yaml new file mode 100644 index 000000000..2c12f79aa --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-underflow/test.yaml @@ -0,0 +1,20 @@ +#this one won't due to leakspeed / delay +lines: + - Line: + Labels: + type: testlog + Raw: xxheader VALUE1 trailing stuff + MarshaledTime: 2020-01-01T10:00:00Z + Meta: + source_ip: 1.2.3.4 + - Line: + Labels: + type: testlog + Raw: xxheader VALUE2 trailing stuff + MarshaledTime: 2020-01-01T10:00:10Z + Meta: + source_ip: 1.2.3.4 +results: + + + diff --git a/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/bucket.yaml b/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/bucket.yaml new file mode 100644 index 000000000..4fca336f9 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/bucket.yaml @@ -0,0 +1,13 @@ +# ssh bruteforce +type: leaky +debug: true +name: test/simple-leaky +description: "Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +leakspeed: "10s" +capacity: 3 +distinct: evt.Meta.uniq_key +groupby: evt.Meta.source_ip +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/in-buckets_state.json b/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/in-buckets_state.json new file mode 100644 index 000000000..88b0b645d --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/in-buckets_state.json @@ -0,0 +1,113 @@ +{ + "cdf58e6ae48e79ac3ae0f006e1a2e627eccd8b63": { + "Name": "test/simple-leaky", + "Mode": 1, + "SerializedState": { + "Limit": 0.1, + "Burst": 3, + "Tokens": 1.1, + "Last": "2020-01-01T10:00:05Z", + "LastEvent": "2020-01-01T10:00:05Z" + }, + "Queue": { + "Queue": [ + { + "Type": 0, + "ExpectMode": 1, + "Whitelisted": false, + "Stage": "", + "Overflow": { + "MapKey": "", + "start_at": "0001-01-01T00:00:00Z", + "stop_at": "0001-01-01T00:00:00Z", + "source": null, + "Source_ip": "", + "Source_range": "", + "Source_AutonomousSystemNumber": "", + "Source_AutonomousSystemOrganization": "", + "Source_Country": "", + "Source_Latitude": 0, + "Source_Longitude": 0, + "Reprocess": false, + "Labels": null + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "2020-01-01T10:00:04Z", + "Process": false, + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aaa" + } + }, + { + "Type": 0, + "ExpectMode": 1, + "Whitelisted": false, + "Stage": "", + "Overflow": { + "MapKey": "", + "start_at": "0001-01-01T00:00:00Z", + "stop_at": "0001-01-01T00:00:00Z", + "source": null, + "Source_ip": "", + "Source_range": "", + "Source_AutonomousSystemNumber": "", + "Source_AutonomousSystemOrganization": "", + "Source_Country": "", + "Source_Latitude": 0, + "Source_Longitude": 0, + "Reprocess": false, + "Labels": null + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": 
"2020-01-01T10:00:05Z", + "Process": false, + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aab" + } + } + ], + "L": 3 + }, + "Capacity": 3, + "CacheSize": 0, + "Mapkey": "cdf58e6ae48e79ac3ae0f006e1a2e627eccd8b63", + "Reprocess": false, + "Uuid": "dark-bush", + "First_ts": "2020-01-01T10:00:04Z", + "Last_ts": "2020-01-01T10:00:05Z", + "Ovflw_ts": "0001-01-01T00:00:00Z", + "Total_count": 2, + "Leakspeed": 10000000000, + "BucketConfig": { + "FormatVersion": "1.0", + "Author": "", + "Description": "Simple leaky", + "References": null, + "Type": "leaky", + "Name": "test/simple-leaky", + "Capacity": 3, + "LeakSpeed": "10s", + "Duration": "", + "Filter": "evt.Line.Labels.type =='testlog'", + "GroupBy": "evt.Meta.source_ip", + "Distinct": "evt.Meta.uniq_key", + "Debug": true, + "Labels": { + "type": "overflow_1" + }, + "Blackhole": "", + "Reprocess": false, + "CacheSize": 0, + "Profiling": false, + "OverflowFilter": "", + "BucketName": "lingering-river", + "Filename": "/home/bui/github/crowdsec/crowdsec/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/bucket.yaml" + }, + "Duration": 40000000000, + "Profiling": false + } +} \ No newline at end of file diff --git a/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/scenarios.yaml b/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/scenarios.yaml new file mode 100644 index 000000000..f45f7be12 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/test.yaml b/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/test.yaml new file mode 100644 index 000000000..565f06c48 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/test.yaml @@ -0,0 +1,35 @@ +#this one won't due to leakspeed / delay +lines: + - Line: + Labels: + type: testlog + Raw: xxheader VALUE3 trailing stuff + MarshaledTime: 2020-01-01T10:00:06Z + Meta: + source_ip: 1.2.3.4 + uniq_key: baa +#this one will be discarded because of uniq + - Line: + Labels: + type: testlog + Raw: xxheader VALUE3 trailing stuff + MarshaledTime: 2020-01-01T10:00:07Z + Meta: + source_ip: 1.2.3.4 + uniq_key: baa +#not this one + - Line: + Labels: + type: testlog + Raw: xxheader VALUE4 trailing stuff + MarshaledTime: 2020-01-01T10:00:08Z + Meta: + source_ip: 1.2.3.4 + uniq_key: bab +results: + - Overflow: + scenario: test/simple-leaky + Source_ip: 1.2.3.4 + Events_count: 4 + + diff --git a/pkg/leakybucket/tests/simple-leaky-uniq/bucket.yaml b/pkg/leakybucket/tests/simple-leaky-uniq/bucket.yaml new file mode 100644 index 000000000..b9692e10e --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-uniq/bucket.yaml @@ -0,0 +1,13 @@ +# ssh bruteforce +type: leaky +debug: true +name: test/simple-leaky +description: "Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +leakspeed: "10s" +capacity: 1 +distinct: evt.Meta.uniq_key +groupby: evt.Meta.source_ip +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-leaky-uniq/scenarios.yaml b/pkg/leakybucket/tests/simple-leaky-uniq/scenarios.yaml new file mode 100644 index 000000000..f45f7be12 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-uniq/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-leaky-uniq/test.yaml b/pkg/leakybucket/tests/simple-leaky-uniq/test.yaml new file mode 100644 index 000000000..9e1b1c5e2 --- /dev/null +++ 
b/pkg/leakybucket/tests/simple-leaky-uniq/test.yaml @@ -0,0 +1,33 @@ +#this one won't due to leakspeed / delay +lines: + - Line: + Labels: + type: testlog + Raw: xxheader VALUE1 trailing stuff + MarshaledTime: 2020-01-01T10:00:00Z + Meta: + source_ip: 1.2.3.4 + uniq_key: aaa + - Line: + Labels: + type: testlog + Raw: xxheader VALUE2 trailing stuff + MarshaledTime: 2020-01-01T10:00:01Z + Meta: + source_ip: 1.2.3.4 + uniq_key: aaa + - Line: + Labels: + type: testlog + Raw: xxheader VALUE2 trailing stuff + MarshaledTime: 2020-01-01T10:00:02Z + Meta: + source_ip: 1.2.3.4 + uniq_key: aab +results: + - Overflow: + scenario: test/simple-leaky + Source_ip: 1.2.3.4 + Events_count: 2 + + diff --git a/pkg/leakybucket/tests/simple-trigger-reprocess/bucket.yaml b/pkg/leakybucket/tests/simple-trigger-reprocess/bucket.yaml new file mode 100644 index 000000000..47acd710d --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger-reprocess/bucket.yaml @@ -0,0 +1,10 @@ +type: trigger +debug: true +name: test/simple-trigger +description: "Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +groupby: evt.Meta.source_ip +reprocess: true +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-trigger-reprocess/reprocess.yaml b/pkg/leakybucket/tests/simple-trigger-reprocess/reprocess.yaml new file mode 100644 index 000000000..aee5996f7 --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger-reprocess/reprocess.yaml @@ -0,0 +1,8 @@ +type: trigger +debug: true +name: test/simple-postoverflow-scenario +description: "Simple post overflow" +filter: "len(evt.Overflow.Scenario) > 0" +labels: + type: overflow_2 + diff --git a/pkg/leakybucket/tests/simple-trigger-reprocess/scenarios.yaml b/pkg/leakybucket/tests/simple-trigger-reprocess/scenarios.yaml new file mode 100644 index 000000000..a6c56d391 --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger-reprocess/scenarios.yaml @@ -0,0 +1,3 @@ + - filename: {{.TestDirectory}}/bucket.yaml + - filename: {{.TestDirectory}}/reprocess.yaml + diff --git a/pkg/leakybucket/tests/simple-trigger-reprocess/test.yaml b/pkg/leakybucket/tests/simple-trigger-reprocess/test.yaml new file mode 100644 index 000000000..6fe037a14 --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger-reprocess/test.yaml @@ -0,0 +1,19 @@ +#this one will trigger a simple overflow +lines: + - Line: + Labels: + type: testlog + Raw: xxheader VALUE1 trailing stuff + MarshaledTime: 2020-01-01T10:00:00Z + Meta: + source_ip: 1.2.3.4 +results: + - Overflow: + scenario: test/simple-trigger + Source_ip: 1.2.3.4 + Events_count: 1 + - Overflow: + scenario: test/simple-postoverflow-scenario + Source_ip: 1.2.3.4 + Events_count: 1 + diff --git a/pkg/leakybucket/tests/simple-trigger/bucket.yaml b/pkg/leakybucket/tests/simple-trigger/bucket.yaml new file mode 100644 index 000000000..dcccab74e --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger/bucket.yaml @@ -0,0 +1,9 @@ +type: trigger +debug: true +name: test/simple-trigger +description: "Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +groupby: evt.Meta.source_ip +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-trigger/scenarios.yaml b/pkg/leakybucket/tests/simple-trigger/scenarios.yaml new file mode 100644 index 000000000..f45f7be12 --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-trigger/test.yaml b/pkg/leakybucket/tests/simple-trigger/test.yaml new file mode 100644 index 
000000000..e8ea9821d --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger/test.yaml @@ -0,0 +1,16 @@ +#this one will trigger a simple overflow +lines: + - Line: + Labels: + type: testlog + Raw: xxheader VALUE1 trailing stuff + MarshaledTime: 2020-01-01T10:00:00Z + Meta: + source_ip: 1.2.3.4 +results: + - Overflow: + scenario: test/simple-trigger + Source_ip: 1.2.3.4 + Events_count: 1 + + diff --git a/pkg/leakybucket/timemachine.go b/pkg/leakybucket/timemachine.go new file mode 100644 index 000000000..01f9d362b --- /dev/null +++ b/pkg/leakybucket/timemachine.go @@ -0,0 +1,51 @@ +package leakybucket + +import ( + "time" + + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/davecgh/go-spew/spew" + log "github.com/sirupsen/logrus" +) + +func TimeMachinePour(l *Leaky, msg types.Event) { + var ( + d time.Time + err error + ) + if msg.MarshaledTime == "" { + log.Warningf("Trying to time-machine event without timestamp : %s", spew.Sdump(msg)) + return + } + + err = d.UnmarshalText([]byte(msg.MarshaledTime)) + if err != nil { + log.Warningf("Failed unmarshaling event time (%s) : %v", msg.MarshaledTime, err) + return + } + + l.Total_count += 1 + if l.First_ts.IsZero() { + l.logger.Debugf("First event, bucket creation time : %s", d) + l.First_ts = d + } + l.Last_ts = d + + if l.Limiter.AllowN(d, 1) { + l.logger.Tracef("Time-Pouring event %s (tokens:%f)", d, l.Limiter.GetTokensCount()) + l.Queue.Add(msg) + } else { + l.Ovflw_ts = d + l.logger.Debugf("Bucket overflow at %s", l.Ovflw_ts) + l.Queue.Add(msg) + l.Out <- l.Queue + } +} + +func NewTimeMachine(g BucketFactory) *Leaky { + l := NewLeaky(g) + g.logger.Tracef("Instantiating timeMachine bucket") + l.Pour = TimeMachinePour + l.Mode = TIMEMACHINE + return l +} diff --git a/pkg/leakybucket/trigger.go b/pkg/leakybucket/trigger.go new file mode 100644 index 000000000..14f0b3bec --- /dev/null +++ b/pkg/leakybucket/trigger.go @@ -0,0 +1,26 @@ +package leakybucket + +import ( + "time" + + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +type Trigger struct { + DumbProcessor +} + +func (t *Trigger) OnBucketPour(b *BucketFactory) func(types.Event, *Leaky) *types.Event { + // the returned pour function overflows unconditionally : every poured event triggers an overflow + return func(msg types.Event, l *Leaky) *types.Event { + l.Total_count = 1 + l.First_ts = time.Now() + l.Ovflw_ts = time.Now() + l.logger.Infof("Bucket overflow") + l.Queue.Add(msg) + l.Out <- l.Queue + + return nil + } +}
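`TimeMachinePour` stands or falls with `MarshaledTime` being a valid RFC3339 timestamp parsed through `time.Time.UnmarshalText`; the same parsing drives the expiry check in `PourItemToHolders` (an event dated after `Last_ts + Duration` kills the bucket). A standalone sketch of that arithmetic; the `(capacity+1) * leakspeed` window is an inference from the `Duration: 40s` / `Capacity: 3` / `LeakSpeed: 10s` values in the buckets_state fixture earlier in this diff, not a documented formula:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	var last time.Time
	// MarshaledTime as found in the test fixtures
	if err := last.UnmarshalText([]byte("2020-01-01T10:00:05Z")); err != nil {
		panic(err)
	}
	leakspeed := 10 * time.Second
	capacity := 3
	duration := time.Duration(capacity+1) * leakspeed // 40s, matching the fixture

	event := last.Add(45 * time.Second) // a later, replayed event
	if event.After(last.Add(duration)) {
		fmt.Println("bucket expired before this event : it would be deleted")
	}
}
```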
diff --git a/pkg/leakybucket/uniq.go b/pkg/leakybucket/uniq.go new file mode 100644 index 000000000..f1486acab --- /dev/null +++ b/pkg/leakybucket/uniq.go @@ -0,0 +1,67 @@ +package leakybucket + +import ( + "fmt" + + "github.com/antonmedv/expr" + "github.com/antonmedv/expr/vm" + + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +// Uniq creates three new functions that share the same initialisation and the same scope. +// They are triggered respectively: +// on pour +// on overflow +// on leak + +type Uniq struct { + DistinctCompiled *vm.Program +} + +func (u *Uniq) OnBucketPour(Bucket *BucketFactory) func(types.Event, *Leaky) *types.Event { + return func(msg types.Event, Leaky *Leaky) *types.Event { + element, err := getElement(msg, u.DistinctCompiled) + if err != nil { + Leaky.logger.Errorf("Uniq filter exec failed : %v", err) + return &msg + } + Leaky.logger.Tracef("Uniq '%s' -> '%s'", Bucket.Distinct, element) + for _, evt := range Leaky.Queue.GetQueue() { + val, err := getElement(evt, u.DistinctCompiled) + if err != nil { + Leaky.logger.Errorf("Uniq filter exec failed : %v", err) + continue + } + if val == element { + Leaky.logger.Debugf("Uniq(%s) : ko, discard event", element) + return nil + } + } + Leaky.logger.Debugf("Uniq(%s) : ok", element) + return &msg + } +} + +func (u *Uniq) OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, types.SignalOccurence, *Queue) (types.SignalOccurence, *Queue) { + return func(l *Leaky, sig types.SignalOccurence, queue *Queue) (types.SignalOccurence, *Queue) { + return sig, queue + } +} + +func (u *Uniq) OnBucketInit(Bucket *BucketFactory) error { + var err error + + u.DistinctCompiled, err = expr.Compile(Bucket.Distinct, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + return err +} + +// getElement computes a string from an event and a filter +func getElement(msg types.Event, cFilter *vm.Program) (string, error) { + el, err := expr.Run(cFilter, exprhelpers.GetExprEnv(map[string]interface{}{"evt": &msg})) + if err != nil { + return "", err + } + element, ok := el.(string) + if !ok { + return "", fmt.Errorf("filter returned non-string type %T", el) + } + return element, nil
} diff --git a/pkg/outputs/ouputs.go b/pkg/outputs/ouputs.go new file mode 100644 index 000000000..21314f863 --- /dev/null +++ b/pkg/outputs/ouputs.go @@ -0,0 +1,309 @@ +package outputs + +import ( + "fmt" + "io" + "os" + "strconv" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/cwplugin" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/types" + + "github.com/crowdsecurity/crowdsec/pkg/cwapi" + + "github.com/antonmedv/expr" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" +) + +type OutputFactory struct { + BackendFolder string `yaml:"backend"` +} + +type Output struct { + API *cwapi.ApiCtx + bManager *cwplugin.BackendManager +} + +/* +Transform an overflow (SignalOccurence) and a Profile into a BanOrder +*/ +func OvflwToOrder(sig types.SignalOccurence, prof types.Profile) (*types.BanOrder, error, error) { + var ordr types.BanOrder + var warn error + + //Identify the remediation type + if prof.Remediation.Ban { + ordr.MeasureType = "ban" + } else if prof.Remediation.Slow { + ordr.MeasureType = "slow" + } else if prof.Remediation.Captcha { + ordr.MeasureType = "captcha" + } else { + /*if the profile has no remediation, no order */ + return nil, nil, fmt.Errorf("no remediation") + } + ordr.MeasureSource = "local" + ordr.Reason = sig.Scenario + //Identify the scope + v, ok := sig.Labels["scope"] + if !ok { + //if remediation_scope isn't specified, it's IP + v = "ip" + } + ordr.Scope = v + asn, err := strconv.Atoi(sig.Source.AutonomousSystemNumber) + if err != nil { + warn = fmt.Errorf("Invalid AS number : %s : %s", sig.Source.AutonomousSystemNumber, err) + } + ordr.TargetAS = asn + ordr.TargetASName = sig.Source.AutonomousSystemOrganization + ordr.TargetIP = sig.Source.Ip + ordr.TargetRange = sig.Source.Range + ordr.TargetCountry =
sig.Source.Country + switch v { + case "range": + ordr.TxtTarget = ordr.TargetRange.String() + case "ip": + ordr.TxtTarget = ordr.TargetIP.String() + case "as": + ordr.TxtTarget = fmt.Sprintf("ban as %d (unsupported)", ordr.TargetAS) + case "country": + ordr.TxtTarget = fmt.Sprintf("ban country %s (unsupported)", ordr.TargetCountry) + default: + log.Errorf("Unknown remediation scope '%s'", v) + return nil, fmt.Errorf("unknown remediation scope"), nil + } + //Set deadline + ordr.Until = sig.Stop_at.Add(prof.Remediation.TimeDuration) + return &ordr, nil, warn +} + +func (o *Output) FlushAll() { + if o.API != nil { + if err := o.API.Flush(); err != nil { + log.Errorf("Failed to flush API : %s", err) + } + } + if o.bManager != nil { + if err := o.bManager.Flush(); err != nil { + log.Errorf("Failed to flush backend : %s", err) + } + } +} + +func (o *Output) ProcessOutput(sig types.SignalOccurence, profiles []types.Profile) error { + + var logger *log.Entry + if sig.Source != nil { + logger = log.WithFields(log.Fields{ + "source_ip": sig.Source.Ip.String(), + "scenario": sig.Scenario, + "bucket_id": sig.Bucket_id, + "event_time": sig.Stop_at, + }) + } else { + logger = log.WithFields(log.Fields{ + "scenario": sig.Scenario, + "bucket_id": sig.Bucket_id, + "event_time": sig.Stop_at, + }) + } + + for _, profile := range profiles { + if profile.RunTimeFilter != nil { + //Evaluate the profile's filter + output, err := expr.Run(profile.RunTimeFilter, exprhelpers.GetExprEnv(map[string]interface{}{"sig": sig})) + if err != nil { + logger.Warningf("failed to run filter : %v", err) + continue + } + switch output.(type) { + case bool: + /* filter returned false, don't process this profile */ + if !output.(bool) { + logger.Debugf("eval(FALSE) '%s'", profile.Filter) + continue + } + default: + logger.Warningf("Expr '%s' returned non-bool", profile.Filter) + continue + } + logger.Debugf("eval(TRUE) '%s'", profile.Filter) + } + /*the filter was ok*/ + ordr, err, warn := OvflwToOrder(sig, profile) + if err != nil { + logger.Errorf("Unable to turn Overflow into Order : %v", err) + return err + } + if warn != nil { + logger.Infof("node warning : %s", warn) + } + if ordr != nil { + bans, err := types.OrderToApplications(ordr) + if err != nil { + logger.Errorf("Error turning order into ban applications : %v", err) + return err + } + logger.Warningf("%s triggered a %s %s %s remediation for [%s]", ordr.TxtTarget, ordr.Until.Sub(sig.Stop_at), ordr.Scope, ordr.MeasureType, sig.Scenario) + sig.BanApplications = bans + } else { + //Order didn't lead to concrete bans + logger.Infof("Processing Overflow with no decisions %s", sig.Alert_message) + } + + // if ApiPush is nil (not specified in profile configuration) we use global api config (from default.yaml) + if profile.ApiPush == nil { + if o.API != nil { // if API is not nil, we can push + o.API.AppendSignal(sig) + } + } + for _, outputConfig := range profile.OutputConfigs { + if pluginName, ok := outputConfig["plugin"]; ok { + if o.bManager.IsBackendPlugin(pluginName) { + if toStore, ok := outputConfig["store"]; ok { + boolConv, err := strconv.ParseBool(toStore) + if err != nil { + log.Errorf("unable to parse boolean value of store configuration '%s' : %s", toStore, err) + } + if !boolConv { + continue + } + } + o.bManager.InsertOnePlugin(sig, pluginName) + } + } + } + } + return nil +} + +func LoadOutputProfiles(profileConfig string) ([]types.Profile, error) { + + var ( + profiles []types.Profile + ) + + yamlFile, err := os.Open(profileConfig) + if err
!= nil { + log.Errorf("Can't open profile configuration file : %v", err) + return nil, err + } + //process the yaml + dec := yaml.NewDecoder(yamlFile) + dec.SetStrict(true) + for { + profile := types.Profile{} + err = dec.Decode(&profile) + if err != nil { + if err == io.EOF { + log.Tracef("End of yaml file") + break + } + log.Errorf("Error decoding profile configuration file '%s' : %v", profileConfig, err) + return nil, err + } + //compile the filter if present + if profile.Filter != "" { + profile.RunTimeFilter, err = expr.Compile(profile.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"sig": &types.SignalOccurence{}}))) + if err != nil { + log.Errorf("Compilation failed %v\n", err) + return nil, err + } + } + + if profile.Remediation.Ban || profile.Remediation.Slow || profile.Remediation.Captcha { + profile.Remediation.TimeDuration, err = time.ParseDuration(profile.Remediation.Duration) + if err != nil { + log.Fatalf("Unable to parse profile duration '%s'", profile.Remediation.Duration) + } + } + //ensure we have outputs + if profile.OutputConfigs == nil { + log.Errorf("Profile has empty OutputConfigs") + return nil, fmt.Errorf("profile has empty OutputConfigs") + } + + profiles = append(profiles, profile) + } + + /*Initialize individual connectors*/ + return profiles, nil + +} + +func (o *Output) InitAPI(config map[string]string) error { + var err error + o.API = &cwapi.ApiCtx{} + log.Infof("API connector init") + err = o.API.Init(config["path"], config["profile"]) + if err != nil { + log.Errorf("API init failed, won't push/pull : %v", err) + return err + } + return nil +} + +func (o *Output) LoadAPIConfig(configFile string) error { + var err error + o.API = &cwapi.ApiCtx{} + + err = o.API.LoadConfig(configFile) + if err != nil { + return err + } + return nil +} + +func (o *Output) load(config *OutputFactory, isDaemon bool) error { + var err error + if config == nil { + return fmt.Errorf("missing output plugin configuration") + } + log.Debugf("loading backend plugins ...") + o.bManager, err = cwplugin.NewBackendPlugin(config.BackendFolder, isDaemon) + if err != nil { + return err + } + return nil +} + +func (o *Output) Delete(target string) (int, error) { + nbDel, err := o.bManager.Delete(target) + return nbDel, err +} + +func (o *Output) DeleteAll() error { + err := o.bManager.DeleteAll() + return err +} + +func (o *Output) Insert(sig types.SignalOccurence) error { + err := o.bManager.Insert(sig) + return err +} + +func (o *Output) Flush() error { + err := o.bManager.Flush() + return err +} + +func (o *Output) ReadAT(timeAT time.Time) ([]map[string]string, error) { + ret, err := o.bManager.ReadAT(timeAT) + if err != nil { + return nil, err + } + return ret, nil +} + +func NewOutput(config *OutputFactory, isDaemon bool) (*Output, error) { + var output Output + err := output.load(config, isDaemon) + if err != nil { + return nil, err + } + return &output, nil +} diff --git a/pkg/parser/README.md b/pkg/parser/README.md new file mode 100644 index 000000000..fc0f47111 --- /dev/null +++ b/pkg/parser/README.md @@ -0,0 +1,181 @@ +![gopherbadger-tag-do-not-edit] + +# Parser + +Parser is in charge of turning raw log lines into objects that can be manipulated by heuristics. +Parsing has several stages represented by directories in config/stages. +The alphabetical order dictates the order in which the stages/parsers are processed.
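In other words, stage ordering is purely lexicographic on directory names. A toy illustration of the idea; the `config/stages` path comes from the paragraph above, but this is not the actual loader:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"sort"
)

func main() {
	entries, err := ioutil.ReadDir("config/stages")
	if err != nil {
		panic(err)
	}
	var stages []string
	for _, e := range entries {
		if e.IsDir() {
			stages = append(stages, e.Name())
		}
	}
	sort.Strings(stages) // alphabetical order == processing order
	fmt.Println("stages will be processed in this order :", stages)
}
```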
+ +The runtime representation of a line being parsed (or an overflow) is an `Event`, and it has fields that can be manipulated by the user : + - Parsed : a string dict containing parser outputs + - Meta : a string dict containing meta information about the event + - Line : a raw line representation + - Overflow : a representation of the overflow if applicable + +The Event structure goes through the stages, being altered with each parsing step. +It's the same object that will later be poured into buckets. + +# Parser configuration + +A parser configuration is a `Node` object that can contain grok patterns and enrichment instructions. + +For example : + +```yaml +filter: "evt.Line.Labels.type == 'testlog'" +debug: true +onsuccess: next_stage +name: tests/base-grok +pattern_syntax: + MYCAP: ".*" +nodes: + - grok: + pattern: ^xxheader %{MYCAP:extracted_value} trailing stuff$ + apply_on: Line.Raw +statics: + - meta: log_type + value: parsed_testlog +``` + +### Name + +*optional* If present and prometheus or profiling are activated, stats will be generated for this node. + +### Filter + +> `filter: "Line.Src endsWith '/foobar'"` + + - *optional* `filter` : an [expression](https://github.com/antonmedv/expr/blob/master/docs/Language-Definition.md) that will be evaluated against the runtime of a line (`Event`) + - if the `filter` is present and returns false, the node is not evaluated + - if the `filter` is absent, or present and returns true, the node is evaluated + +### Debug flag + +> `debug: true` + + - *optional* `debug` : a bool that sets debug of the node to true (applies at runtime and configuration parsing) + +### OnSuccess flag +> `onsuccess: next_stage|continue` + + - *mandatory* indicates the behaviour to follow if the node succeeds. `next_stage` makes the line go to the next stage, while `continue` will continue processing of the current stage. + +### Statics + +```yaml +statics: + - meta: service + value: tcp + - meta: source_ip + expression: "Event['source_ip']" + - parsed: "new_connection" + expression: "Event['tcpflags'] contains 'S' ? 'true' : 'false'" + - target: Parsed.this_is_a_test + value: foobar +``` + +Statics apply when a node is considered successful, and are used to alter the `Event` structure. +An empty node, a node with a grok pattern that succeeded, or an enrichment directive that worked are all successful nodes. +Statics can : + - meta: add/alter an entry in the `Meta` dict + - parsed: add/alter an entry in the `Parsed` dict + - target: indicate a destination field by name, such as Meta.my_key +The source of data can be : + - value: a static value + - expression: the result of an expression + + +### Grok patterns + +Grok patterns are used to parse one field of `Event` into one or several others : + +```yaml +grok: + name: "TCPDUMP_OUTPUT" + apply_on: message +``` + +`name` is the name of a pattern loaded from `patterns/`. +Base patterns can be seen on the repo : https://github.com/logrusorgru/grokky/blob/master/base.go + + +--- + + +```yaml +grok: + pattern: "^%{GREEDYDATA:request}\\?%{GREEDYDATA:http_args}$" + apply_on: request +``` +`pattern` is an inline pattern, with an optional `apply_on` that indicates which field it should be applied to.
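Filters and grok targets are evaluated at runtime with [expr](https://github.com/antonmedv/expr). A minimal sketch of the compile-once / run-per-event cycle, using a plain map instead of crowdsec's `Event` type:

```go
package main

import (
	"fmt"

	"github.com/antonmedv/expr"
)

func main() {
	env := map[string]interface{}{
		"evt": map[string]interface{}{
			"Line": map[string]interface{}{
				"Labels": map[string]interface{}{"type": "testlog"},
			},
		},
	}
	// compiled once at configuration load time...
	program, err := expr.Compile("evt.Line.Labels.type == 'testlog'", expr.Env(env))
	if err != nil {
		panic(err)
	}
	// ...then run against each event
	out, err := expr.Run(program, env)
	if err != nil {
		panic(err)
	}
	fmt.Println("filter matched :", out.(bool)) // true
}
```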
### Patterns syntax + +Present at the `Event` level, the `pattern_syntax` is a list of subgroks to be declared : + +```yaml +pattern_syntax: + DIR: "^.*/" + FILE: "[^/].*$" +``` + + +### Enrichment + +The enrichment mechanism is exposed via statics : + +```yaml +statics: + - method: GeoIpCity + expression: Meta.source_ip + - meta: IsoCode + expression: Enriched.IsoCode + - meta: IsInEU + expression: Enriched.IsInEU +``` + +The `GeoIpCity` method is called with the value of `Meta.source_ip`. +Enrichment plugins can output one or more key:values in the `Enriched` map, +and it's up to the user to copy the relevant values to `Meta` or such. + +# Trees + +The `Node` object also allows a `nodes` entry, which is a list of `Node` entries, allowing you to build trees. + +```yaml +filter: "Event['program'] == 'nginx'" #A +nodes: #A' + - grok: #B + name: "NGINXACCESS" + # these statics will apply only if the above grok pattern matched + statics: #B' + - meta: log_type + value: "http_access-log" + - grok: #C + name: "NGINXERROR" + statics: + - meta: log_type + value: "http_error-log" +statics: #D + - meta: service + value: http +``` + +The evaluation process of a node is as follows : + - apply the `filter` (A); if it doesn't match, exit + - iterate over the list of nodes (A') and apply the node process to each + - if a `grok` entry is present, process it + - if the `grok` entry returned data, apply the local statics of the node (if the grok 'B' was successful, apply the B' statics) + - if any of the `nodes` or the `grok` was successful, apply the statics (D) + +# Code Organisation + +Main structs : + - Node (config.go) : the runtime representation of parser configuration + - Event (runtime.go) : the runtime representation of the line being parsed + +Main funcs : + - CompileNode : turns YAML into a runtime-ready tree (Node) + - ProcessNode : processes the raw line against the parser tree, and produces ready-for-buckets data + diff --git a/pkg/parser/enrich.go b/pkg/parser/enrich.go new file mode 100644 index 000000000..e4a47a886 --- /dev/null +++ b/pkg/parser/enrich.go @@ -0,0 +1,95 @@ +package parser + +import ( + "plugin" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" +) + +/* should be part of a package shared with enrich/geoip.go */ +type EnrichFunc func(string, *types.Event, interface{}) (map[string]string, error) +type InitFunc func(map[string]string) (interface{}, error) + +type EnricherCtx struct { + Funcs map[string]EnrichFunc + Init InitFunc + Plugin *plugin.Plugin //pointer to the actual plugin + Name string + Path string //path to .so ?
+ RuntimeCtx interface{} //the internal context of plugin, given back over every call +} + +/* mimic plugin loading */ +func Loadplugin(path string) (EnricherCtx, error) { + var err error + + c := EnricherCtx{} + c.Name = path + c.Path = path + /* we don't want to deal with plugin loading for now :p */ + c.Funcs = map[string]EnrichFunc{ + "GeoIpASN": GeoIpASN, + "GeoIpCity": GeoIpCity, + "reverse_dns": reverse_dns, + "ParseDate": ParseDate, + "IpToRange": IpToRange, + } + c.Init = GeoIpInit + + c.RuntimeCtx, err = c.Init(map[string]string{"datadir": path}) + if err != nil { + log.Fatalf("load (fake) plugin load : %v", err) + } + return c, nil +} + +func GenDateParse(date string) (string, time.Time) { + var retstr string + var layouts = [...]string{ + time.RFC3339, + "02/Jan/2006:15:04:05 -0700", + "Mon Jan 2 15:04:05 2006", + "02-Jan-2006 15:04:05 europe/paris", + "01/02/2006 15:04:05", + "2006-01-02 15:04:05.999999999 -0700 MST", + //Jan 5 06:25:11 + "Jan 2 15:04:05", + "Mon Jan 02 15:04:05.000000 2006", + "2006-01-02T15:04:05Z07:00", + "2006/01/02", + "2006/01/02 15:04", + "2006-01-02", + "2006-01-02 15:04", + } + + for _, dateFormat := range layouts { + t, err := time.Parse(dateFormat, date) + if err == nil && !t.IsZero() { + //if the year isn't set, set it to current date :) + if t.Year() == 0 { + t = t.AddDate(time.Now().Year(), 0, 0) + } + retstr, err := t.MarshalText() + if err != nil { + log.Warningf("Failed marshaling '%v'", t) + continue + } + return string(retstr), t + } + } + return retstr, time.Time{} +} + +func ParseDate(in string, p *types.Event, x interface{}) (map[string]string, error) { + + var ret map[string]string = make(map[string]string) + + tstr, tbin := GenDateParse(in) + if !tbin.IsZero() { + ret["MarshaledTime"] = string(tstr) + return ret, nil + } + return nil, nil +} diff --git a/pkg/parser/enrich_dns.go b/pkg/parser/enrich_dns.go new file mode 100644 index 000000000..39a6e3079 --- /dev/null +++ b/pkg/parser/enrich_dns.go @@ -0,0 +1,27 @@ +package parser + +import ( + "net" + + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + //"github.com/crowdsecurity/crowdsec/pkg/parser" +) + +/* All plugins must export a list of function pointers for exported symbols */ +//var ExportedFuncs = []string{"reverse_dns"} + +func reverse_dns(field string, p *types.Event, ctx interface{}) (map[string]string, error) { + ret := make(map[string]string) + if field == "" { + return nil, nil + } + rets, err := net.LookupAddr(field) + if err != nil { + log.Infof("failed to resolve '%s'", field) + return nil, nil + } + //When using the host C library resolver, at most one result will be returned. To bypass the host resolver, use a custom Resolver. 
+ ret["reverse_dns"] = rets[0] + return ret, nil +} diff --git a/pkg/parser/enrich_geoip.go b/pkg/parser/enrich_geoip.go new file mode 100644 index 000000000..ce04bef46 --- /dev/null +++ b/pkg/parser/enrich_geoip.go @@ -0,0 +1,111 @@ +package parser + +import ( + "fmt" + "net" + "strconv" + + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + + "github.com/oschwald/geoip2-golang" + "github.com/oschwald/maxminddb-golang" + //"github.com/crowdsecurity/crowdsec/pkg/parser" +) + +type GeoIpEnricherCtx struct { + dbc *geoip2.Reader + dba *geoip2.Reader + dbraw *maxminddb.Reader +} + +/* All plugins must export a list of function pointers for exported symbols */ +var ExportedFuncs = []string{"GeoIpASN", "GeoIpCity"} + +func IpToRange(field string, p *types.Event, ctx interface{}) (map[string]string, error) { + var dummy interface{} + ret := make(map[string]string) + + if field == "" { + return nil, nil + } + ip := net.ParseIP(field) + if ip == nil { + log.Infof("Can't parse ip %s, no range enrich", field) + return nil, nil + } + net, ok, err := ctx.(GeoIpEnricherCtx).dbraw.LookupNetwork(ip, &dummy) + if err != nil { + log.Errorf("Failed to fetch network for %s : %v", ip.String(), err) + return nil, nil + } + if !ok { + log.Debugf("Unable to find range of %s", ip.String()) + return nil, nil + } + ret["SourceRange"] = net.String() + return ret, nil +} + +func GeoIpASN(field string, p *types.Event, ctx interface{}) (map[string]string, error) { + ret := make(map[string]string) + if field == "" { + return nil, nil + } + + ip := net.ParseIP(field) + record, err := ctx.(GeoIpEnricherCtx).dba.ASN(ip) + if err != nil { + log.Debugf("Unable to enrich ip '%s'", field) + return nil, nil + } + ret["ASNNumber"] = fmt.Sprintf("%d", record.AutonomousSystemNumber) + ret["ASNOrg"] = record.AutonomousSystemOrganization + log.Tracef("geoip ASN %s -> %s, %s", field, ret["ASNNumber"], ret["ASNOrg"]) + return ret, nil +} + +func GeoIpCity(field string, p *types.Event, ctx interface{}) (map[string]string, error) { + ret := make(map[string]string) + if field == "" { + return nil, nil + } + ip := net.ParseIP(field) + record, err := ctx.(GeoIpEnricherCtx).dbc.City(ip) + if err != nil { + log.Fatal(err) + return nil, err + } + ret["IsoCode"] = record.Country.IsoCode + ret["IsInEU"] = strconv.FormatBool(record.Country.IsInEuropeanUnion) + ret["Latitude"] = fmt.Sprintf("%f", record.Location.Latitude) + ret["Longitude"] = fmt.Sprintf("%f", record.Location.Longitude) + + log.Tracef("geoip City %s -> %s, %s", field, ret["IsoCode"], ret["IsInEU"]) + + return ret, nil +} + +/* All plugins must export an Init function */ +func GeoIpInit(cfg map[string]string) (interface{}, error) { + var ctx GeoIpEnricherCtx + var err error + ctx.dbc, err = geoip2.Open(cfg["datadir"] + "/GeoLite2-City.mmdb") + if err != nil { + log.Errorf("couldn't open geoip : %v", err) + return nil, err + } + ctx.dba, err = geoip2.Open(cfg["datadir"] + "/GeoLite2-ASN.mmdb") + if err != nil { + log.Errorf("couldn't open geoip : %v", err) + return nil, err + } + + ctx.dbraw, err = maxminddb.Open(cfg["datadir"] + "/GeoLite2-ASN.mmdb") + if err != nil { + log.Errorf("couldn't open geoip : %v", err) + return nil, err + } + + return ctx, nil +} diff --git a/pkg/parser/node.go b/pkg/parser/node.go new file mode 100644 index 000000000..bd4f16ec2 --- /dev/null +++ b/pkg/parser/node.go @@ -0,0 +1,481 @@ +package parser + +import ( + "fmt" + "net" + + "github.com/antonmedv/expr" + "github.com/antonmedv/expr/vm" + 
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/davecgh/go-spew/spew" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + log "github.com/sirupsen/logrus" +) + +type Node struct { + FormatVersion string `yaml:"format"` + //Enable config + runtime debug of node via config o/ + Debug bool `yaml:"debug,omitempty"` + //If enabled, the node (and its child) will report their own statistics + Profiling bool `yaml:"profiling,omitempty"` + //Name, author, description and reference(s) for parser pattern + Name string `yaml:"name,omitempty"` + Author string `yaml:"author,omitempty"` + Description string `yaml:"description,omitempty"` + Rerferences []string `yaml:"references,omitempty"` + //if debug is present in the node, keep its specific logger in runtime structure + logger *log.Entry `yaml:"-"` + //This is mostly a hack to make writting less repetive. + //relying on stage, we know which field to parse, and we + //can as well promote log to next stage on success + Stage string `yaml:"stage,omitempty"` + //OnSuccess allows to tag a node to be able to move log to next stage on success + OnSuccess string `yaml:"onsuccess,omitempty"` + rn string //this is only for us in debug, a random generated name for each node + //Filter is executed at runtime (with current log line as context) + //and must succeed or node is exited + Filter string `yaml:"filter,omitempty"` + RunTimeFilter *vm.Program `yaml:"-" json:"-"` //the actual compiled filter + + //If node has leafs, execute all of them until one asks for a 'break' + SuccessNodes []Node `yaml:"nodes,omitempty"` + //Flag used to describe when to 'break' or return an 'error' + // BreakBehaviour string `yaml:"break,omitempty"` + // Error string `yaml:"error,omitempty"` + + /* If the node is actually a leaf, it can have : grok, enrich, statics */ + //pattern_syntax are named grok patterns that are re-utilised over several grok patterns + SubGroks map[string]string `yaml:"pattern_syntax,omitempty"` + //Holds a grok pattern + Grok types.GrokPattern `yaml:"grok,omitempty"` + //Statics can be present in any type of node and is executed last + Statics []types.ExtraField `yaml:"statics,omitempty"` + //Whitelists + Whitelist types.Whitelist `yaml:"whitelist,omitempty"` +} + +func (n *Node) validate(pctx *UnixParserCtx) error { + + //stage is being set automagically + if n.Stage == "" { + return fmt.Errorf("Stage needs to be an existing stage") + } + + /* "" behaves like continue */ + if n.OnSuccess != "continue" && n.OnSuccess != "next_stage" && n.OnSuccess != "" { + return fmt.Errorf("onsuccess '%s' not continue,next_stage", n.OnSuccess) + } + if n.Filter != "" && n.RunTimeFilter == nil { + return fmt.Errorf("non-empty filter '%s' was not compiled", n.Filter) + } + + if n.Grok.RunTimeRegexp != nil || n.Grok.TargetField != "" { + if n.Grok.TargetField == "" { + return fmt.Errorf("grok's apply_on can't be empty") + } + if n.Grok.RegexpName == "" && n.Grok.RegexpValue == "" { + return fmt.Errorf("grok needs 'pattern' or 'name'") + } + } + + for idx, static := range n.Statics { + if static.Method != "" { + if static.ExpValue == "" { + return fmt.Errorf("static %d : when method is set, expression must be present", idx) + } + method_found := false + for _, enricherCtx := range ECTX { + if _, ok := enricherCtx.Funcs[static.Method]; ok { + method_found = true + break + } + } + if method_found == false { + return fmt.Errorf("the method '%s' doesn't exist", static.Method) + } + } 
else { + if static.Meta == "" && static.Parsed == "" && static.TargetByName == "" { + return fmt.Errorf("static %d : at least one of meta/event/target must be set", idx) + } + if static.Value == "" && static.RunTimeValue == nil { + return fmt.Errorf("static %d value or expression must be set", idx) + } + } + } + return nil +} + +func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) { + var NodeState bool = true + clog := n.logger + + clog.Debugf("Event entering node") + if n.RunTimeFilter != nil { + //Evaluate node's filter + output, err := expr.Run(n.RunTimeFilter, exprhelpers.GetExprEnv(map[string]interface{}{"evt": p})) + if err != nil { + clog.Warningf("failed to run filter : %v", err) + clog.Debugf("Event leaving node : ko") + return false, nil + } + switch output.(type) { + case bool: + /* filter returned false, don't process Node */ + if output.(bool) == false { + NodeState = false + clog.Debugf("eval(FALSE) '%s'", n.Filter) + clog.Debugf("Event leaving node : ko") + return false, nil + } + default: + clog.Warningf("Expr '%s' returned non-bool, abort : %T", n.Filter, output) + clog.Debugf("Event leaving node : ko") + NodeState = false + return false, nil + } + NodeState = true + clog.Debugf("eval(TRUE) '%s'", n.Filter) + } else { + clog.Tracef("Node has not filter, enter") + NodeState = true + } + + if n.Profiling == true && n.Name != "" { + NodesHits.With(prometheus.Labels{"source": p.Line.Src, "name": n.Name}).Inc() + } + set := false + var src net.IP + /*overflow and log don't hold the source ip in the same field, should be changed */ + /* perform whitelist checks for ips, cidr accordingly */ + if p.Type == types.LOG { + if _, ok := p.Meta["source_ip"]; ok { + src = net.ParseIP(p.Meta["source_ip"]) + } + } else if p.Type == types.OVFLW { + src = net.ParseIP(p.Overflow.Source_ip) + } + if src != nil { + for _, v := range n.Whitelist.B_Ips { + if v.Equal(src) { + clog.Infof("Event from [%s] is whitelisted by Ips !", src) + p.Whitelisted = true + set = true + } + } + + for _, v := range n.Whitelist.B_Cidrs { + if v.Contains(src) { + clog.Debugf("Event from [%s] is whitelisted by Cidrs !", src) + p.Whitelisted = true + set = true + } else { + clog.Debugf("whitelist: %s not in [%s]", src, v) + } + } + } else { + clog.Debugf("no ip in event, cidr/ip whitelists not checked") + } + /* run whitelist expression tests anyway */ + for _, e := range n.Whitelist.B_Exprs { + output, err := expr.Run(e, exprhelpers.GetExprEnv(map[string]interface{}{"evt": p})) + if err != nil { + clog.Warningf("failed to run whitelist expr : %v", err) + clog.Debugf("Event leaving node : ko") + return false, nil + } + switch output.(type) { + case bool: + /* filter returned false, don't process Node */ + if output.(bool) == true { + clog.Infof("Event is whitelisted by Expr !") + p.Whitelisted = true + set = true + } + } + } + if set == true { + p.WhiteListReason = n.Whitelist.Reason + /*huglily wipe the ban order if the event is whitelisted and it's an overflow */ + if p.Type == types.OVFLW { /*don't do this at home kids */ + // p.Overflow.OverflowAction = "" + //Break this for now. 
Shouldn't have been done this way, but it's not that serious + /*only display logs when we discard ban to avoid spam*/ + clog.Infof("Ban for %s whitelisted, reason [%s]", p.Overflow.Source.Ip.String(), n.Whitelist.Reason) + } + } + + //Iterate on leaves + if len(n.SuccessNodes) > 0 { + for _, leaf := range n.SuccessNodes { + ret, err := leaf.process(p, ctx) + if err != nil { + clog.Tracef("\tNode (%s) failed : %v", leaf.rn, err) + clog.Debugf("Event leaving node : ko") + return false, err + } + clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaf.rn, ret, n.OnSuccess) + if ret == true { + NodeState = true + /* if the child is successful, stop processing */ + if n.OnSuccess == "next_stage" { + clog.Debugf("child is success, OnSuccess=next_stage, skip") + break + } + } else { + NodeState = false + } + } + } + /*todo : check if a node made the state change ?*/ + /* should the children inherit the on_success behaviour ? */ + + clog.Tracef("State after nodes : %v", NodeState) + + //Process grok if present, should be exclusive with nodes :) + gstr := "" + if n.Grok.RunTimeRegexp != nil { + clog.Tracef("Processing grok pattern : %s : %p", n.Grok.RegexpName, n.Grok.RunTimeRegexp) + //for unparsed, parsed etc. set sensible defaults to reduce user hassle + if n.Grok.TargetField == "" { + clog.Fatalf("no default field and none specified on stage '%s'", n.Stage) + + } else { + //it's a hack to avoid using real reflect + if n.Grok.TargetField == "Line.Raw" { + gstr = p.Line.Raw + } else if val, ok := p.Parsed[n.Grok.TargetField]; ok { + gstr = val + } else { + clog.Debugf("(%s) target field '%s' doesn't exist in %v", n.rn, n.Grok.TargetField, p.Parsed) + NodeState = false + } + } + + grok := n.Grok.RunTimeRegexp.Parse(gstr) + if len(grok) > 0 { + clog.Debugf("+ Grok '%s' returned %d entries to merge in Parsed", n.Grok.RegexpName, len(grok)) + //We managed to grok stuff, merge it into Parsed + for k, v := range grok { + clog.Debugf("\t.Parsed['%s'] = '%s'", k, v) + p.Parsed[k] = v + } + // if the grok succeeded, process the associated statics + err := ProcessStatics(n.Grok.Statics, p, clog) + if err != nil { + clog.Fatalf("(%s) Failed to process statics : %v", n.rn, err) + } + } else { + //grok failed, node failed + clog.Debugf("+ Grok '%s' didn't return data on '%s'", n.Grok.RegexpName, gstr) + NodeState = false + } + + } else { + clog.Tracef("! No grok pattern : %p", n.Grok.RunTimeRegexp) + } + + //grok or leaves failed, don't process statics + if NodeState == false { + if n.Profiling == true && n.Name != "" { + NodesHitsKo.With(prometheus.Labels{"source": p.Line.Src, "name": n.Name}).Inc() + } + clog.Debugf("Event leaving node : ko") + return NodeState, nil + } + + if n.Profiling == true && n.Name != "" { + NodesHitsOk.With(prometheus.Labels{"source": p.Line.Src, "name": n.Name}).Inc() + } + if len(n.Statics) > 0 { + clog.Debugf("+ Processing %d statics", len(n.Statics)) + // if all else is good, process node's statics + err := ProcessStatics(n.Statics, p, clog) + if err != nil { + clog.Fatalf("Failed to process statics : %v", err) + } + } else { + clog.Tracef("! 
No node statics") + } + + if NodeState == true { + clog.Debugf("Event leaving node : ok") + log.Tracef("node is successful, check strategy") + if n.OnSuccess == "next_stage" { + idx := stageidx(p.Stage, ctx.Stages) + //we're at the last stage + if idx+1 == len(ctx.Stages) { + clog.Debugf("node reached the last stage : %s", p.Stage) + } else { + clog.Debugf("move Event from stage %s to %s", p.Stage, ctx.Stages[idx+1]) + p.Stage = ctx.Stages[idx+1] + } + } else { + clog.Tracef("no strategy on success (%s), continue !", n.OnSuccess) + } + } else { + clog.Debugf("Event leaving node : ko") + } + clog.Tracef("Node successful, continue") + return NodeState, nil +} + +func (n *Node) compile(pctx *UnixParserCtx) error { + var err error + var valid bool + + valid = false + + dumpr := spew.ConfigState{MaxDepth: 1, DisablePointerAddresses: true} + n.rn = seed.Generate() + + log.Debugf("compile, node is %s", n.Stage) + /* if the node has debugging enabled, create a specific logger with debug + that will be used only for processing this node ;) */ + if n.Debug == true { + var clog = logrus.New() + clog.SetLevel(log.DebugLevel) + n.logger = clog.WithFields(log.Fields{ + "id": n.rn, + }) + n.logger.Infof("%s has debug enabled", n.Name) + } else { + /* else bind it to the default one (might find something more elegant here)*/ + n.logger = log.WithFields(log.Fields{ + "id": n.rn, + }) + } + + /* display info about top-level nodes, they should be the only one with explicit stage name ?*/ + n.logger = n.logger.WithFields(log.Fields{"stage": n.Stage, "name": n.Name}) + + n.logger.Tracef("Compiling : %s", dumpr.Sdump(n)) + + //compile filter if present + if n.Filter != "" { + n.RunTimeFilter, err = expr.Compile(n.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if err != nil { + return fmt.Errorf("compilation of '%s' failed: %v", n.Filter, err) + } + } + + /* handle pattern_syntax and groks */ + for node, pattern := range n.SubGroks { + n.logger.Debugf("Adding subpattern '%s' : '%s'", node, pattern) + if err := pctx.Grok.Add(node, pattern); err != nil { + n.logger.Errorf("Unable to compile subpattern %s : %v", node, err) + return err + } + } + /* load grok by name or compile in-place */ + if n.Grok.RegexpName != "" { + n.logger.Debugf("+ Regexp Compilation '%s'", n.Grok.RegexpName) + n.Grok.RunTimeRegexp, err = pctx.Grok.Get(n.Grok.RegexpName) + if err != nil { + n.logger.Fatalf("Unable to find grok '%s' : %v\n", n.Grok.RegexpName, err) + } + if n.Grok.RunTimeRegexp == nil { + n.logger.Fatalf("Didn't find regexp : %s", n.Grok.RegexpName) + } + n.logger.Debugf("%s regexp: %s", n.Grok.RegexpName, n.Grok.RunTimeRegexp.Regexp.String()) + valid = true + } else if n.Grok.RegexpValue != "" { + //n.logger.Debugf("+ Regexp Compilation '%s'", n.Grok.RegexpValue) + n.Grok.RunTimeRegexp, err = pctx.Grok.Compile(n.Grok.RegexpValue) + if err != nil { + n.logger.Fatalf("Failed to compile grok '%s': %v\n", n.Grok.RegexpValue, err) + } + if n.Grok.RunTimeRegexp == nil { + // We shouldn't be here because compilation succeeded, so regexp shouldn't be nil + n.logger.Fatalf("Grok compilation failure: %s", n.Grok.RegexpValue) + } + n.logger.Debugf("%s regexp : %s", n.Grok.RegexpValue, n.Grok.RunTimeRegexp.Regexp.String()) + valid = true + } + /* load grok statics */ + if len(n.Grok.Statics) > 0 { + //compile expr statics if present + for idx, _ := range n.Grok.Statics { + if n.Grok.Statics[idx].ExpValue != "" { + n.Grok.Statics[idx].RunTimeValue, err = 
+ /* load grok statics */ + if len(n.Grok.Statics) > 0 { + //compile expr statics if present + for idx := range n.Grok.Statics { + if n.Grok.Statics[idx].ExpValue != "" { + n.Grok.Statics[idx].RunTimeValue, err = expr.Compile(n.Grok.Statics[idx].ExpValue, + expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if err != nil { + return err + } + } + } + valid = true + } + /* compile leafs if present */ + if len(n.SuccessNodes) > 0 { + for idx := range n.SuccessNodes { + /*propagate debug/stats to child nodes*/ + if n.SuccessNodes[idx].Debug == false && n.Debug == true { + n.SuccessNodes[idx].Debug = true + } + if n.SuccessNodes[idx].Profiling == false && n.Profiling == true { + n.SuccessNodes[idx].Profiling = true + } + //set child node to parent stage + n.SuccessNodes[idx].Stage = n.Stage + err = n.SuccessNodes[idx].compile(pctx) + if err != nil { + return err + } + //n.logger.Debugf("Leaf compilation succeeded: %v\n", n.SuccessNodes[idx]) + } + valid = true + } + /* load statics if present */ + for idx := range n.Statics { + if n.Statics[idx].ExpValue != "" { + n.Statics[idx].RunTimeValue, err = expr.Compile(n.Statics[idx].ExpValue, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if err != nil { + n.logger.Errorf("Statics Compilation failed %v.", err) + return err + } + } + valid = true + } + + /* compile whitelists if present */ + for _, v := range n.Whitelist.Ips { + n.Whitelist.B_Ips = append(n.Whitelist.B_Ips, net.ParseIP(v)) + n.logger.Debugf("adding ip %s to whitelists", net.ParseIP(v)) + valid = true + } + for _, v := range n.Whitelist.Cidrs { + _, tnet, err := net.ParseCIDR(v) + if err != nil { + n.logger.Fatalf("Unable to parse cidr whitelist '%s' : %v.", v, err) + } + n.Whitelist.B_Cidrs = append(n.Whitelist.B_Cidrs, tnet) + n.logger.Debugf("adding cidr %s to whitelists", tnet) + valid = true + } + for _, v := range n.Whitelist.Exprs { + compiled, err := expr.Compile(v, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if err != nil { + n.logger.Fatalf("Unable to compile whitelist expression '%s' : %v.", v, err) + } + n.Whitelist.B_Exprs = append(n.Whitelist.B_Exprs, compiled) + n.logger.Debugf("adding expression %s to whitelists", v) + valid = true + } + + if valid == false { + /* node is empty, error force return */ + n.logger.Infof("Node is empty: %s", spew.Sdump(n)) + n.Stage = "" + } + if err := n.validate(pctx); err != nil { + return err + //n.logger.Fatalf("Node is invalid : %s", err) + } + return nil +}
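Before the tests, it may help to see how these pieces are meant to assemble. The following hypothetical driver (not part of the patch; paths are placeholders and enricher plugins are left unloaded, so `method:` statics would only log a warning) wires Init, LoadStageDir and Parse together the same way testOneParser does below:

```go
// Hypothetical end-to-end driver for the parser package in this patch.
package main

import (
	"log"

	"github.com/crowdsecurity/crowdsec/pkg/parser"
	"github.com/crowdsecurity/crowdsec/pkg/types"
)

func main() {
	var p parser.UnixParser
	// build the grok context from a patterns directory (placeholder path)
	pctx, err := p.Init(map[string]interface{}{"patterns": "./config/patterns/"})
	if err != nil {
		log.Fatalf("init : %v", err)
	}
	// load and compile every stage file found under the directory (placeholder path)
	nodes, err := parser.LoadStageDir("./config/parsers/", pctx)
	if err != nil {
		log.Fatalf("load stages : %v", err)
	}
	evt := types.Event{
		Type: types.LOG,
		Line: types.Line{Raw: "xxheader VALUE1 trailing stuff", Labels: map[string]string{"type": "testlog"}},
	}
	out, err := parser.Parse(*pctx, evt, nodes)
	if err != nil {
		log.Fatalf("parse : %v", err)
	}
	log.Printf("processed=%v stage=%s parsed=%v", out.Process, out.Stage, out.Parsed)
}
```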
diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go new file mode 100644 index 000000000..fe49fe1b1 --- /dev/null +++ b/pkg/parser/parsing_test.go @@ -0,0 +1,259 @@ +package parser + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "testing" + "text/template" + + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/davecgh/go-spew/spew" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" +) + +type TestFile struct { + Lines []types.Event `yaml:"lines,omitempty"` + Results []types.Event `yaml:"results,omitempty"` +} + +func TestParser(t *testing.T) { + + var envSetting = os.Getenv("TEST_ONLY") + + if envSetting != "" { + if err := testOneParser(t, envSetting); err != nil { + t.Fatalf("Test '%s' failed : %s", envSetting, err) + } + } else { + fds, err := ioutil.ReadDir("./tests/") + if err != nil { + t.Fatalf("Unable to read test directory : %s", err) + } + for _, fd := range fds { + fname := "./tests/" + fd.Name() + log.Infof("Running test on %s", fname) + if err := testOneParser(t, fname); err != nil { + t.Fatalf("Test '%s' failed : %s", fname, err) + } + } + } + +} + +func testOneParser(t *testing.T, dir string) error { + var p UnixParser + var pctx *UnixParserCtx + var err error + var pnodes []Node = make([]Node, 0) + + log.SetLevel(log.DebugLevel) + + datadir := "../../data/" + cfgdir := "../../config/" + + /* this should be refactored to 2 lines :p */ + // Init the parser + pctx, err = p.Init(map[string]interface{}{"patterns": cfgdir + string("/patterns/")}) + if err != nil { + return fmt.Errorf("failed to initialize parser : %v", err) + } + //Init the enricher + pplugins, err := Loadplugin(datadir) + if err != nil { + return fmt.Errorf("failed to load plugin geoip : %v", err) + } + ECTX = append(ECTX, pplugins) + log.Debugf("Geoip ctx : %v", ECTX) + //Load the parser configuration + var parser_configs []Stagefile + //TBD var po_parser_configs []Stagefile + + parser_cfg_file := fmt.Sprintf("%s/parsers.yaml", dir) + b, err := ioutil.ReadFile(parser_cfg_file) + if err != nil { + return fmt.Errorf("failed opening %s : %s", parser_cfg_file, err) + } + tmpl, err := template.New("test").Parse(string(b)) + if err != nil { + return fmt.Errorf("failed to parse template %s : %s", b, err) + } + var out bytes.Buffer + err = tmpl.Execute(&out, map[string]string{"TestDirectory": dir}) + if err != nil { + panic(err) + } + if err := yaml.UnmarshalStrict(out.Bytes(), &parser_configs); err != nil { + return fmt.Errorf("failed unmarshaling %s : %s", parser_cfg_file, err) + } + + pnodes, err = LoadStages(parser_configs, pctx) + if err != nil { + return fmt.Errorf("unable to load parser config : %s", err) + } + + //TBD: Load post overflows + parser_test_file := fmt.Sprintf("%s/test.yaml", dir) + if testFile(t, parser_test_file, *pctx, pnodes) != true { + return fmt.Errorf("test failed!") + } + return nil +} + +func testFile(t *testing.T, file string, pctx UnixParserCtx, nodes []Node) bool { + + var expects []types.Event + + /* now we can load the test files */ + //process the yaml + yamlFile, err := os.Open(file) + if err != nil { + t.Errorf("yamlFile.Get err #%v ", err) + } + dec := yaml.NewDecoder(yamlFile) + dec.SetStrict(true) + for { + tf := TestFile{} + err := dec.Decode(&tf) + if err != nil { + if err == io.EOF { + log.Warningf("end of test file") + break + } + t.Errorf("Failed to load testfile '%s' yaml error : %v", file, err) + return false + } + for _, in := range tf.Lines { + log.Debugf("Parser input : %s", spew.Sdump(in)) + out, err := Parse(pctx, in, nodes) + if err != nil { + log.Errorf("Failed to process %s : %v", spew.Sdump(in), err) + } + log.Debugf("Parser output : %s", spew.Sdump(out)) + expects = append(expects, out) + } + /* + check the results we got against the expected ones + only the keys of the expected part are checked against result + */ + if len(tf.Results) == 0 && len(expects) == 0 { + t.Errorf("No results, no tests, abort.") + return false + } + redo: + if len(tf.Results) == 0 && len(expects) == 0 { + log.Warningf("Test is successful") + return true + } else { + log.Warningf("%d results to check against %d expected results", len(expects), len(tf.Results)) + } + for eidx, out := range expects { + for ridx, expected := range tf.Results { + + log.Debugf("Checking next expected result.") + valid := true + + //allow to check as well for stage and processed flags + if expected.Stage != "" { + if expected.Stage != out.Stage { + log.Infof("out/expected mismatch 'Stage' value : (got) '%s' != (expected) '%s'", out.Stage, expected.Stage) + valid = false + goto CheckFailed + } else { + log.Infof("Stage == '%s'", expected.Stage) + } + }
if expected.Process != out.Process { + log.Infof("out/expected mismatch 'Process' value : (got) '%t' != (expected) '%t'", out.Process, expected.Process) + valid = false + goto CheckFailed + } else { + log.Infof("Process == '%t'", out.Process) + } + + if expected.Whitelisted != out.Whitelisted { + log.Infof("out/expected mismatch 'Whitelisted' value : (got) '%t' != (expected) '%t'", out.Whitelisted, expected.Whitelisted) + valid = false + goto CheckFailed + } else { + log.Infof("Whitelisted == '%t'", out.Whitelisted) + } + + for k, v := range expected.Parsed { + /*check 3 main dicts : event, enriched, meta */ + if val, ok := out.Parsed[k]; ok { + if val != v { + log.Infof("out/expected mismatch 'event' entry [%s] : (got) '%s' != (expected) '%s'", k, val, v) + valid = false + goto CheckFailed + } else { + log.Infof(".Parsed[%s] == '%s'", k, val) + } + } else { + log.Infof("missing event entry [%s] in expected : %v", k, out.Parsed) + valid = false + goto CheckFailed + } + } + + for k, v := range expected.Meta { + /*check 3 main dicts : event, enriched, meta */ + if val, ok := out.Meta[k]; ok { + if val != v { + log.Infof("out/expected mismatch 'meta' entry [%s] : (got) '%s' != (expected) '%s'", k, val, v) + valid = false + goto CheckFailed + } else { + log.Infof("Meta[%s] == '%s'", k, val) + } + } else { + log.Warningf("missing meta entry [%s] in expected", k) + valid = false + goto CheckFailed + } + } + + for k, v := range expected.Enriched { + /*check 3 main dicts : event, enriched, meta */ + if val, ok := out.Enriched[k]; ok { + if val != v { + log.Infof("out/expected mismatch 'Enriched' entry [%s] : (got) '%s' != (expected) '%s'", k, val, v) + valid = false + goto CheckFailed + } else { + log.Infof("Enriched[%s] == '%s'", k, val) + } + } else { + log.Warningf("missing enriched entry [%s] in expected", k) + valid = false + goto CheckFailed + } + } + + CheckFailed: + + if valid == true { + //log.Infof("Found result [%s], skip", spew.Sdump(tf.Results[ridx])) + log.Warningf("The test is valid, remove entry %d from expects, and %d from tf.Results", eidx, ridx) + //don't do this at home : delete current element from list and redo + expects[eidx] = expects[len(expects)-1] + expects = expects[:len(expects)-1] + tf.Results[ridx] = tf.Results[len(tf.Results)-1] + tf.Results = tf.Results[:len(tf.Results)-1] + goto redo + } + + } + + } + + } + t.Errorf("failed test") + return false +} diff --git a/pkg/parser/runtime.go b/pkg/parser/runtime.go new file mode 100644 index 000000000..4e01a789c --- /dev/null +++ b/pkg/parser/runtime.go @@ -0,0 +1,313 @@ +package parser + +/* + This file contains + - the runtime parsing routines +*/ + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/types" + + "github.com/davecgh/go-spew/spew" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + log "github.com/sirupsen/logrus" + + "github.com/antonmedv/expr" +) + +//ECTX : DID YOU SEE THAT GLOBAL, ISN'T IT UGLY +var ECTX []EnricherCtx + +type Parser interface { + Init(map[string]interface{}) (interface{}, error) + IsParsable(types.Line) (bool, error) + Parse(interface{}, types.Line) (map[string]interface{}, error) +} + +/* ok, this is kinda experimental, I don't know how bad of an idea it is .. */
+func SetTargetByName(target string, value string, evt *types.Event) bool { + + if evt == nil { + return false + } + //it's a hack, we do it for the user + if strings.HasPrefix(target, "evt.") { + target = target[4:] + } + + log.Debugf("setting target %s to %s", target, value) + defer func() { + if r := recover(); r != nil { + log.Errorf("Runtime error while trying to set '%s' in %s : %+v", target, spew.Sdump(evt), r) + return + } + }() + + iter := reflect.ValueOf(evt).Elem() + if (iter == reflect.Value{}) || iter.IsZero() { + log.Tracef("event is nil") + //event is nil + return false + } + for _, f := range strings.Split(target, ".") { + /* + ** According to current Event layout we only have to handle struct and map + */ + switch iter.Kind() { + case reflect.Map: + tmp := iter.MapIndex(reflect.ValueOf(f)) + /*if we're in a map and the field doesn't exist, the user wants to add it :) */ + if (tmp == reflect.Value{}) || tmp.IsZero() { + log.Debugf("map entry is zero in '%s'", target) + } + iter.SetMapIndex(reflect.ValueOf(f), reflect.ValueOf(value)) + return true + case reflect.Struct: + tmp := iter.FieldByName(f) + if !tmp.IsValid() { + log.Debugf("%s IsValid false", f) + return false + } + iter = tmp + default: + log.Errorf("unexpected type %s in '%s'", iter.Kind(), target) + return false + } + } + //now we should have the final member :) + if !iter.CanSet() { + log.Errorf("'%s' can't be set", target) + return false + } + if iter.Kind() != reflect.String { + log.Errorf("Expected string, got %v when handling '%s'", iter.Kind(), target) + return false + } + iter.Set(reflect.ValueOf(value)) + return true +}
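As a toy illustration of the reflect walk SetTargetByName performs (descend struct fields and map keys by name, then set the final string), here is a reduced version; the two-field Event and setByName are stand-ins, not the patch's types:

```go
// Reduced sketch of the name-based reflect walk used by SetTargetByName.
package main

import (
	"fmt"
	"reflect"
	"strings"
)

type Line struct{ Raw string }
type Event struct {
	Line Line
	Meta map[string]string
}

func setByName(target, value string, evt *Event) bool {
	iter := reflect.ValueOf(evt).Elem()
	for _, f := range strings.Split(target, ".") {
		switch iter.Kind() {
		case reflect.Map:
			// in a map, a missing key is simply created
			iter.SetMapIndex(reflect.ValueOf(f), reflect.ValueOf(value))
			return true
		case reflect.Struct:
			iter = iter.FieldByName(f)
			if !iter.IsValid() {
				return false
			}
		default:
			return false
		}
	}
	// final member must be an addressable string
	if iter.CanSet() && iter.Kind() == reflect.String {
		iter.SetString(value)
		return true
	}
	return false
}

func main() {
	evt := &Event{Meta: map[string]string{}}
	setByName("Line.Raw", "hello", evt)
	setByName("Meta.log_type", "testlog", evt)
	fmt.Println(evt.Line.Raw, evt.Meta["log_type"])
}
```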
+func printStaticTarget(static types.ExtraField) string { + + if static.Method != "" { + return static.Method + } else if static.Parsed != "" { + return fmt.Sprintf(".Parsed[%s]", static.Parsed) + } else if static.Meta != "" { + return fmt.Sprintf(".Meta[%s]", static.Meta) + } else if static.Enriched != "" { + return fmt.Sprintf(".Enriched[%s]", static.Enriched) + } else if static.TargetByName != "" { + return static.TargetByName + } else { + return "?" + } +} + +func ProcessStatics(statics []types.ExtraField, p *types.Event, clog *logrus.Entry) error { + //we have a few cases : + //(meta||key) + (static||reference||expr) + var value string + + for _, static := range statics { + value = "" + if static.Value != "" { + value = static.Value + } else if static.RunTimeValue != nil { + output, err := expr.Run(static.RunTimeValue, exprhelpers.GetExprEnv(map[string]interface{}{"evt": p})) + if err != nil { + clog.Warningf("failed to run RunTimeValue : %v", err) + continue + } + switch output.(type) { + case string: + value = output.(string) + case int: + value = strconv.Itoa(output.(int)) + default: + clog.Fatalf("unexpected return type for RunTimeValue : %T", output) + return errors.New("unexpected return type for RunTimeValue") + } + } + + if value == "" { + clog.Debugf("Empty value for %s, skip.", printStaticTarget(static)) + continue + } + + if static.Method != "" { + processed := false + /*still way too hackish, but : inject all the results in enriched, and */ + for _, x := range ECTX { + if fptr, ok := x.Funcs[static.Method]; ok { + clog.Tracef("Found method '%s'", static.Method) + ret, err := fptr(value, p, x.RuntimeCtx) + if err != nil { + clog.Fatalf("plugin function error : %v", err) + } + processed = true + clog.Debugf("+ Method %s('%s') returned %d entries to merge in .Enriched\n", static.Method, value, len(ret)) + if len(ret) == 0 { + clog.Debugf("+ Method '%s' empty response on '%s'", static.Method, value) + } + for k, v := range ret { + clog.Debugf("\t.Enriched[%s] = '%s'\n", k, v) + p.Enriched[k] = v + } + break + } + } + if processed == false { + clog.Warningf("method '%s' doesn't exist", static.Method) + } + } else if static.Parsed != "" { + clog.Debugf(".Parsed[%s] = '%s'", static.Parsed, value) + p.Parsed[static.Parsed] = value + } else if static.Meta != "" { + clog.Debugf(".Meta[%s] = '%s'", static.Meta, value) + p.Meta[static.Meta] = value + } else if static.Enriched != "" { + clog.Debugf(".Enriched[%s] = '%s'", static.Enriched, value) + p.Enriched[static.Enriched] = value + } else if static.TargetByName != "" { + if !SetTargetByName(static.TargetByName, value, p) { + clog.Errorf("Unable to set value of '%s'", static.TargetByName) + } else { + clog.Debugf("%s = '%s'", static.TargetByName, value) + } + } else { + clog.Fatalf("unable to process static : unknown target") + } + + } + return nil +} + +var NodesHits = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_node_hits", + Help: "How many times an event entered this node.", + }, + []string{"source", "name"}, +) + +var NodesHitsOk = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_node_hits_ok", + Help: "How many times an event successfully exited this node.", + }, + []string{"source", "name"}, +) + +var NodesHitsKo = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_node_hits_ko", + Help: "How many times an event unsuccessfully exited this node.", + }, + []string{"source", "name"}, +) + +func stageidx(stage string, stages []string) int { + for i, v := range stages { + if stage == v { + return i + } + } + return -1 +}
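The three CounterVecs above are only constructed here; registration presumably happens in the consuming binary and is not shown in this patch. A sketch of that missing step (nodesHits is a local stand-in mirroring NodesHits):

```go
// CounterVecs do nothing until registered with a prometheus registry.
package main

import "github.com/prometheus/client_golang/prometheus"

var nodesHits = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "cs_node_hits",
		Help: "How many times an event entered this node.",
	},
	[]string{"source", "name"},
)

func main() {
	prometheus.MustRegister(nodesHits) // panics on duplicate registration
	// what process() does on every node hit:
	nodesHits.With(prometheus.Labels{"source": "stdin", "name": "tests/base-grok"}).Inc()
}
```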
+func /*(u types.UnixParser)*/ Parse(ctx UnixParserCtx, xp types.Event, nodes []Node) (types.Event, error) { + event := xp + /* the stage is undefined, probably line is freshly acquired, set to first stage !*/ + if event.Stage == "" && len(ctx.Stages) > 0 { + event.Stage = ctx.Stages[0] + log.Debugf("no stage, set to : %s", event.Stage) + } + event.Process = false + + if event.Parsed == nil { + event.Parsed = make(map[string]string) + } + if event.Enriched == nil { + event.Enriched = make(map[string]string) + } + if event.Meta == nil { + event.Meta = make(map[string]string) + } + if event.Type == types.LOG { + log.Tracef("INPUT '%s'", event.Line.Raw) + } + + for _, stage := range ctx.Stages { + /* if the node is forward in stages, seek to its stage */ + /* this is for example used by the testing system to inject logs in post-syslog-parsing phase*/ + if stageidx(event.Stage, ctx.Stages) > stageidx(stage, ctx.Stages) { + log.Tracef("skipping stage, we are already at [%s] expecting [%s]", event.Stage, stage) + continue + } + log.Tracef("node stage : %s, current stage : %s", event.Stage, stage) + + /* if the stage is wrong, it means that the log didn't manage to "pass" a stage with an onsuccess: next_stage tag */ + if event.Stage != stage { + log.Debugf("Event not parsed, expected stage '%s' got '%s', abort", stage, event.Stage) + return types.Event{Process: false}, nil + } + + isStageOK := false + for idx, node := range nodes { + clog := log.WithFields(log.Fields{ + "node-name": node.rn, + "stage": event.Stage, + }) + //Only process current stage's nodes + if event.Stage != node.Stage { + continue + } + clog.Tracef("Processing node %d/%d -> %s", idx, len(nodes), node.rn) + if ctx.Profiling == true { + node.Profiling = true + } + ret, err := node.process(&event, ctx) + if err != nil { + clog.Fatalf("Error while processing node : %v", err) + } + clog.Tracef("node (%s) ret : %v", node.rn, ret) + if ret == true { + isStageOK = true + } + if ret == true && node.OnSuccess == "next_stage" { + clog.Debugf("node successful, stop processing stage %s", stage) + break + } + //the parsed object moved onto the next phase + if event.Stage != stage { + clog.Tracef("node moved stage, break and redo") + break + } + } + if isStageOK == false { + log.Debugf("Log didn't finish stage %s", event.Stage) + event.Process = false + return event, nil + } + + } + + event.Process = true + return event, nil + +} diff --git a/pkg/parser/stage.go b/pkg/parser/stage.go new file mode 100644 index 000000000..cfc526b1a --- /dev/null +++ b/pkg/parser/stage.go @@ -0,0 +1,143 @@ +package parser + +/* + This file contains + - the runtime definition of parser + - the compilation/parsing routines of parser configuration +*/ + +import ( + "fmt" + "io" + _ "net/http/pprof" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + + log "github.com/sirupsen/logrus" + + "github.com/goombaio/namegenerator" + yaml "gopkg.in/yaml.v2" +) + +var seed namegenerator.Generator = namegenerator.NewNameGenerator(time.Now().UTC().UnixNano()) + +/* + identify generic component to alter maps, smartfilters ? (static, conditional static etc.) +*/
+ +type Stagefile struct { + Filename string `yaml:"filename"` + Stage string `yaml:"stage"` +} + +func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx) ([]Node, error) { + var nodes []Node + tmpstages := make(map[string]bool) + + for _, stageFile := range stageFiles { + if !strings.HasSuffix(stageFile.Filename, ".yaml") { + log.Warningf("skip non-yaml : %s", stageFile.Filename) + continue + } + st, err := os.Stat(stageFile.Filename) + if err != nil { + return nil, fmt.Errorf("failed to stat %s : %v", stageFile.Filename, err) + } + if st.IsDir() { + continue + } + yamlFile, err := os.Open(stageFile.Filename) + if err != nil { + return nil, fmt.Errorf("can't access parsing configuration file %s : %s", stageFile.Filename, err) + } + //process the yaml + dec := yaml.NewDecoder(yamlFile) + dec.SetStrict(true) + nodesCount := 0 + for { + node := Node{} + node.OnSuccess = "continue" //default behaviour is to continue + err = dec.Decode(&node) + if err != nil { + if err == io.EOF { + log.Tracef("End of yaml file") + break + } + log.Fatalf("Error decoding parsing configuration file '%s': %v", stageFile.Filename, err) + } + + //check for empty node + if node.Name == "" && node.Description == "" && node.Author == "" { + log.Infof("Node has no name, author or description. Skipping.") + continue + } + //check compat + if node.FormatVersion == "" { + log.Debugf("no version in %s, assuming '1.0'", stageFile.Filename) + node.FormatVersion = "1.0" + } + ok, err := cwversion.Statisfies(node.FormatVersion, cwversion.Constraint_parser) + if err != nil { + log.Fatalf("Failed to check version : %s", err) + } + if !ok { + log.Errorf("%s doesn't satisfy parser format %s, skip", node.FormatVersion, cwversion.Constraint_parser) + continue + } + + node.Stage = stageFile.Stage + if _, ok := tmpstages[stageFile.Stage]; !ok { + tmpstages[stageFile.Stage] = true + } + //compile the node : grok pattern and expression + err = node.compile(pctx) + if err != nil { + if node.Name != "" { + return nil, fmt.Errorf("failed to compile node '%s' in '%s' : %s", node.Name, stageFile.Filename, err.Error()) + } + return nil, fmt.Errorf("failed to compile node in '%s' : %s", stageFile.Filename, err.Error()) + } + /* if the stage is empty, the node is empty, it's a trailing entry in the user's yaml file */ + if node.Stage == "" { + continue + } + nodes = append(nodes, node) + nodesCount++ + } + log.WithFields(log.Fields{"file": stageFile.Filename}).Infof("Loaded %d parser nodes", nodesCount) + } + + for k := range tmpstages { + pctx.Stages = append(pctx.Stages, k) + } + sort.Strings(pctx.Stages) + log.Debugf("Stages loaded: %+v", pctx.Stages) + return nodes, nil +} + +func LoadStageDir(dir string, pctx *UnixParserCtx) ([]Node, error) { + + var files []Stagefile + + m, err := filepath.Glob(dir + "/*/*") + if err != nil { + return nil, fmt.Errorf("unable to find configs in '%s' : %v", dir, err) + } + for _, f := range m { + tmp := Stagefile{} + tmp.Filename = f + //guess stage : (prefix - file).split('/')[0] + stages := strings.Split(f, "/") + stage := stages[len(stages)-2] + tmp.Stage = stage + files = append(files, tmp) + } + return LoadStages(files, pctx) +} diff --git a/pkg/parser/tests/base-grok-import/base-grok.yaml b/pkg/parser/tests/base-grok-import/base-grok.yaml new file mode 100644 index 000000000..0d451d0fd --- /dev/null +++ b/pkg/parser/tests/base-grok-import/base-grok.yaml @@ -0,0 +1,16 @@ +filter: "evt.Line.Labels.type == 'testlog'" +debug: true +onsuccess: next_stage +name: tests/base-grok +nodes: + - grok: + #SYSLOGFACILITY is
a pattern defined by the grokky library we are using + name: SYSLOGFACILITY + apply_on: Line.Raw + statics: + - enriched: subgrok_static_why_is_it_still_here + value: because +statics: + - meta: log_type + value: parsed_testlog + diff --git a/pkg/parser/tests/base-grok-import/parsers.yaml b/pkg/parser/tests/base-grok-import/parsers.yaml new file mode 100644 index 000000000..775f8893e --- /dev/null +++ b/pkg/parser/tests/base-grok-import/parsers.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/base-grok.yaml + stage: s00-raw diff --git a/pkg/parser/tests/base-grok-import/test.yaml b/pkg/parser/tests/base-grok-import/test.yaml new file mode 100644 index 000000000..4ce26fdcc --- /dev/null +++ b/pkg/parser/tests/base-grok-import/test.yaml @@ -0,0 +1,43 @@ +#these are the events we input into parser +lines: + - Line: + Labels: + #this one will be checked by a filter + type: testlog + Raw: <123.120> + - Line: + #see tricky case : first one is nginx via syslog, the second one is local nginx :) + Labels: + #this one will be checked by a filter + type: testlog + Raw: <123.121> + - Line: + #see tricky case : first one is nginx via syslog, the second one is local nginx :) + Labels: + #this one will be checked by a filter + type: testlog + Raw: XXXX +#these are the results we expect from the parser +results: + - Meta: + log_type: parsed_testlog + Parsed: + facility: 123 + priority: 120 + Enriched: + subgrok_static_why_is_it_still_here: because + Process: true + Stage: s00-raw + - Meta: + log_type: parsed_testlog + Parsed: + facility: 123 + priority: 121 + Enriched: + subgrok_static_why_is_it_still_here: because + Process: true + Stage: s00-raw + - Process: false + Stage: s00-raw + Line: + Raw: XXXX diff --git a/pkg/parser/tests/base-grok-no-subnode/base-grok.yaml b/pkg/parser/tests/base-grok-no-subnode/base-grok.yaml new file mode 100644 index 000000000..be697c0e6 --- /dev/null +++ b/pkg/parser/tests/base-grok-no-subnode/base-grok.yaml @@ -0,0 +1,13 @@ +filter: "evt.Line.Labels.type == 'testlog'" +debug: true +onsuccess: next_stage +name: tests/base-grok +pattern_syntax: + MYCAP: ".*" +grok: + pattern: ^xxheader %{MYCAP:extracted_value} trailing stuff$ + apply_on: Line.Raw +statics: + - meta: log_type + value: parsed_testlog + diff --git a/pkg/parser/tests/base-grok-no-subnode/parsers.yaml b/pkg/parser/tests/base-grok-no-subnode/parsers.yaml new file mode 100644 index 000000000..775f8893e --- /dev/null +++ b/pkg/parser/tests/base-grok-no-subnode/parsers.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/base-grok.yaml + stage: s00-raw diff --git a/pkg/parser/tests/base-grok-no-subnode/test.yaml b/pkg/parser/tests/base-grok-no-subnode/test.yaml new file mode 100644 index 000000000..bc8300238 --- /dev/null +++ b/pkg/parser/tests/base-grok-no-subnode/test.yaml @@ -0,0 +1,29 @@ +#these are the events we input into parser +lines: + - Line: + Labels: + #this one will be checked by a filter + type: testlog + Raw: xxheader VALUE1 trailing stuff + - Line: + #see tricky case : first one is nginx via syslog, the second one is local nginx :) + Labels: + #this one will be checked by a filter + type: testlog + Raw: xxheader VALUE2 trailing stuff +#these are the results we expect from the parser +results: + + - Meta: + log_type: parsed_testlog + Parsed: + extracted_value: VALUE1 + Process: true + Stage: s00-raw + - Meta: + log_type: parsed_testlog + Parsed: + extracted_value: VALUE2 + Process: true + Stage: s00-raw + diff --git a/pkg/parser/tests/base-grok/base-grok.yaml 
b/pkg/parser/tests/base-grok/base-grok.yaml new file mode 100644 index 000000000..715390031 --- /dev/null +++ b/pkg/parser/tests/base-grok/base-grok.yaml @@ -0,0 +1,14 @@ +filter: "evt.Line.Labels.type == 'testlog'" +debug: true +onsuccess: next_stage +name: tests/base-grok +pattern_syntax: + MYCAP: ".*" +nodes: + - grok: + pattern: ^xxheader %{MYCAP:extracted_value} trailing stuff$ + apply_on: Line.Raw +statics: + - meta: log_type + value: parsed_testlog + diff --git a/pkg/parser/tests/base-grok/parsers.yaml b/pkg/parser/tests/base-grok/parsers.yaml new file mode 100644 index 000000000..775f8893e --- /dev/null +++ b/pkg/parser/tests/base-grok/parsers.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/base-grok.yaml + stage: s00-raw diff --git a/pkg/parser/tests/base-grok/test.yaml b/pkg/parser/tests/base-grok/test.yaml new file mode 100644 index 000000000..bc8300238 --- /dev/null +++ b/pkg/parser/tests/base-grok/test.yaml @@ -0,0 +1,29 @@ +#these are the events we input into parser +lines: + - Line: + Labels: + #this one will be checked by a filter + type: testlog + Raw: xxheader VALUE1 trailing stuff + - Line: + #see tricky case : first one is nginx via syslog, the second one is local nginx :) + Labels: + #this one will be checked by a filter + type: testlog + Raw: xxheader VALUE2 trailing stuff +#these are the results we expect from the parser +results: + + - Meta: + log_type: parsed_testlog + Parsed: + extracted_value: VALUE1 + Process: true + Stage: s00-raw + - Meta: + log_type: parsed_testlog + Parsed: + extracted_value: VALUE2 + Process: true + Stage: s00-raw + diff --git a/pkg/parser/tests/base-tree/base-grok.yaml b/pkg/parser/tests/base-tree/base-grok.yaml new file mode 100644 index 000000000..dd1ae7b7a --- /dev/null +++ b/pkg/parser/tests/base-tree/base-grok.yaml @@ -0,0 +1,33 @@ +#Here we are testing the trees within the node +filter: "evt.Line.Labels.type == 'type1'" +debug: true +name: tests/base-grok-root +pattern_syntax: + MYCAP: ".*" +grok: + pattern: ^xxheader %{MYCAP:extracted_value} trailing stuff$ + apply_on: Line.Raw +statics: + - meta: state + value: root-done + - meta: state_sub + expression: evt.Parsed.extracted_value +--- +filter: "evt.Line.Labels.type == 'type1' && evt.Meta.state == 'root-done'" +debug: true +onsuccess: next_stage +name: tests/base-grok-leafs +#the sub-nodes will process the result of the master node +nodes: + - filter: "evt.Parsed.extracted_value == 'VALUE1'" + debug: true + statics: + - meta: final_state + value: leaf1 + - filter: "evt.Parsed.extracted_value == 'VALUE2'" + debug: true + statics: + - meta: final_state + value: leaf2 + + diff --git a/pkg/parser/tests/base-tree/parsers.yaml b/pkg/parser/tests/base-tree/parsers.yaml new file mode 100644 index 000000000..775f8893e --- /dev/null +++ b/pkg/parser/tests/base-tree/parsers.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/base-grok.yaml + stage: s00-raw diff --git a/pkg/parser/tests/base-tree/test.yaml b/pkg/parser/tests/base-tree/test.yaml new file mode 100644 index 000000000..2650e81ef --- /dev/null +++ b/pkg/parser/tests/base-tree/test.yaml @@ -0,0 +1,30 @@ +#these are the events we input into parser +lines: + - Line: + Labels: + #this one will be checked by a filter + type: type1 + Raw: xxheader VALUE1 trailing stuff + - Line: + #see tricky case : first one is nginx via syslog, the second one is local nginx :) + Labels: + #this one will be checked by a filter + type: type1 + Raw: xxheader VALUE2 trailing stuff +#these are the results we expect from the parser +results: + - 
Meta: + final_state: leaf1 + state_sub: VALUE1 + Parsed: + extracted_value: VALUE1 + Process: true + Stage: s00-raw + - Meta: + final_state: leaf2 + state_sub: VALUE2 + Parsed: + extracted_value: VALUE2 + Process: true + Stage: s00-raw + diff --git a/pkg/parser/tests/dateparser-enrich/base-grok.yaml b/pkg/parser/tests/dateparser-enrich/base-grok.yaml new file mode 100644 index 000000000..781b4a6c0 --- /dev/null +++ b/pkg/parser/tests/dateparser-enrich/base-grok.yaml @@ -0,0 +1,10 @@ +filter: "evt.StrTime != ''" +name: test/dateparse +debug: true +#it's a hack lol +statics: + - method: ParseDate + expression: evt.StrTime + - target: MarshaledTime + expression: evt.Enriched.MarshaledTime + diff --git a/pkg/parser/tests/dateparser-enrich/parsers.yaml b/pkg/parser/tests/dateparser-enrich/parsers.yaml new file mode 100644 index 000000000..775f8893e --- /dev/null +++ b/pkg/parser/tests/dateparser-enrich/parsers.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/base-grok.yaml + stage: s00-raw diff --git a/pkg/parser/tests/dateparser-enrich/test.yaml b/pkg/parser/tests/dateparser-enrich/test.yaml new file mode 100644 index 000000000..67edd9aa3 --- /dev/null +++ b/pkg/parser/tests/dateparser-enrich/test.yaml @@ -0,0 +1,22 @@ +#these are the events we input into parser +lines: + - StrTime: 2012/11/01 + Parsed: + test: format1 + - StrTime: 11/02/2012 13:37:05 + Parsed: + test: format2 +#these are the results we expect from the parser +results: + - Parsed: + test: format1 + Enriched: + MarshaledTime: "2012-11-01T00:00:00Z" + Process: true + Stage: s00-raw + - Parsed: + test: format2 + Enriched: + MarshaledTime: "2012-11-02T13:37:05Z" + Process: true + Stage: s00-raw diff --git a/pkg/parser/tests/geoip-enrich/base-grok.yaml b/pkg/parser/tests/geoip-enrich/base-grok.yaml new file mode 100644 index 000000000..a25875c1a --- /dev/null +++ b/pkg/parser/tests/geoip-enrich/base-grok.yaml @@ -0,0 +1,22 @@ +filter: "'source_ip' in evt.Meta" +name: tests/geoip-enrich +description: "Populate event with geoloc info : as, country, coords, source range." 
+statics: + - method: GeoIpCity + expression: evt.Meta.source_ip + - meta: IsoCode + expression: evt.Enriched.IsoCode + - meta: IsInEU + expression: evt.Enriched.IsInEU + - meta: GeoCoords + expression: evt.Enriched.GeoCoords + - method: GeoIpASN + expression: evt.Meta.source_ip + - meta: ASNNumber + expression: evt.Enriched.ASNNumber + - meta: ASNOrg + expression: evt.Enriched.ASNOrg + - method: IpToRange + expression: evt.Meta.source_ip + - meta: SourceRange + expression: evt.Enriched.SourceRange diff --git a/pkg/parser/tests/geoip-enrich/parsers.yaml b/pkg/parser/tests/geoip-enrich/parsers.yaml new file mode 100644 index 000000000..775f8893e --- /dev/null +++ b/pkg/parser/tests/geoip-enrich/parsers.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/base-grok.yaml + stage: s00-raw diff --git a/pkg/parser/tests/geoip-enrich/test.yaml b/pkg/parser/tests/geoip-enrich/test.yaml new file mode 100644 index 000000000..5d0abcb35 --- /dev/null +++ b/pkg/parser/tests/geoip-enrich/test.yaml @@ -0,0 +1,27 @@ +#these are the events we input into parser +lines: + - Meta: + test: test1 + source_ip: 8.8.8.8 + - Meta: + test: test2 + source_ip: 192.168.0.1 +#these are the results we expect from the parser +results: + - Process: true + Enriched: + IsoCode: US + IsInEU: false + ASNOrg: Google LLC + Meta: + source_ip: 8.8.8.8 + - Process: true + Enriched: + IsInEU: false + IsoCode: + ASNOrg: + Meta: + source_ip: 192.168.0.1 + + + diff --git a/pkg/parser/tests/multi-stage-grok/base-grok-s00.yaml b/pkg/parser/tests/multi-stage-grok/base-grok-s00.yaml new file mode 100644 index 000000000..0425f9087 --- /dev/null +++ b/pkg/parser/tests/multi-stage-grok/base-grok-s00.yaml @@ -0,0 +1,12 @@ +filter: "evt.Line.Labels.type == 'testlog'" +debug: true +onsuccess: next_stage +name: tests/base-grok +nodes: + - grok: + pattern: ^xxheader %{GREEDYDATA:extracted_value} trailing stuff$ + apply_on: Line.Raw +statics: + - meta: log_type + value: parsed_testlog + diff --git a/pkg/parser/tests/multi-stage-grok/base-grok-s01.yaml b/pkg/parser/tests/multi-stage-grok/base-grok-s01.yaml new file mode 100644 index 000000000..1e06a85ea --- /dev/null +++ b/pkg/parser/tests/multi-stage-grok/base-grok-s01.yaml @@ -0,0 +1,11 @@ +#only one of the events is going to make it through the filter +filter: "evt.Parsed.extracted_value == 'VALUE1'" +debug: true +onsuccess: next_stage +name: tests/second-stage-grok +statics: + - meta: did_second_stage + value: yes + - target: evt.Parsed.test_bis + value: lolilol + diff --git a/pkg/parser/tests/multi-stage-grok/parsers.yaml b/pkg/parser/tests/multi-stage-grok/parsers.yaml new file mode 100644 index 000000000..c0e4c7452 --- /dev/null +++ b/pkg/parser/tests/multi-stage-grok/parsers.yaml @@ -0,0 +1,4 @@ + - filename: {{.TestDirectory}}/base-grok-s00.yaml + stage: s00-raw + - filename: {{.TestDirectory}}/base-grok-s01.yaml + stage: s01-raw diff --git a/pkg/parser/tests/multi-stage-grok/test.yaml b/pkg/parser/tests/multi-stage-grok/test.yaml new file mode 100644 index 000000000..2113aff26 --- /dev/null +++ b/pkg/parser/tests/multi-stage-grok/test.yaml @@ -0,0 +1,29 @@ +#these are the events we input into parser +lines: + - Line: + Labels: + #this one will be checked by a filter + type: testlog + Raw: xxheader VALUE1 trailing stuff + - Line: + #see tricky case : first one is nginx via syslog, the second one is local nginx :) + Labels: + #this one will be checked by a filter + type: testlog + Raw: xxheader VALUE2 trailing stuff +#these are the results we expect from the parser +results: + - Meta: + log_type:
parsed_testlog + Parsed: + extracted_value: VALUE1 + test_bis: lolilol + Process: true + Stage: s01-raw + #because of how our second stage parser is done, this one won't pass stage + - Meta: + log_type: parsed_testlog + Parsed: + extracted_value: VALUE2 + Process: false + Stage: s01-raw diff --git a/pkg/parser/tests/reverse-dns-enrich/base-grok.yaml b/pkg/parser/tests/reverse-dns-enrich/base-grok.yaml new file mode 100644 index 000000000..188b439c3 --- /dev/null +++ b/pkg/parser/tests/reverse-dns-enrich/base-grok.yaml @@ -0,0 +1,8 @@ +#filter: "evt.Overflow.Labels.remediation == 'true'" +name: tests/rdns +description: "Lookup the DNS associated with the source IP, only for overflows" +statics: + - method: reverse_dns + expression: evt.Enriched.IpToResolve + - meta: did_dns_succeeded + expression: 'evt.Enriched.reverse_dns == "" ? "no" : "yes"' diff --git a/pkg/parser/tests/reverse-dns-enrich/parsers.yaml b/pkg/parser/tests/reverse-dns-enrich/parsers.yaml new file mode 100644 index 000000000..775f8893e --- /dev/null +++ b/pkg/parser/tests/reverse-dns-enrich/parsers.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/base-grok.yaml + stage: s00-raw diff --git a/pkg/parser/tests/reverse-dns-enrich/test.yaml b/pkg/parser/tests/reverse-dns-enrich/test.yaml new file mode 100644 index 000000000..1495d3f86 --- /dev/null +++ b/pkg/parser/tests/reverse-dns-enrich/test.yaml @@ -0,0 +1,21 @@ +#these are the events we input into parser +lines: + - Enriched: + IpToResolve: 8.8.8.8 + - Enriched: + IpToResolve: 1.2.3.4 +#these are the results we expect from the parser +results: + - Enriched: + reverse_dns: dns.google. + IpToResolve: 8.8.8.8 + Meta: + did_dns_succeeded: yes + Process: true + Stage: s00-raw + - Enriched: + IpToResolve: 1.2.3.4 + Meta: + did_dns_succeeded: no + Process: true + Stage: s00-raw diff --git a/pkg/parser/tests/whitelist-base/base-grok.yaml b/pkg/parser/tests/whitelist-base/base-grok.yaml new file mode 100644 index 000000000..2db38dc4e --- /dev/null +++ b/pkg/parser/tests/whitelist-base/base-grok.yaml @@ -0,0 +1,11 @@ +name: test/whitelists +description: "Whitelist tests" +debug: true +whitelist: + reason: "Whitelist tests" + ip: + - 8.8.8.8 + cidr: + - "1.2.3.0/24" + expression: + - "'supertoken1234' == evt.Enriched.test_token" diff --git a/pkg/parser/tests/whitelist-base/parsers.yaml b/pkg/parser/tests/whitelist-base/parsers.yaml new file mode 100644 index 000000000..775f8893e --- /dev/null +++ b/pkg/parser/tests/whitelist-base/parsers.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/base-grok.yaml + stage: s00-raw diff --git a/pkg/parser/tests/whitelist-base/test.yaml b/pkg/parser/tests/whitelist-base/test.yaml new file mode 100644 index 000000000..471e635f9 --- /dev/null +++ b/pkg/parser/tests/whitelist-base/test.yaml @@ -0,0 +1,43 @@ +#these are the events we input into parser +lines: + - Meta: + test: test1 + source_ip: 8.8.8.8 + - Meta: + test: test2 + source_ip: 1.2.3.4 + - Meta: + test: test3 + source_ip: 2.2.3.4 + - Meta: + test: test4 + source_ip: 8.8.8.9 + - Enriched: + test_token: supertoken1234 + Meta: + test: test5 +#these are the results we expect from the parser +results: + - Whitelisted: true + Process: true + Meta: + test: test1 + - Whitelisted: true + Process: true + Meta: + test: test2 + - Whitelisted: false + Process: true + Meta: + test: test3 + - Whitelisted: false + Process: true + Meta: + test: test4 + - Whitelisted: true + Process: true + Meta: + test: test5 + + + diff --git a/pkg/parser/unix_parser.go b/pkg/parser/unix_parser.go new file mode 100644
index 000000000..676bc6daa --- /dev/null +++ b/pkg/parser/unix_parser.go @@ -0,0 +1,39 @@ +package parser + +import ( + "io/ioutil" + + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/logrusorgru/grokky" + "github.com/prometheus/common/log" +) + +type UnixParser struct { +} + +type UnixParserCtx struct { + Grok grokky.Host + Stages []string + Profiling bool +} + +func (u UnixParser) IsParsable(ctx interface{}, l types.Line) (bool, error) { + return true, nil +} + +func (u UnixParser) Init(c map[string]interface{}) (*UnixParserCtx, error) { + r := UnixParserCtx{} + r.Grok = grokky.NewBase() + files, err := ioutil.ReadDir(c["patterns"].(string)) + if err != nil { + return nil, err + } + for _, f := range files { + log.Debugf("Loading %s", f.Name()) + if err := r.Grok.AddFromFile(c["patterns"].(string) + f.Name()); err != nil { + log.Errorf("failed to load pattern %s : %v", f.Name(), err) + return nil, err + } + } + return &r, nil +} diff --git a/pkg/sqlite/commit.go b/pkg/sqlite/commit.go new file mode 100644 index 000000000..dfdbd0e4a --- /dev/null +++ b/pkg/sqlite/commit.go @@ -0,0 +1,45 @@ +package sqlite + +import ( + "fmt" + "sync/atomic" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" +) + +func (c *Context) Flush() error { + c.lock.Lock() + defer c.lock.Unlock() + + ret := c.tx.Commit() + if ret.Error != nil { + return fmt.Errorf("failed to commit records : %v", ret.Error) + } + c.tx = c.Db.Begin() + c.lastCommit = time.Now() + //Delete the expired records + if c.flush { + retx := c.Db.Where(`strftime("%s", until) < strftime("%s", "now")`).Delete(types.BanApplication{}) + if retx.RowsAffected > 0 { + log.Infof("Flushed %d expired entries from Ban Application", retx.RowsAffected) + } + } + return nil +} + +func (c *Context) AutoCommit() { + ticker := time.NewTicker(200 * time.Millisecond) + for { + select { + case <-ticker.C: + if atomic.LoadInt32(&c.count) != 0 && + (atomic.LoadInt32(&c.count)%100 == 0 || time.Since(c.lastCommit) >= 500*time.Millisecond) { + if err := c.Flush(); err != nil { + log.Fatalf("failed to flush : %s", err) + } + } + } + } +} diff --git a/pkg/sqlite/delete.go b/pkg/sqlite/delete.go new file mode 100644 index 000000000..9da02132f --- /dev/null +++ b/pkg/sqlite/delete.go @@ -0,0 +1,31 @@ +package sqlite + +import ( + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" +) + +/*try to delete entries with matching fields */ +func (c *Context) DeleteBan(target string) (int, error) { + + if target != "" { + ret := c.Db.Delete(types.BanApplication{}, "ip_text = ?", target) + if ret.Error != nil { + log.Errorf("Failed to delete record with BanTarget %s : %v", target, ret.Error) + return 0, ret.Error + } + return int(ret.RowsAffected), nil + } + return 0, fmt.Errorf("no target provided") +} + +func (c *Context) DeleteAll() error { + allBa := types.BanApplication{} + records := c.Db.Delete(&allBa) + if records.Error != nil { + return records.Error + } + return nil +}
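AutoCommit() above implements a small batching trick: a ticker polls a write counter and commits the open transaction either on volume or after a quiet period. A gorm-free sketch of the same shape, with all names hypothetical stand-ins:

```go
// Flush buffered writes either when enough have accumulated or when the
// last commit is getting old; mirrors the cadence AutoCommit() uses.
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type batcher struct {
	count      int32
	lastCommit time.Time
}

func (b *batcher) flush() {
	// stand-in for tx.Commit() followed by Db.Begin()
	fmt.Printf("flushing %d buffered records\n", atomic.SwapInt32(&b.count, 0))
	b.lastCommit = time.Now()
}

func main() {
	b := &batcher{lastCommit: time.Now()}
	done := time.After(2 * time.Second)
	go func() {
		for {
			atomic.AddInt32(&b.count, 1) // stand-in for WriteSignal / WriteBanApplication
			time.Sleep(5 * time.Millisecond)
		}
	}()
	ticker := time.NewTicker(200 * time.Millisecond)
	for {
		select {
		case <-ticker.C:
			if atomic.LoadInt32(&b.count) != 0 &&
				(atomic.LoadInt32(&b.count) >= 100 || time.Since(b.lastCommit) >= 500*time.Millisecond) {
				b.flush()
			}
		case <-done:
			return
		}
	}
}
```

One design note: the patch checks `count%100 == 0`, which can skip the volume trigger if the counter passes a multiple of 100 between two ticks; a `>=` threshold as above avoids that edge case.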
diff --git a/pkg/sqlite/sqlite.go b/pkg/sqlite/sqlite.go new file mode 100644 index 000000000..9234dc66d --- /dev/null +++ b/pkg/sqlite/sqlite.go @@ -0,0 +1,67 @@ +package sqlite + +import ( + "fmt" + "strconv" + "sync" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + + "github.com/jinzhu/gorm" + _ "github.com/jinzhu/gorm/dialects/sqlite" + _ "github.com/mattn/go-sqlite3" +) + +type Context struct { + Db *gorm.DB //Pointer to sqlite db + tx *gorm.DB //Pointer to current transaction (flushed on a regular basis) + lastCommit time.Time + flush bool + count int32 + lock sync.Mutex //protects tx/lastCommit between writers and AutoCommit +} + +func NewSQLite(cfg map[string]string) (*Context, error) { + var err error + c := &Context{} + + if _, ok := cfg["db_path"]; !ok { + return nil, fmt.Errorf("please specify a 'db_path' to SQLite db in the configuration") + } + + if cfg["db_path"] == "" { + return nil, fmt.Errorf("please specify a 'db_path' to SQLite db in the configuration") + } + + c.Db, err = gorm.Open("sqlite3", cfg["db_path"]+"?_busy_timeout=1000") + if err != nil { + return nil, fmt.Errorf("failed to open %s : %s", cfg["db_path"], err) + } + + if val, ok := cfg["debug"]; ok && val == "true" { + log.Infof("Enabling debug for sqlite") + c.Db.LogMode(true) + } + + c.flush, _ = strconv.ParseBool(cfg["flush"]) + // Migrate the schema + c.Db.AutoMigrate(&types.EventSequence{}, &types.SignalOccurence{}, &types.BanApplication{}) + c.Db.Model(&types.SignalOccurence{}).Related(&types.EventSequence{}) + c.Db.Model(&types.SignalOccurence{}).Related(&types.BanApplication{}) + c.tx = c.Db.Begin() + c.lastCommit = time.Now() + ret := c.tx.Commit() + + if ret.Error != nil { + return nil, fmt.Errorf("failed to commit records : %v", ret.Error) + + } + c.tx = c.Db.Begin() + if c.tx == nil { + return nil, fmt.Errorf("failed to begin sqlite transaction : %s", err) + } + go c.AutoCommit() + return c, nil +} diff --git a/pkg/sqlite/stats.go b/pkg/sqlite/stats.go new file mode 100644 index 000000000..0c20482d2 --- /dev/null +++ b/pkg/sqlite/stats.go @@ -0,0 +1,163 @@ +package sqlite + +import ( + "fmt" + "strconv" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/jinzhu/gorm" + log "github.com/sirupsen/logrus" +) + +func (c *Context) GetStats(since time.Duration) ([]map[string]string, error) { + sos := []types.SignalOccurence{} + stats := make([]map[string]string, 0) + as_stats := make(map[string]string) + scenar_stats := make(map[string]string) + country_stats := make(map[string]string) + + /*get records that are younger than 'since' */ + records := c.Db.Order("updated_at desc").Where(`strftime("%s", created_at) >= strftime("%s", ?)`, time.Now().Add(-since)).Find(&sos) + if records.Error != nil { + return nil, records.Error + } + + for _, ld := range sos { + /*by scenario*/ + if ld.Scenario == "" { + ld.Scenario = "unknown" + } + if _, ok := scenar_stats[ld.Scenario]; !ok { + scenar_stats[ld.Scenario] = "1" + } else { + nv, err := strconv.Atoi(scenar_stats[ld.Scenario]) + if err != nil { + log.Fatalf("Unable to update internal stats : %v", err) + } + scenar_stats[ld.Scenario] = fmt.Sprintf("%d", nv+1) + } + /*by country*/ + if ld.Source_Country == "" { + ld.Source_Country = "unknown" + } + if _, ok := country_stats[ld.Source_Country]; !ok { + country_stats[ld.Source_Country] = "1" + } else { + nv, err := strconv.Atoi(country_stats[ld.Source_Country]) + if err != nil { + log.Fatalf("Unable to update internal stats : %v", err) + } + country_stats[ld.Source_Country] = fmt.Sprintf("%d", nv+1) + } + /*by AS*/ + if ld.Source_AutonomousSystemNumber == "" { + ld.Source_AutonomousSystemNumber = "unknown" + } + if _, ok := as_stats[ld.Source_AutonomousSystemNumber]; !ok { + as_stats[ld.Source_AutonomousSystemNumber] = "1" + } else { + nv, err := strconv.Atoi(as_stats[ld.Source_AutonomousSystemNumber]) + if err != nil { + log.Fatalf("Unable to update internal stats : %v", err) + } + as_stats[ld.Source_AutonomousSystemNumber] = fmt.Sprintf("%d", nv+1) + } + } + stats = append(stats, as_stats) + stats =
append(stats, scenar_stats) + stats = append(stats, country_stats) + + return stats, nil +} + +//GetBansAt returns the IPs that were banned at a given time +func (c *Context) GetBansAt(at time.Time) ([]map[string]string, error) { + + bas := []types.BanApplication{} + rets := make([]map[string]string, 0) + /*get non-expired records*/ + //c.Db.LogMode(true) + records := c.Db.Order("updated_at desc").Where(`strftime("%s", until) >= strftime("%s", ?) AND strftime("%s", created_at) < strftime("%s", ?)`, at, at).Group("ip_text").Find(&bas) /*.Count(&count)*/ + if records.Error != nil { + return nil, records.Error + } + for _, ba := range bas { + var count int + /* + fetch count of bans for this specific ip_text + */ + ret := c.Db.Table("ban_applications").Order("updated_at desc").Where(`ip_text = ? AND strftime("%s", until) >= strftime("%s", ?) AND strftime("%s", created_at) < strftime("%s", ?) AND deleted_at is NULL`, ba.IpText, at, at).Count(&count) + if ret.Error != nil { + return nil, fmt.Errorf("Failed to fetch records count for %s : %v", ba.IpText, ret.Error) + } + sOs := []types.SignalOccurence{} + nbSo := 0 + records := c.Db.Where(`source_ip = ?`, ba.IpText).Group("id").Find(&sOs).Count(&nbSo) + if records.Error != nil { + //record not found can be ok + if gorm.IsRecordNotFoundError(records.Error) { + bancom := make(map[string]string) + bancom["iptext"] = ba.IpText + bancom["bancount"] = fmt.Sprintf("%d", count) + bancom["as"] = ba.TargetASName + bancom["asnum"] = fmt.Sprintf("%d", ba.TargetAS) + bancom["cn"] = ba.TargetCN + bancom["scenario"] = "?" + bancom["source"] = ba.MeasureSource + bancom["events_count"] = "0" + bancom["action"] = ba.MeasureType + bancom["until"] = fmt.Sprintf("%s", ba.Until.Sub(time.Now()).Round(time.Second)) + bancom["reason"] = ba.Reason + rets = append(rets, bancom) + continue + } + } + + evtCount := 0 + for _, s := range sOs { + evtCount += s.Events_count + } + + so := types.SignalOccurence{} + records = c.Db.Where(`id = ?`, ba.SignalOccurenceID).Find(&so) + if records.Error != nil { + //record not found can be ok + if gorm.IsRecordNotFoundError(records.Error) { + bancom := make(map[string]string) + bancom["iptext"] = ba.IpText + bancom["bancount"] = fmt.Sprintf("%d", count) + bancom["as"] = ba.TargetASName + bancom["asnum"] = fmt.Sprintf("%d", ba.TargetAS) + bancom["cn"] = ba.TargetCN + bancom["source"] = ba.MeasureSource + bancom["scenario"] = "?" 
+ bancom["events_count"] = "0" + bancom["action"] = ba.MeasureType + bancom["until"] = fmt.Sprintf("%s", ba.Until.Sub(time.Now()).Round(time.Second)) + bancom["reason"] = ba.Reason + rets = append(rets, bancom) + continue + } + fmt.Printf("err : %v", records.Error) + return nil, records.Error + } + if records.RowsAffected != 1 { + log.Errorf("more than one signal_occurence for local_decision, discard") + break + } + bancom := make(map[string]string) + bancom["iptext"] = ba.IpText + bancom["as"] = so.Source_AutonomousSystemNumber + " " + so.Source_AutonomousSystemOrganization + bancom["cn"] = so.Source_Country + bancom["bancount"] = fmt.Sprintf("%d", nbSo) + bancom["scenario"] = so.Scenario + bancom["events_count"] = fmt.Sprintf("%d", evtCount) + bancom["action"] = ba.MeasureType + bancom["source"] = ba.MeasureSource + bancom["until"] = fmt.Sprintf("%s", ba.Until.Sub(time.Now()).Round(time.Second)) + bancom["reason"] = so.Scenario + rets = append(rets, bancom) + } + return rets, nil +} diff --git a/pkg/sqlite/write.go b/pkg/sqlite/write.go new file mode 100644 index 000000000..876d9d708 --- /dev/null +++ b/pkg/sqlite/write.go @@ -0,0 +1,37 @@ +package sqlite + +import ( + "fmt" + "sync/atomic" + + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" +) + +//we simply append the event to the transaction +func (c *Context) WriteBanApplication(ban types.BanApplication) error { + atomic.AddInt32(&c.count, 1) + + c.lock.Lock() + defer c.lock.Unlock() + log.Debugf("Ban application being called : %s %s", ban.Scenario, ban.IpText) + ret := c.tx.Where(types.BanApplication{IpText: ban.IpText}).Assign(types.BanApplication{Until: ban.Until}).Assign(types.BanApplication{Reason: ban.Reason}).Assign(types.BanApplication{MeasureType: ban.MeasureType}).FirstOrCreate(&ban) + if ret.Error != nil { + return fmt.Errorf("failed to write ban record : %v", ret.Error) + } + return nil +} + +func (c *Context) WriteSignal(sig types.SignalOccurence) error { + atomic.AddInt32(&c.count, 1) + c.lock.Lock() + defer c.lock.Unlock() + //log.Debugf("Ban signal being called : %s %s", sig.Scenario, sig.Source.Ip.String()) + ret := c.tx.Create(&sig) + //sig.Scenario = sig.Scenario + if ret.Error != nil { + log.Errorf("FAILED : %+v \n", ret.Error) + return fmt.Errorf("failed to write signal occurence : %v", ret.Error) + } + return nil +} diff --git a/pkg/time/AUTHORS b/pkg/time/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/pkg/time/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/pkg/time/CONTRIBUTING.md b/pkg/time/CONTRIBUTING.md new file mode 100644 index 000000000..d0485e887 --- /dev/null +++ b/pkg/time/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. 
+ +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. diff --git a/pkg/time/CONTRIBUTORS b/pkg/time/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/pkg/time/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/pkg/time/LICENSE b/pkg/time/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/pkg/time/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/time/PATENTS b/pkg/time/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/pkg/time/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/pkg/time/README.md b/pkg/time/README.md new file mode 100644 index 000000000..ce9becdde --- /dev/null +++ b/pkg/time/README.md @@ -0,0 +1,17 @@ +# Go Time + +This repository provides supplementary Go time packages. + +## Download/Install + +The easiest way to install is to run `go get -u golang.org/x/time`. You can +also manually git clone the repository to `$GOPATH/src/golang.org/x/time`. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the time repository is located at +https://github.com/golang/go/issues. Prefix your issue with "x/time:" in the +subject line, so it is easy to find. diff --git a/pkg/time/rate/rate.go b/pkg/time/rate/rate.go new file mode 100644 index 000000000..4b7f5d980 --- /dev/null +++ b/pkg/time/rate/rate.go @@ -0,0 +1,477 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rate provides a rate limiter. +package rate + +import ( + "context" + "fmt" + "math" + "sync" + "time" +) + +// Limit defines the maximum frequency of some events. +// Limit is represented as number of events per second. +// A zero Limit allows no events. +type Limit float64 + +// Inf is the infinite rate limit; it allows all events (even if burst is zero). +const Inf = Limit(math.MaxFloat64) + +// Every converts a minimum time interval between events to a Limit. +func Every(interval time.Duration) Limit { + if interval <= 0 { + return Inf + } + return 1 / Limit(interval.Seconds()) +} + +// A Limiter controls how frequently events are allowed to happen. +// It implements a "token bucket" of size b, initially full and refilled +// at rate r tokens per second. +// Informally, in any large enough time interval, the Limiter limits the +// rate to r tokens per second, with a maximum burst size of b events. +// As a special case, if r == Inf (the infinite rate), b is ignored. +// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets. +// +// The zero value is a valid Limiter, but it will reject all events. +// Use NewLimiter to create non-zero Limiters. +// +// Limiter has three main methods, Allow, Reserve, and Wait. +// Most callers should use Wait. +// +// Each of the three methods consumes a single token. +// They differ in their behavior when no token is available. +// If no token is available, Allow returns false. +// If no token is available, Reserve returns a reservation for a future token +// and the amount of time the caller must wait before using it. +// If no token is available, Wait blocks until one can be obtained +// or its associated context.Context is canceled. +// +// The methods AllowN, ReserveN, and WaitN consume n tokens. 
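+//
+// Editorial sketch (not part of the upstream file): the three consumption
+// styles, assuming a limiter refilled at 1 token per second with burst 2;
+// handleEvent and ctx are placeholders, not names from this patch.
+//
+//	lim := NewLimiter(1, 2)
+//	if lim.Allow() { // non-blocking: drop the event when no token is free
+//		handleEvent()
+//	}
+//	if r := lim.Reserve(); r.OK() { // book a future token, then sleep out the delay
+//		time.Sleep(r.Delay())
+//		handleEvent()
+//	}
+//	if err := lim.Wait(ctx); err == nil { // block until a token frees up or ctx ends
+//		handleEvent()
+//	}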
+type Limiter struct { + limit Limit + burst int + + mu sync.Mutex + tokens float64 + // last is the last time the limiter's tokens field was updated + last time.Time + // lastEvent is the latest time of a rate-limited event (past or future) + lastEvent time.Time +} + +type RateLimiter interface { + Allow() bool + AllowN(time.Time, int) bool + GetTokensCount() float64 + GetTokensCountAt(time.Time) float64 + Dump() Lstate + Load(Lstate) +} + +type Lstate struct { + Limit Limit + Burst int + Tokens float64 + Last time.Time + LastEvent time.Time +} + +func (lim *Limiter) Dump() Lstate { + st := Lstate{} + st.Limit = lim.limit + st.Burst = lim.burst + st.Tokens = lim.tokens + st.Last = lim.last + st.LastEvent = lim.lastEvent + return st +} + +func (lim *Limiter) Load(st Lstate) { + lim.limit = st.Limit + lim.burst = st.Burst + lim.tokens = st.Tokens + lim.last = st.Last + lim.lastEvent = st.LastEvent + return +} + +// Limit returns the maximum overall event rate. +func (lim *Limiter) Limit() Limit { + lim.mu.Lock() + defer lim.mu.Unlock() + return lim.limit +} + +// Burst returns the maximum burst size. Burst is the maximum number of tokens +// that can be consumed in a single call to Allow, Reserve, or Wait, so higher +// Burst values allow more events to happen at once. +// A zero Burst allows no events, unless limit == Inf. +func (lim *Limiter) Burst() int { + return lim.burst +} + +// NewLimiter returns a new Limiter that allows events up to rate r and permits +// bursts of at most b tokens. +func NewLimiter(r Limit, b int) *Limiter { + return &Limiter{ + limit: r, + burst: b, + } +} + +// Allow is shorthand for AllowN(time.Now(), 1). +func (lim *Limiter) Allow() bool { + return lim.AllowN(time.Now(), 1) +} + +// AllowN reports whether n events may happen at time now. +// Use this method if you intend to drop / skip events that exceed the rate limit. +// Otherwise use Reserve or Wait. +func (lim *Limiter) AllowN(now time.Time, n int) bool { + return lim.reserveN(now, n, 0).ok +} + +// A Reservation holds information about events that are permitted by a Limiter to happen after a delay. +// A Reservation may be canceled, which may enable the Limiter to permit additional events. +type Reservation struct { + ok bool + lim *Limiter + tokens int + timeToAct time.Time + // This is the Limit at reservation time, it can change later. + limit Limit +} + +// OK returns whether the limiter can provide the requested number of tokens +// within the maximum wait time. If OK is false, Delay returns InfDuration, and +// Cancel does nothing. +func (r *Reservation) OK() bool { + return r.ok +} + +// Delay is shorthand for DelayFrom(time.Now()). +func (r *Reservation) Delay() time.Duration { + return r.DelayFrom(time.Now()) +} + +// InfDuration is the duration returned by Delay when a Reservation is not OK. +const InfDuration = time.Duration(1<<63 - 1) + +// DelayFrom returns the duration for which the reservation holder must wait +// before taking the reserved action. Zero duration means act immediately. +// InfDuration means the limiter cannot grant the tokens requested in this +// Reservation within the maximum wait time. +func (r *Reservation) DelayFrom(now time.Time) time.Duration { + if !r.ok { + return InfDuration + } + delay := r.timeToAct.Sub(now) + if delay < 0 { + return 0 + } + return delay +} + +// Cancel is shorthand for CancelAt(time.Now()). 
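+// (Editorial note, grounded in CancelAt below: cancelling restores only the
+// tokens that no later reservation has already claimed. With NewLimiter(10, 2),
+// reserving 2 tokens at t0 (acting at t0) and 2 more (acting at t0+200ms),
+// then cancelling the second reservation at t0+100ms returns both of its
+// tokens, since it is the latest event; see TestCancelLast in rate_test.go.)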
+func (r *Reservation) Cancel() { + r.CancelAt(time.Now()) + return +} + +// CancelAt indicates that the reservation holder will not perform the reserved action +// and reverses the effects of this Reservation on the rate limit as much as possible, +// considering that other reservations may have already been made. +func (r *Reservation) CancelAt(now time.Time) { + if !r.ok { + return + } + + r.lim.mu.Lock() + defer r.lim.mu.Unlock() + + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { + return + } + + // calculate tokens to restore + // The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved + // after r was obtained. These tokens should not be restored. + restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct)) + if restoreTokens <= 0 { + return + } + // advance time to now + now, _, tokens := r.lim.advance(now) + // calculate new number of tokens + tokens += restoreTokens + if burst := float64(r.lim.burst); tokens > burst { + tokens = burst + } + // update state + r.lim.last = now + r.lim.tokens = tokens + if r.timeToAct == r.lim.lastEvent { + prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) + if !prevEvent.Before(now) { + r.lim.lastEvent = prevEvent + } + } + + return +} + +// Reserve is shorthand for ReserveN(time.Now(), 1). +func (lim *Limiter) Reserve() *Reservation { + return lim.ReserveN(time.Now(), 1) +} + +// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen. +// The Limiter takes this Reservation into account when allowing future events. +// ReserveN returns false if n exceeds the Limiter's burst size. +// Usage example: +// r := lim.ReserveN(time.Now(), 1) +// if !r.OK() { +// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? +// return +// } +// time.Sleep(r.Delay()) +// Act() +// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. +// If you need to respect a deadline or cancel the delay, use Wait instead. +// To drop or skip events exceeding rate limit, use Allow instead. +func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { + r := lim.reserveN(now, n, InfDuration) + return &r +} + +// Wait is shorthand for WaitN(ctx, 1). +func (lim *Limiter) Wait(ctx context.Context) (err error) { + return lim.WaitN(ctx, 1) +} + +// WaitN blocks until lim permits n events to happen. +// It returns an error if n exceeds the Limiter's burst size, the Context is +// canceled, or the expected wait time exceeds the Context's Deadline. +// The burst limit is ignored if the rate limit is Inf. +func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + lim.mu.Lock() + burst := lim.burst + limit := lim.limit + lim.mu.Unlock() + + if n > burst && limit != Inf { + return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst) + } + // Check if ctx is already cancelled + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + // Determine wait limit + now := time.Now() + waitLimit := InfDuration + if deadline, ok := ctx.Deadline(); ok { + waitLimit = deadline.Sub(now) + } + // Reserve + r := lim.reserveN(now, n, waitLimit) + if !r.ok { + return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) + } + // Wait if necessary + delay := r.DelayFrom(now) + if delay == 0 { + return nil + } + t := time.NewTimer(delay) + defer t.Stop() + select { + case <-t.C: + // We can proceed. 
+ return nil + case <-ctx.Done(): + // Context was canceled before we could proceed. Cancel the + // reservation, which may permit other events to proceed sooner. + r.Cancel() + return ctx.Err() + } +} + +// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit). +func (lim *Limiter) SetLimit(newLimit Limit) { + lim.SetLimitAt(time.Now(), newLimit) +} + +// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated +// or underutilized by those which reserved (using Reserve or Wait) but did not yet act +// before SetLimitAt was called. +func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { + lim.mu.Lock() + defer lim.mu.Unlock() + + now, _, tokens := lim.advance(now) + + lim.last = now + lim.tokens = tokens + lim.limit = newLimit +} + +// SetBurst is shorthand for SetBurstAt(time.Now(), newBurst). +func (lim *Limiter) SetBurst(newBurst int) { + lim.SetBurstAt(time.Now(), newBurst) +} + +// SetBurstAt sets a new burst size for the limiter. +func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { + lim.mu.Lock() + defer lim.mu.Unlock() + + now, _, tokens := lim.advance(now) + + lim.last = now + lim.tokens = tokens + lim.burst = newBurst +} + +// reserveN is a helper method for AllowN, ReserveN, and WaitN. +// maxFutureReserve specifies the maximum reservation wait duration allowed. +// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. +func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { + lim.mu.Lock() + + if lim.limit == Inf { + lim.mu.Unlock() + return Reservation{ + ok: true, + lim: lim, + tokens: n, + timeToAct: now, + } + } + + now, last, tokens := lim.advance(now) + + // Calculate the remaining number of tokens resulting from the request. + tokens -= float64(n) + + // Calculate the wait duration + var waitDuration time.Duration + if tokens < 0 { + waitDuration = lim.limit.durationFromTokens(-tokens) + } + + // Decide result + ok := n <= lim.burst && waitDuration <= maxFutureReserve + + // Prepare reservation + r := Reservation{ + ok: ok, + lim: lim, + limit: lim.limit, + } + if ok { + r.tokens = n + r.timeToAct = now.Add(waitDuration) + } + + // Update state + if ok { + lim.last = now + lim.tokens = tokens + lim.lastEvent = r.timeToAct + } else { + lim.last = last + } + + lim.mu.Unlock() + return r +} + +// advance calculates and returns an updated state for lim resulting from the passage of time. +// lim is not changed. +func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { + last := lim.last + if now.Before(last) { + last = now + } + + // Avoid making delta overflow below when last is very old. + maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens) + elapsed := now.Sub(last) + if elapsed > maxElapsed { + elapsed = maxElapsed + } + + // Calculate the new number of tokens, due to time that passed. + delta := lim.limit.tokensFromDuration(elapsed) + tokens := lim.tokens + delta + if burst := float64(lim.burst); tokens > burst { + tokens = burst + } + + return now, last, tokens +} + +// durationFromTokens is a unit conversion function from the number of tokens to the duration +// of time it takes to accumulate them at a rate of limit tokens per second. 
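+// (Editorial example: at limit = 10 tokens per second, 2.5 tokens take
+// 2.5 / 10 = 0.25s to accumulate, so durationFromTokens(2.5) is 250ms;
+// tokensFromDuration below is the inverse mapping.)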
+func (limit Limit) durationFromTokens(tokens float64) time.Duration { + seconds := tokens / float64(limit) + return time.Nanosecond * time.Duration(1e9*seconds) +} + +// tokensFromDuration is a unit conversion function from a time duration to the number of tokens +// which could be accumulated during that duration at a rate of limit tokens per second. +func (limit Limit) tokensFromDuration(d time.Duration) float64 { + // Split the integer and fractional parts ourself to minimize rounding errors. + // See golang.org/issues/34861. + sec := float64(d/time.Second) * float64(limit) + nsec := float64(d%time.Second) * float64(limit) + return sec + nsec/1e9 +} + +//GetTokensCount returns the number of tokens available in the bucket +func (lim *Limiter) GetTokensCount() float64 { + lim.mu.Lock() + defer lim.mu.Unlock() + _, _, tokens := lim.advance(time.Now()) + return tokens +} + +//GetTokensCountAt returns the number of tokens available in the bucket at time t +func (lim *Limiter) GetTokensCountAt(t time.Time) float64 { + lim.mu.Lock() + defer lim.mu.Unlock() + _, _, tokens := lim.advance(t) + return tokens +} + +//AlwaysFull is a rate limiter that doesn't limit anything; +//it complies with the RateLimiter interface above +type AlwaysFull struct { +} + +func (af *AlwaysFull) Dump() Lstate { + return Lstate{} +} + +func (af *AlwaysFull) Load(st Lstate) { + return +} + +func (af *AlwaysFull) Allow() bool { + return true +} + +func (af *AlwaysFull) AllowN(time.Time, int) bool { + return true +} + +func (af *AlwaysFull) GetTokensCount() float64 { + return float64(int(^uint(0) >> 1)) // max int +} + +func (af *AlwaysFull) GetTokensCountAt(t time.Time) float64 { + return float64(int(^uint(0) >> 1)) // max int +} diff --git a/pkg/time/rate/rate_test.go b/pkg/time/rate/rate_test.go new file mode 100644 index 000000000..b0ed34dfb --- /dev/null +++ b/pkg/time/rate/rate_test.go @@ -0,0 +1,477 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
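+
+// Editorial sketch (not an upstream test): the Dump/Load pair added to rate.go
+// can be checked with a simple round-trip, e.g.:
+//
+//	func TestDumpLoadRoundTrip(t *testing.T) {
+//		lim := NewLimiter(10, 5)
+//		lim.AllowN(time.Now(), 2)  // consume tokens so the state is non-trivial
+//		st := lim.Dump()           // snapshot limit, burst, tokens, timestamps
+//		restored := &Limiter{}
+//		restored.Load(st)
+//		if restored.Dump() != st { // Lstate is comparable (no pointers or slices)
+//			t.Fatal("state did not survive the round-trip")
+//		}
+//	}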
+ +// +build go1.7 + +package rate + +import ( + "context" + "math" + "runtime" + "sync" + "sync/atomic" + "testing" + "time" +) + +func TestLimit(t *testing.T) { + if Limit(10) == Inf { + t.Errorf("Limit(10) == Inf should be false") + } +} + +func closeEnough(a, b Limit) bool { + return (math.Abs(float64(a)/float64(b)) - 1.0) < 1e-9 +} + +func TestEvery(t *testing.T) { + cases := []struct { + interval time.Duration + lim Limit + }{ + {0, Inf}, + {-1, Inf}, + {1 * time.Nanosecond, Limit(1e9)}, + {1 * time.Microsecond, Limit(1e6)}, + {1 * time.Millisecond, Limit(1e3)}, + {10 * time.Millisecond, Limit(100)}, + {100 * time.Millisecond, Limit(10)}, + {1 * time.Second, Limit(1)}, + {2 * time.Second, Limit(0.5)}, + {time.Duration(2.5 * float64(time.Second)), Limit(0.4)}, + {4 * time.Second, Limit(0.25)}, + {10 * time.Second, Limit(0.1)}, + {time.Duration(math.MaxInt64), Limit(1e9 / float64(math.MaxInt64))}, + } + for _, tc := range cases { + lim := Every(tc.interval) + if !closeEnough(lim, tc.lim) { + t.Errorf("Every(%v) = %v want %v", tc.interval, lim, tc.lim) + } + } +} + +const ( + d = 100 * time.Millisecond +) + +var ( + t0 = time.Now() + t1 = t0.Add(time.Duration(1) * d) + t2 = t0.Add(time.Duration(2) * d) + t3 = t0.Add(time.Duration(3) * d) + t4 = t0.Add(time.Duration(4) * d) + t5 = t0.Add(time.Duration(5) * d) + t9 = t0.Add(time.Duration(9) * d) +) + +type allow struct { + t time.Time + n int + ok bool +} + +func run(t *testing.T, lim *Limiter, allows []allow) { + for i, allow := range allows { + ok := lim.AllowN(allow.t, allow.n) + if ok != allow.ok { + t.Errorf("step %d: lim.AllowN(%v, %v) = %v want %v", + i, allow.t, allow.n, ok, allow.ok) + } + } +} + +func TestLimiterBurst1(t *testing.T) { + run(t, NewLimiter(10, 1), []allow{ + {t0, 1, true}, + {t0, 1, false}, + {t0, 1, false}, + {t1, 1, true}, + {t1, 1, false}, + {t1, 1, false}, + {t2, 2, false}, // burst size is 1, so n=2 always fails + {t2, 1, true}, + {t2, 1, false}, + }) +} + +func TestLimiterBurst3(t *testing.T) { + run(t, NewLimiter(10, 3), []allow{ + {t0, 2, true}, + {t0, 2, false}, + {t0, 1, true}, + {t0, 1, false}, + {t1, 4, false}, + {t2, 1, true}, + {t3, 1, true}, + {t4, 1, true}, + {t4, 1, true}, + {t4, 1, false}, + {t4, 1, false}, + {t9, 3, true}, + {t9, 0, true}, + }) +} + +func TestLimiterJumpBackwards(t *testing.T) { + run(t, NewLimiter(10, 3), []allow{ + {t1, 1, true}, // start at t1 + {t0, 1, true}, // jump back to t0, two tokens remain + {t0, 1, true}, + {t0, 1, false}, + {t0, 1, false}, + {t1, 1, true}, // got a token + {t1, 1, false}, + {t1, 1, false}, + {t2, 1, true}, // got another token + {t2, 1, false}, + {t2, 1, false}, + }) +} + +// Ensure that tokensFromDuration doesn't produce +// rounding errors by truncating nanoseconds. +// See golang.org/issues/34861. +func TestLimiter_noTruncationErrors(t *testing.T) { + if !NewLimiter(0.7692307692307693, 1).Allow() { + t.Fatal("expected true") + } +} + +func TestSimultaneousRequests(t *testing.T) { + const ( + limit = 1 + burst = 5 + numRequests = 15 + ) + var ( + wg sync.WaitGroup + numOK = uint32(0) + ) + + // Very slow replenishing bucket. + lim := NewLimiter(limit, burst) + + // Tries to take a token, atomically updates the counter and decreases the wait + // group counter. 
+ f := func() { + defer wg.Done() + if ok := lim.Allow(); ok { + atomic.AddUint32(&numOK, 1) + } + } + + wg.Add(numRequests) + for i := 0; i < numRequests; i++ { + go f() + } + wg.Wait() + if numOK != burst { + t.Errorf("numOK = %d, want %d", numOK, burst) + } +} + +func TestLongRunningQPS(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + if runtime.GOOS == "openbsd" { + t.Skip("low resolution time.Sleep invalidates test (golang.org/issue/14183)") + return + } + + // The test runs for a few seconds executing many requests and then checks + // that overall number of requests is reasonable. + const ( + limit = 100 + burst = 100 + ) + var numOK = int32(0) + + lim := NewLimiter(limit, burst) + + var wg sync.WaitGroup + f := func() { + if ok := lim.Allow(); ok { + atomic.AddInt32(&numOK, 1) + } + wg.Done() + } + + start := time.Now() + end := start.Add(5 * time.Second) + for time.Now().Before(end) { + wg.Add(1) + go f() + + // This will still offer ~500 requests per second, but won't consume + // outrageous amount of CPU. + time.Sleep(2 * time.Millisecond) + } + wg.Wait() + elapsed := time.Since(start) + ideal := burst + (limit * float64(elapsed) / float64(time.Second)) + + // We should never get more requests than allowed. + if want := int32(ideal + 1); numOK > want { + t.Errorf("numOK = %d, want %d (ideal %f)", numOK, want, ideal) + } + // We should get very close to the number of requests allowed. + if want := int32(0.999 * ideal); numOK < want { + t.Errorf("numOK = %d, want %d (ideal %f)", numOK, want, ideal) + } +} + +type request struct { + t time.Time + n int + act time.Time + ok bool +} + +// dFromDuration converts a duration to a multiple of the global constant d +func dFromDuration(dur time.Duration) int { + // Adding a millisecond to be swallowed by the integer division + // because we don't care about small inaccuracies + return int((dur + time.Millisecond) / d) +} + +// dSince returns multiples of d since t0 +func dSince(t time.Time) int { + return dFromDuration(t.Sub(t0)) +} + +func runReserve(t *testing.T, lim *Limiter, req request) *Reservation { + return runReserveMax(t, lim, req, InfDuration) +} + +func runReserveMax(t *testing.T, lim *Limiter, req request, maxReserve time.Duration) *Reservation { + r := lim.reserveN(req.t, req.n, maxReserve) + if r.ok && (dSince(r.timeToAct) != dSince(req.act)) || r.ok != req.ok { + t.Errorf("lim.reserveN(t%d, %v, %v) = (t%d, %v) want (t%d, %v)", + dSince(req.t), req.n, maxReserve, dSince(r.timeToAct), r.ok, dSince(req.act), req.ok) + } + return &r +} + +func TestSimpleReserve(t *testing.T) { + lim := NewLimiter(10, 2) + + runReserve(t, lim, request{t0, 2, t0, true}) + runReserve(t, lim, request{t0, 2, t2, true}) + runReserve(t, lim, request{t3, 2, t4, true}) +} + +func TestMix(t *testing.T) { + lim := NewLimiter(10, 2) + + runReserve(t, lim, request{t0, 3, t1, false}) // should return false because n > Burst + runReserve(t, lim, request{t0, 2, t0, true}) + run(t, lim, []allow{{t1, 2, false}}) // not enough tokens - don't allow + runReserve(t, lim, request{t1, 2, t2, true}) + run(t, lim, []allow{{t1, 1, false}}) // negative tokens - don't allow + run(t, lim, []allow{{t3, 1, true}}) +} + +func TestCancelInvalid(t *testing.T) { + lim := NewLimiter(10, 2) + + runReserve(t, lim, request{t0, 2, t0, true}) + r := runReserve(t, lim, request{t0, 3, t3, false}) + r.CancelAt(t0) // should have no effect + runReserve(t, lim, request{t0, 2, t2, true}) // did not get extra tokens +} + +func TestCancelLast(t *testing.T) { +
lim := NewLimiter(10, 2) + + runReserve(t, lim, request{t0, 2, t0, true}) + r := runReserve(t, lim, request{t0, 2, t2, true}) + r.CancelAt(t1) // got 2 tokens back + runReserve(t, lim, request{t1, 2, t2, true}) +} + +func TestCancelTooLate(t *testing.T) { + lim := NewLimiter(10, 2) + + runReserve(t, lim, request{t0, 2, t0, true}) + r := runReserve(t, lim, request{t0, 2, t2, true}) + r.CancelAt(t3) // too late to cancel - should have no effect + runReserve(t, lim, request{t3, 2, t4, true}) +} + +func TestCancel0Tokens(t *testing.T) { + lim := NewLimiter(10, 2) + + runReserve(t, lim, request{t0, 2, t0, true}) + r := runReserve(t, lim, request{t0, 1, t1, true}) + runReserve(t, lim, request{t0, 1, t2, true}) + r.CancelAt(t0) // got 0 tokens back + runReserve(t, lim, request{t0, 1, t3, true}) +} + +func TestCancel1Token(t *testing.T) { + lim := NewLimiter(10, 2) + + runReserve(t, lim, request{t0, 2, t0, true}) + r := runReserve(t, lim, request{t0, 2, t2, true}) + runReserve(t, lim, request{t0, 1, t3, true}) + r.CancelAt(t2) // got 1 token back + runReserve(t, lim, request{t2, 2, t4, true}) +} + +func TestCancelMulti(t *testing.T) { + lim := NewLimiter(10, 4) + + runReserve(t, lim, request{t0, 4, t0, true}) + rA := runReserve(t, lim, request{t0, 3, t3, true}) + runReserve(t, lim, request{t0, 1, t4, true}) + rC := runReserve(t, lim, request{t0, 1, t5, true}) + rC.CancelAt(t1) // get 1 token back + rA.CancelAt(t1) // get 2 tokens back, as if C was never reserved + runReserve(t, lim, request{t1, 3, t5, true}) +} + +func TestReserveJumpBack(t *testing.T) { + lim := NewLimiter(10, 2) + + runReserve(t, lim, request{t1, 2, t1, true}) // start at t1 + runReserve(t, lim, request{t0, 1, t1, true}) // should violate Limit,Burst + runReserve(t, lim, request{t2, 2, t3, true}) +} + +func TestReserveJumpBackCancel(t *testing.T) { + lim := NewLimiter(10, 2) + + runReserve(t, lim, request{t1, 2, t1, true}) // start at t1 + r := runReserve(t, lim, request{t1, 2, t3, true}) + runReserve(t, lim, request{t1, 1, t4, true}) + r.CancelAt(t0) // cancel at t0, get 1 token back + runReserve(t, lim, request{t1, 2, t4, true}) // should violate Limit,Burst +} + +func TestReserveSetLimit(t *testing.T) { + lim := NewLimiter(5, 2) + + runReserve(t, lim, request{t0, 2, t0, true}) + runReserve(t, lim, request{t0, 2, t4, true}) + lim.SetLimitAt(t2, 10) + runReserve(t, lim, request{t2, 1, t4, true}) // violates Limit and Burst +} + +func TestReserveSetBurst(t *testing.T) { + lim := NewLimiter(5, 2) + + runReserve(t, lim, request{t0, 2, t0, true}) + runReserve(t, lim, request{t0, 2, t4, true}) + lim.SetBurstAt(t3, 4) + runReserve(t, lim, request{t0, 4, t9, true}) // violates Limit and Burst +} + +func TestReserveSetLimitCancel(t *testing.T) { + lim := NewLimiter(5, 2) + + runReserve(t, lim, request{t0, 2, t0, true}) + r := runReserve(t, lim, request{t0, 2, t4, true}) + lim.SetLimitAt(t2, 10) + r.CancelAt(t2) // 2 tokens back + runReserve(t, lim, request{t2, 2, t3, true}) +} + +func TestReserveMax(t *testing.T) { + lim := NewLimiter(10, 2) + maxT := d + + runReserveMax(t, lim, request{t0, 2, t0, true}, maxT) + runReserveMax(t, lim, request{t0, 1, t1, true}, maxT) // reserve for close future + runReserveMax(t, lim, request{t0, 1, t2, false}, maxT) // time to act too far in the future +} + +type wait struct { + name string + ctx context.Context + n int + delay int // in multiples of d + nilErr bool +} + +func runWait(t *testing.T, lim *Limiter, w wait) { + start := time.Now() + err := lim.WaitN(w.ctx, w.n) + delay := 
time.Since(start) + if (w.nilErr && err != nil) || (!w.nilErr && err == nil) || w.delay != dFromDuration(delay) { + errString := "<nil>" + if !w.nilErr { + errString = "an error" + } + t.Errorf("lim.WaitN(%v, lim, %v) = %v with delay %v ; want %v with delay %v", + w.name, w.n, err, delay, errString, d*time.Duration(w.delay)) + } +} + +func TestWaitSimple(t *testing.T) { + lim := NewLimiter(10, 3) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + runWait(t, lim, wait{"already-cancelled", ctx, 1, 0, false}) + + runWait(t, lim, wait{"exceed-burst-error", context.Background(), 4, 0, false}) + + runWait(t, lim, wait{"act-now", context.Background(), 2, 0, true}) + runWait(t, lim, wait{"act-later", context.Background(), 3, 2, true}) +} + +func TestWaitCancel(t *testing.T) { + lim := NewLimiter(10, 3) + + ctx, cancel := context.WithCancel(context.Background()) + runWait(t, lim, wait{"act-now", ctx, 2, 0, true}) // after this lim.tokens = 1 + go func() { + time.Sleep(d) + cancel() + }() + runWait(t, lim, wait{"will-cancel", ctx, 3, 1, false}) + // should get 3 tokens back, and have lim.tokens = 2 + t.Logf("tokens:%v last:%v lastEvent:%v", lim.tokens, lim.last, lim.lastEvent) + runWait(t, lim, wait{"act-now-after-cancel", context.Background(), 2, 0, true}) +} + +func TestWaitTimeout(t *testing.T) { + lim := NewLimiter(10, 3) + + ctx, cancel := context.WithTimeout(context.Background(), d) + defer cancel() + runWait(t, lim, wait{"act-now", ctx, 2, 0, true}) + runWait(t, lim, wait{"w-timeout-err", ctx, 3, 0, false}) +} + +func TestWaitInf(t *testing.T) { + lim := NewLimiter(Inf, 0) + + runWait(t, lim, wait{"exceed-burst-no-error", context.Background(), 3, 0, true}) +} + +func BenchmarkAllowN(b *testing.B) { + lim := NewLimiter(Every(1*time.Second), 1) + now := time.Now() + b.ReportAllocs() + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + lim.AllowN(now, 1) + } + }) +} + +func BenchmarkWaitNNoDelay(b *testing.B) { + lim := NewLimiter(Limit(b.N), b.N) + ctx := context.Background() + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + lim.WaitN(ctx, 1) + } +} diff --git a/pkg/types/ban_application.go b/pkg/types/ban_application.go new file mode 100644 index 000000000..5f7a3d5b6 --- /dev/null +++ b/pkg/types/ban_application.go @@ -0,0 +1,34 @@ +package types + +import ( + "time" + + "github.com/jinzhu/gorm" +) + +//BanApplication is the in-db representation of a ban order. IPs/Ranges are represented as an integer interval.
+//one BanOrder can lead to multiple BanApplication +type BanApplication struct { + gorm.Model `json:"-"` + + MeasureSource string /*api,local*/ + MeasureType string /*ban,slow,captcha*/ + MeasureExtra string /*in case we need extra info for the connector ?*/ + Until time.Time /*expiration of ban*/ + + StartIp uint32 + EndIp uint32 + + TargetCN string + TargetAS int + TargetASName string + + IpText string /*only for humans*/ + + Reason string /*long human reason of the ban 'ban AS1234' */ + Scenario string /*the type of scenario that led to ban*/ + + //SignalOccurence *parser.SignalOccurence /*the signal occurrence it's attached to*/ + SignalOccurenceID uint //so we can link local decision to actual overflow + +} diff --git a/pkg/types/ban_order.go b/pkg/types/ban_order.go new file mode 100644 index 000000000..6de3aa809 --- /dev/null +++ b/pkg/types/ban_order.go @@ -0,0 +1,58 @@ +package types + +import ( + "log" + "net" + "time" +) + +//BanOrder is what is generated from a SignalOccurence : it describes what action to take +//it is in-memory only and never touches the DB. It will be turned into one or several "parser.BanApplication" +type BanOrder struct { + MeasureSource string /*api,local*/ + MeasureType string /*ban,slow,captcha*/ + Scope string /*ip,multi_ip,as,country*/ + TargetAS int /*if non-empty, applies to this AS*/ + TargetASName string /*if non-empty, applies to this AS*/ + TargetRange net.IPNet /*if non-empty, applies to this range*/ + TargetIP net.IP /*if non-empty, applies to this IP*/ + TargetCountry string + Until time.Time /*when the measure expires*/ + TxtTarget string + Reason string +} + +func OrderToApplications(ordr *BanOrder) ([]BanApplication, error) { + var bas []BanApplication + var ba BanApplication + /* + pseudo-code for as/country scope would be : + - fetch ranges of AS/Country + - for ipnet := range Country.Ranges { + ba.append(...) + } + */ + + ba.MeasureType = ordr.MeasureType + ba.MeasureSource = ordr.MeasureSource + ba.Until = ordr.Until + ba.Reason = ordr.Reason + ba.TargetAS = ordr.TargetAS + ba.TargetASName = ordr.TargetASName + + ba.TargetCN = ordr.TargetCountry + if ordr.Scope == "ip" { + ba.StartIp = IP2Int(ordr.TargetIP) + ba.EndIp = IP2Int(ordr.TargetIP) + ba.IpText = ordr.TargetIP.String() + bas = append(bas, ba) + } else if ordr.Scope == "range" { + ba.StartIp = IP2Int(ordr.TargetRange.IP) + ba.EndIp = IP2Int(LastAddress(&ordr.TargetRange)) + ba.IpText = ordr.TargetRange.String() + bas = append(bas, ba) + } else { + log.Fatalf("only 'ip' and 'range' scopes are supported.") + } + return bas, nil +} diff --git a/pkg/types/event.go b/pkg/types/event.go new file mode 100644 index 000000000..e1a43d382 --- /dev/null +++ b/pkg/types/event.go @@ -0,0 +1,35 @@ +package types + +import ( + "time" +) + +const ( + LOG = iota + OVFLW +) + +type Event struct { + /* is it a log or an overflow */ + Type int `yaml:"Type,omitempty"` + ExpectMode int `yaml:"ExpectMode,omitempty"` //how buckets should handle the event : leaky.TIMEMACHINE or leaky.LIVE + Whitelisted bool `yaml:"Whitelisted,omitempty"` + WhiteListReason string `json:"whitelist_reason,omitempty"`
+ /* the current stage of the line being parsed */ + Stage string `yaml:"Stage,omitempty"` + /* original line (produced by acquisition) */ + Line Line `json:"-" yaml:"Line,omitempty"` + /* output of groks */ + Parsed map[string]string `json:"-" yaml:"Parsed,omitempty"` + /* output of enrichment */ + Enriched map[string]string `json:"Enriched,omitempty" yaml:"Enriched,omitempty"` + /* Overflow */ + Overflow SignalOccurence `yaml:"Overflow,omitempty"` + Time time.Time `json:"Time,omitempty"` //parsed time + StrTime string `yaml:"StrTime,omitempty"` + MarshaledTime string `yaml:"MarshaledTime,omitempty"` + Process bool `yaml:"Process,omitempty"` //can be set to false to avoid processing line + /* Meta is the only part that will make it to the API - it should be normalized */ + Meta map[string]string `json:"Meta,omitempty" yaml:"Meta,omitempty"` +} diff --git a/pkg/types/event_sequence.go b/pkg/types/event_sequence.go new file mode 100644 index 000000000..d0d00640d --- /dev/null +++ b/pkg/types/event_sequence.go @@ -0,0 +1,23 @@ +package types + +import ( + "time" + + "github.com/jinzhu/gorm" +) + +//EventSequence is used to represent the summarized version of events that led to the overflow +type EventSequence struct { + gorm.Model `json:"-"` + Time time.Time + Source Source `json:"-"` + /*for db only :/ */ + Source_ip string + Source_range string + Source_AutonomousSystemNumber string + Source_AutonomousSystemOrganization string + Source_Country string + /*stop db only */ + SignalOccurenceID uint //unique ID for the hasMany relation + Serialized string //the serialized dict +} diff --git a/pkg/types/grok_pattern.go b/pkg/types/grok_pattern.go new file mode 100644 index 000000000..7da071e68 --- /dev/null +++ b/pkg/types/grok_pattern.go @@ -0,0 +1,38 @@ +package types + +import ( + "github.com/antonmedv/expr/vm" + "github.com/logrusorgru/grokky" +) + +//Used mostly for statics +type ExtraField struct { + //if the target is indicated by name Struct.Field etc. + TargetByName string `yaml:"target,omitempty"` + //if the target field is in Event map + Parsed string `yaml:"parsed,omitempty"` + //if the target field is in Meta map + Meta string `yaml:"meta,omitempty"` + //if the target field is in Enriched map + Enriched string `yaml:"enriched,omitempty"` + //the source is a static value + Value string `yaml:"value,omitempty"` + //or the result of an Expression + ExpValue string `yaml:"expression,omitempty"` + RunTimeValue *vm.Program `json:"-"` //the actual compiled filter + //or an enrichment method + Method string `yaml:"method,omitempty"` +} + +type GrokPattern struct { + //the field to which regexp is going to apply + TargetField string `yaml:"apply_on,omitempty"` + //the grok/regexp by name (loaded from patterns/*) + RegexpName string `yaml:"name,omitempty"` + //a proper grok pattern + RegexpValue string `yaml:"pattern,omitempty"` + //the runtime form of regexpname / regexpvalue + RunTimeRegexp *grokky.Pattern `json:"-"` //the actual regexp + //a grok can contain statics that apply if pattern is successful + Statics []ExtraField `yaml:"statics,omitempty"` +} diff --git a/pkg/types/line.go b/pkg/types/line.go new file mode 100644 index 000000000..582f94c7b --- /dev/null +++ b/pkg/types/line.go @@ -0,0 +1,11 @@ +package types + +import "time" + +type Line struct { + Raw string `yaml:"Raw,omitempty"` + Src string `yaml:"Src,omitempty"` + Time time.Time //acquis time + Labels map[string]string `yaml:"Labels,omitempty"` + Process bool +} diff --git a/pkg/types/profile.go 
b/pkg/types/profile.go new file mode 100644 index 000000000..e8034210c --- /dev/null +++ b/pkg/types/profile.go @@ -0,0 +1,25 @@ +package types + +import ( + "time" + + "github.com/antonmedv/expr/vm" +) + +/*Action profiles*/ +type RemediationProfile struct { + Apply bool + Ban bool + Slow bool + Captcha bool + Duration string + TimeDuration time.Duration +} +type Profile struct { + Profile string `yaml:"profile"` + Filter string `yaml:"filter"` + Remediation RemediationProfile `yaml:"remediation"` + RunTimeFilter *vm.Program + ApiPush *bool `yaml:"api"` + OutputConfigs []map[string]string `yaml:"outputs,omitempty"` +} diff --git a/pkg/types/signal_occurence.go b/pkg/types/signal_occurence.go new file mode 100644 index 000000000..2e9f712f6 --- /dev/null +++ b/pkg/types/signal_occurence.go @@ -0,0 +1,43 @@ +package types + +import ( + "time" + + "github.com/jinzhu/gorm" +) + +type SignalOccurence struct { + gorm.Model `json:"-"` + // ID uint // `json:"-" gorm:"primary_key,AUTO_INCREMENT"` + MapKey string //for Delete + Scenario string `json:"scenario,omitempty"` //The unique name of the scenario, e.g. ssh_bruteforce_multi-user + Bucket_id string `json:"bucket_id,omitempty"` //The 'runtime' bucket-name (mostly for debug), e.g. `sunny-flower` + Alert_message string `json:"alert_message,omitempty"` //Human-friendly label (to be displayed) + Events_count int `json:"events_count,omitempty" yaml:"Events_count,omitempty"` //Number of events between first occurrence and ban + Events_sequence []EventSequence `json:"-" gorm:"foreignkey:SignalOccurenceID;association_foreignkey:ID"` //A list of the individual events (serialized as strings) that led to the overflow + Start_at time.Time `json:"start_at,omitempty"` //first event (usually bucket creation time) + BanApplications []BanApplication `json:"ban_applications,omitempty" gorm:"foreignkey:SignalOccurenceID;association_foreignkey:ID"` + Stop_at time.Time `json:"stop_at,omitempty"` //last event (usually bucket overflow time) + Source *Source `json:"source"` + /*for db*/ + Source_ip string `yaml:"Source_ip,omitempty"` + Source_range string + Source_AutonomousSystemNumber string + Source_AutonomousSystemOrganization string + Source_Country string + Source_Latitude float64 + Source_Longitude float64 + /*/for db*/ + Sources map[string]Source `json:"sources,omitempty" gorm:"-"` + // Source_ip string `json:"src_ip,omitempty"` //for now just the IP + // Source_as string `json:"src_as,omitempty"` //for now just the as (AS number) + // Source_country string `json:"src_country,omitempty"` //for now just the country (two-letter ISO code) + Dest_ip string `json:"dst_ip,omitempty"` //for now just the destination IP + //Policy string `json:"policy,omitempty"` //for now we forward it as well :) + //bucket info + Capacity int `json:"capacity,omitempty"` + Leak_speed time.Duration `json:"leak_speed,omitempty"` + + Reprocess bool //Reprocess, when true, makes the overflow be processed again as if it were a fresh log + Labels map[string]string `gorm:"-"` +} diff --git a/pkg/types/source.go b/pkg/types/source.go new file mode 100644 index 000000000..6dc8577ca --- /dev/null +++ b/pkg/types/source.go @@ -0,0 +1,20 @@ +package types + +import ( + "net" + + "github.com/jinzhu/gorm" +) + +//Source is the generic representation of a source ip implicated in events / overflows.
It contains both information extracted directly from logs and enrichment +type Source struct { + gorm.Model `json:"-"` + Ip net.IP + Range net.IPNet + AutonomousSystemNumber string + AutonomousSystemOrganization string + Country string + Latitude float64 + Longitude float64 + Flags map[string]bool //a list of flags we can set +} diff --git a/pkg/types/utils.go b/pkg/types/utils.go new file mode 100644 index 000000000..d71dced6e --- /dev/null +++ b/pkg/types/utils.go @@ -0,0 +1,42 @@ +package types + +import ( + "encoding/binary" + "net" +) + +func IP2Int(ip net.IP) uint32 { + if len(ip) == 16 { + return binary.BigEndian.Uint32(ip[12:16]) + } + return binary.BigEndian.Uint32(ip) +} + +func Int2ip(nn uint32) net.IP { + ip := make(net.IP, 4) + binary.BigEndian.PutUint32(ip, nn) + return ip +} + +//Stolen from : https://github.com/llimllib/ipaddress/ +// Return the final address of a net range. Convert to IPv4 if possible, +// otherwise return an IPv6 address +func LastAddress(n *net.IPNet) net.IP { + ip := n.IP.To4() + if ip == nil { + ip = n.IP + return net.IP{ + ip[0] | ^n.Mask[0], ip[1] | ^n.Mask[1], ip[2] | ^n.Mask[2], + ip[3] | ^n.Mask[3], ip[4] | ^n.Mask[4], ip[5] | ^n.Mask[5], + ip[6] | ^n.Mask[6], ip[7] | ^n.Mask[7], ip[8] | ^n.Mask[8], + ip[9] | ^n.Mask[9], ip[10] | ^n.Mask[10], ip[11] | ^n.Mask[11], + ip[12] | ^n.Mask[12], ip[13] | ^n.Mask[13], ip[14] | ^n.Mask[14], + ip[15] | ^n.Mask[15]} + } + + return net.IPv4( + ip[0]|^n.Mask[0], + ip[1]|^n.Mask[1], + ip[2]|^n.Mask[2], + ip[3]|^n.Mask[3]) +} diff --git a/pkg/types/whitelist.go b/pkg/types/whitelist.go new file mode 100644 index 000000000..8c856fd26 --- /dev/null +++ b/pkg/types/whitelist.go @@ -0,0 +1,17 @@ +package types + +import ( + "net" + + "github.com/antonmedv/expr/vm" +) + +type Whitelist struct { + Reason string `yaml:"reason,omitempty"` + Ips []string `yaml:"ip,omitempty"` + B_Ips []net.IP + Cidrs []string `yaml:"cidr,omitempty"` + B_Cidrs []*net.IPNet + Exprs []string `yaml:"expression,omitempty"` + B_Exprs []*vm.Program +} diff --git a/plugins/backend/sqlite.go b/plugins/backend/sqlite.go new file mode 100644 index 000000000..8b8579c74 --- /dev/null +++ b/plugins/backend/sqlite.go @@ -0,0 +1,73 @@ +package main + +import ( + "time" + + "github.com/crowdsecurity/crowdsec/pkg/sqlite" + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" +) + +type pluginDB struct { + CTX *sqlite.Context +} + +func (p *pluginDB) Init(config map[string]string) error { + var err error + log.Debugf("sqlite config: %+v", config) + p.CTX, err = sqlite.NewSQLite(config) + + if err != nil { + return err + } + return nil +} + +func (p *pluginDB) Delete(target string) (int, error) { + nbDel, err := p.CTX.DeleteBan(target) + if err != nil { + return 0, err + } + log.Debugf("deleted %d entries from database", nbDel) + return nbDel, nil +} + +func (p *pluginDB) DeleteAll() error { + err := p.CTX.DeleteAll() + if err != nil { + return err + } + return nil +} + +func (p *pluginDB) Insert(sig types.SignalOccurence) error { + err := p.CTX.WriteSignal(sig) + if err != nil { + return err + } + return nil +} + +func (p *pluginDB) Flush() error { + err := p.CTX.Flush() + if err != nil { + return err + } + + return nil +} + +func (p *pluginDB) ReadAT(timeAT time.Time) ([]map[string]string, error) { + ret, err := p.CTX.GetBansAt(timeAT) + if err != nil { + return nil, err + } + return ret, nil +} + +func New() interface{} { + return &pluginDB{} +} + +// empty main function is mandatory since we are in a main package +func main() {} 
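Editorial aside: the backend above is compiled with -buildmode=plugin (see scripts/build_plugins.sh just below) and exposes its constructor through the exported New symbol. A minimal sketch of the consuming side, assuming the loader resolves New via the standard library plugin package (the loader itself is not part of this patch):

package main

import (
	"log"
	"plugin"
)

func main() {
	// Open the shared object produced by `go build -buildmode=plugin`.
	p, err := plugin.Open("./plugins/backend/sqlite.so")
	if err != nil {
		log.Fatalf("failed to open plugin: %v", err)
	}
	// Backend plugins export `func New() interface{}` (see sqlite.go above).
	sym, err := p.Lookup("New")
	if err != nil {
		log.Fatalf("plugin does not export New: %v", err)
	}
	newFunc, ok := sym.(func() interface{})
	if !ok {
		log.Fatalf("unexpected type for New: %T", sym)
	}
	backend := newFunc()
	// A real loader would now assert a backend interface (Init, Insert,
	// Delete, DeleteAll, Flush, ReadAT) against the returned value.
	log.Printf("loaded backend: %T", backend)
}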
diff --git a/scripts/build_plugins.sh b/scripts/build_plugins.sh new file mode 100644 index 000000000..b28cdcfdc --- /dev/null +++ b/scripts/build_plugins.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +PLUGIN_DIR="./plugins/" + + +goto() { + echo "[*] Going to $1" + cd "$1" +} + + +goto $PLUGIN_DIR +CURRENT_DIR=$(pwd) +for path in $(ls); +do + goto $path + modules=$(find . -name "*.go") + CURRENT_PLUGIN_DIR=$(pwd) + for mod in $modules; + do + folder=$(dirname $mod) + plugin_file=$(basename -- "$mod") + plugin_name="${plugin_file%.*}" + echo "[*] Building plugin $plugin_name from $mod" + go build -buildmode=plugin -o "$plugin_name.so" $plugin_file + goto $CURRENT_PLUGIN_DIR + done + goto $CURRENT_DIR +done \ No newline at end of file diff --git a/scripts/test_env.sh b/scripts/test_env.sh new file mode 100755 index 000000000..cc0439a52 --- /dev/null +++ b/scripts/test_env.sh @@ -0,0 +1,115 @@ +#!/bin/bash + +BASE="./tests" + +while [[ $# -gt 0 ]] +do + key="${1}" + case ${key} in + -d|--directory) + BASE=${2} + shift #past argument + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) # unknown option + echo "[ERROR] Unknown argument ${key}." >&2 + usage + exit 1 + ;; + esac +done + +BASE=$(realpath $BASE) + +DATA_DIR="$BASE/data" + +LOG_DIR="$BASE/logs/" + +CONFIG_DIR="$BASE/config" +CSCLI_DIR="$CONFIG_DIR/crowdsec-cli" +PARSER_DIR="$CONFIG_DIR/parsers" +PARSER_S00="$PARSER_DIR/s00-raw" +PARSER_S01="$PARSER_DIR/s01-parse" +PARSER_S02="$PARSER_DIR/s02-enrich" +SCENARIOS_DIR="$CONFIG_DIR/scenarios" +POSTOVERFLOWS_DIR="$CONFIG_DIR/postoverflows" +PLUGIN_BACKEND_DIR="$CONFIG_DIR/plugins/backend/" +SQLITE_PLUGIN_FILE="$PLUGIN_BACKEND_DIR/sqlite.yaml" + +gen_sqlite_config() { + echo "name: sqlite" >> "$SQLITE_PLUGIN_FILE" + echo "path: ./plugins/backend/sqlite.so" >> "$SQLITE_PLUGIN_FILE" + echo "config:" >> "$SQLITE_PLUGIN_FILE" + echo " db_path: ./test.db" >> "$SQLITE_PLUGIN_FILE" + echo " flush: true" >> "$SQLITE_PLUGIN_FILE" +} + +log_info() { + msg=$1 + date=$(date +%x:%X) + echo -e "[$date][INFO] $msg" +} + +create_arbo() { + mkdir -p "$BASE" + mkdir -p "$DATA_DIR" + mkdir -p "$LOG_DIR" + mkdir -p "$CONFIG_DIR" + mkdir -p "$PARSER_DIR" + mkdir -p "$PARSER_S00" + mkdir -p "$PARSER_S01" + mkdir -p "$PARSER_S02" + mkdir -p "$SCENARIOS_DIR" + mkdir -p "$POSTOVERFLOWS_DIR" + mkdir -p "$CSCLI_DIR" + mkdir -p "$PLUGIN_BACKEND_DIR" +} + +copy_files() { + cp "./config/profiles.yaml" "$CONFIG_DIR" + cp "./config/dev.yaml" "$BASE" + cp "./cmd/crowdsec/crowdsec" "$BASE" + cp "./cmd/crowdsec-cli/cscli" "$BASE" + cp -r "./config/patterns" "$CONFIG_DIR" + cp -r "./data/" "$BASE" + cp -r "./plugins/" "$BASE" +} + + +setup() { + $BASE/cscli -c "$CSCLI_DIR" config installdir "$CONFIG_DIR" + $BASE/cscli -c "$CSCLI_DIR" config backend "$PLUGIN_BACKEND_DIR" + $BASE/cscli -c "$CSCLI_DIR" update + $BASE/cscli -c "$CSCLI_DIR" install collection crowdsecurity/linux +} + + +main() { + log_info "Creating test arborescence in $BASE" + create_arbo + log_info "Arborescence created" + log_info "Copying needed files for test environment" + copy_files + log_info "Files copied" + log_info "Setting up configurations" + setup + gen_sqlite_config + log_info "Environment is ready in $BASE" +} + + + +usage() { + echo "Usage:" + echo " ./test_env.sh -h Display this help message."
+ echo " ./env_test.sh -d ./tests Create test environment in './tests' folder" + exit 0 +} + + + +main \ No newline at end of file diff --git a/tests/scenario/01ssh/file.log b/tests/scenario/01ssh/file.log new file mode 100644 index 000000000..1b8af76cd --- /dev/null +++ b/tests/scenario/01ssh/file.log @@ -0,0 +1,32 @@ +2018-02-07T18:00:06+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2 +2018-02-07T18:00:09+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2 +2018-02-07T18:00:12+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2 +2018-02-07T18:00:12+01:00 eqx10863 sshd[13934]: Disconnecting: Too many authentication failures for root from 192.168.13.38 port 39596 ssh2 [preauth] +2018-02-07T18:00:21+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:23+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:26+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:29+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:31+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:31+01:00 eqx10863 sshd[13952]: Disconnecting: Too many authentication failures for root from 192.168.13.38 port 2377 ssh2 [preauth] +2018-02-07T18:00:06+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2 +2018-02-07T18:00:09+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2 +2018-02-07T18:00:12+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2 +2018-02-07T18:00:12+01:00 eqx10863 sshd[13934]: Disconnecting: Too many authentication failures for root from 192.168.13.38 port 39596 ssh2 [preauth] +2018-02-07T18:00:21+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:23+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:26+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:29+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:31+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:31+01:00 eqx10863 sshd[13952]: Disconnecting: Too many authentication failures for root from 192.168.13.38 port 2377 ssh2 [preauth] +2018-02-07T18:00:31+01:00 eqx10863 sshd[13952]: PAM 5 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=192.168.13.38 user=root +2018-02-07T18:00:31+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2 +2018-02-07T18:00:31+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2 +2018-02-07T18:00:32+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2 +2018-02-07T18:00:32+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2 +2018-02-07T18:00:33+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2 +2018-02-07T18:00:34+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2 +2018-02-07T18:00:34+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 
2377 ssh2 +2018-02-07T18:00:34+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2 +2018-02-07T18:00:34+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2 +2018-02-07T18:00:34+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2 +2018-02-07T18:00:34+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2 diff --git a/tests/scenario/01ssh/labels b/tests/scenario/01ssh/labels new file mode 100644 index 000000000..9bf921c4b --- /dev/null +++ b/tests/scenario/01ssh/labels @@ -0,0 +1,2 @@ +type: syslog + diff --git a/tests/scenario/01ssh/parsers.yaml b/tests/scenario/01ssh/parsers.yaml new file mode 100644 index 000000000..0f84306dc --- /dev/null +++ b/tests/scenario/01ssh/parsers.yaml @@ -0,0 +1,6 @@ + - filename: ./hub/parsers/s00-raw/crowdsecurity/syslog-logs.yaml + stage: s00-raw + - filename: ./hub/parsers/s01-parse/crowdsecurity/sshd-logs.yaml + stage: s01-parse + - filename: ./hub/parsers/s02-enrich/crowdsecurity/dateparse-enrich.yaml + stage: s02-enrich diff --git a/tests/scenario/01ssh/scenarios.yaml b/tests/scenario/01ssh/scenarios.yaml new file mode 100644 index 000000000..c38132371 --- /dev/null +++ b/tests/scenario/01ssh/scenarios.yaml @@ -0,0 +1 @@ + - filename: ./hub/scenarios/crowdsecurity/ssh-bf.yaml diff --git a/tests/scenario/01ssh/success.sqlite b/tests/scenario/01ssh/success.sqlite new file mode 100644 index 000000000..503dd9470 --- /dev/null +++ b/tests/scenario/01ssh/success.sqlite @@ -0,0 +1,3 @@ +select count(*) == 1 from signal_occurences where source_ip = "192.168.13.38" and scenario = "crowdsecurity/ssh-bf" +select count(*) == 1 from signal_occurences where source_ip = "192.168.13.37" and scenario = "crowdsecurity/ssh-bf" + diff --git a/tests/scenario/02naxsi/file.log b/tests/scenario/02naxsi/file.log new file mode 100644 index 000000000..d8f610aae --- /dev/null +++ b/tests/scenario/02naxsi/file.log @@ -0,0 +1 @@ +2018-04-27T15:46:50+02:00 rp-ch-01 nginx: 2018/04/27 15:46:50 [error] 20329#0: *81170632 NAXSI_EXLOG: ip=191.154.37.115&server=cogedis.trustelem.com&uri=/app/55773/sso&id=10091&zone=ARGS&var_name=signature&content=gTyxddzKMBjOQ6iiNXsauWKyznrWzgzobNS5L226v23%2BSvh0z8uKrZbErckzPs7sF1Yif/T9P1O2Fmm05mSu1%2BL/TBAt1G2JsDv2%2B0zp2blECZFMMTfpgcyIeITDgh8HGM5GR9K2diB6/d1g5yShZs6Vm9%2BMCtXVO4gfpFwH4sSM7jbjU5xbShmiKkYNn3O8f3ZAdnZpk3%2BELVcODIGWwhRuN9Hy6agMirzx4PMTUWcDmdnB9W4iDcV/k28xnxuBE0vNw1JAL9sOSqrBnzqKk%2BUx9kt9hfEofvDYPvLfWiU56oEd8yzT1fEn21dzA6BcOCetzYoNjSdYDreKQm4O%2BVAgn90WKjvcORK%2BO3CkPR5%2B9N4d1hMLc10ZrKps4iHiJMG%2BRHvzBxL3yeYGdmdjX%2Bf6ZKjPkI3dTwP9379Wong0/DZ4BQ8ZC6SozID68PXybKynOGauaUxKCt3y3fAXSLH1Qtcl70kVQ9eQa1q%2B%2BZxujCGJ33sVl6ps10iLn2lYoJ85CAXCk%2B7p%2BMKOQzwGaFUBuVMgVbxATRQPnCN%2BHPymQ23LwWtKQbvRtJpahyPR9Yb6mUbf7JO1H2XF6%2BsPp4pcIZqv/SwJlgxSkPT5ehnJjLUhVIFu6SGlau1C0B/LUgHoZ8c%2Bkoy%2BfzzPqQPO2I1Y5SXFWwFPU6dbBgz1p%2BQ=, client: 77.136.47.223, server: www.trustelem.com, request: "GET 
/app/55773/sso?SAMLRequest=fZJbc6owFIX%2FCpN3NCJUZIqdtHihglfU2hcmjRGwQDAJaPvrD%2Bpxpuc8dM%2FkIbP3WiuX7%2FHpnKVKRblIWG6DVgMCheaE7ZI8ssEqGKgmeOo9CpylhYVKGecLeiypkEqty4V1bdig5LnFsEiEleOMCksSa4l8z9Ia0Co4k4ywFChICMplHfTCclFmlC8prxJCVwvPBrGUhbCazWRHsSopiXOWsiihopF9NQROqdgzTmiDsOxJMBtCxzDhtWbaNgKKUx8qybG83uNuRlhEd4loSF4KSVOaXeRNXBRNw%2Bh02k0hGFBcxwah9oLq2kzf1PMG%2BX3zNAmik%2B%2Bgy4Lz7094abe8aDMIk%2B3gIYz7zmrGzYU26n8Rrnn7c3beIndjurm63Q2HqTg%2Ff3M1LeHSgL67LraTKD6ij5ggPVjrHwjiKqlN8cP3J0F9nfnF4ICNlbtIzdepF3jxpDIO%2BxF3dv336t1cqN0Xz5fz1f4Ai7QfszOVejUMsoOero9V130bw8ioxsjcxQe9%2B6qy6tBpif0Yh1lZlGietsnpzRkQj0WOxK%2BeHh4jDTPzxMQUr8LhKFTna6KNfX5oLRblftyuw4elQMOQH1MXn7OsTVD9WkKU1M2FxLm0gQZbpgp1VesELcPSHyy929DbnXegzP5%2B%2B3OS32D6jZGP25CwRkEwU2fTZQCU9R3KegDcELSu4fwHe7%2Fb4jtwoHcn4iL6D6fH5g%2Fv3m33L%2By9Pw%3D%3D&RelayState=%2Fa085800002amsSg&SigAlg=http%3A%2F%2Fwww.w3.org%2F2001%2F04%2Fxmldsig-more%23rsa-sha256&Signature=gTyxddzKMBjOQ6iiNXsauWKyznrWzgzobNS5L226v23%2BSvh0z8uKrZbErckzPs7sF1Yif%2FT9P1O2Fmm05mSu1%2BL%2FTBAt1G2JsDv2%2B0zp2blECZFMMTfpgcyIeITDgh8HGM5GR9K2diB6%2Fd1g5yShZs6Vm9%2BMCt diff --git a/tests/scenario/02naxsi/labels b/tests/scenario/02naxsi/labels new file mode 100644 index 000000000..c2988205b --- /dev/null +++ b/tests/scenario/02naxsi/labels @@ -0,0 +1 @@ +type: syslog diff --git a/tests/scenario/02naxsi/parsers.yaml b/tests/scenario/02naxsi/parsers.yaml new file mode 100644 index 000000000..595b67745 --- /dev/null +++ b/tests/scenario/02naxsi/parsers.yaml @@ -0,0 +1,9 @@ + - filename: ./hub/parsers/s00-raw/crowdsecurity/syslog-logs.yaml + stage: s00-raw + - filename: ./hub/parsers/s01-parse/crowdsecurity/nginx-logs.yaml + stage: s01-parse +#it's a bit nasty : naxsi is in enrich phase because it parses nginx error log parser output + - filename: ./hub/parsers/s02-enrich/crowdsecurity/naxsi-logs.yaml + stage: s02-enrich + - filename: ./hub/parsers/s02-enrich/crowdsecurity/dateparse-enrich.yaml + stage: s02-enrich diff --git a/tests/scenario/02naxsi/scenarios.yaml b/tests/scenario/02naxsi/scenarios.yaml new file mode 100644 index 000000000..9c2d18972 --- /dev/null +++ b/tests/scenario/02naxsi/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: ./hub/scenarios/crowdsecurity/naxsi-exploit-vpatch.yaml + diff --git a/tests/scenario/02naxsi/success.sqlite b/tests/scenario/02naxsi/success.sqlite new file mode 100644 index 000000000..7a0ed44f9 --- /dev/null +++ b/tests/scenario/02naxsi/success.sqlite @@ -0,0 +1 @@ +select count(*) == 1 from signal_occurences where source_ip = "191.154.37.115" and scenario = "crowdsecurity/naxsi-exploit-vpatch" diff --git a/tests/scenario/03wpbf/file.log b/tests/scenario/03wpbf/file.log new file mode 100644 index 000000000..7f1752ac4 --- /dev/null +++ b/tests/scenario/03wpbf/file.log @@ -0,0 +1,6 @@ +2017-12-01T14:47:42+01:00 rp-ch-01 nginx: 192.168.13.38 - - [01/Dec/2017:14:47:42 +0000] "POST /lh-magazine/wp-login.php HTTP/1.1" 200 4249 "http://www.lahalle.com/lh-magazine/wp-login.php" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:43+01:00 rp-ch-01 nginx: 192.168.13.38 - - [01/Dec/2017:14:47:43 +0000] "POST /lh-magazine/wp-login.php HTTP/1.1" 200 4249 "http://www.lahalle.com/lh-magazine/wp-login.php" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 rp-ch-01 nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "POST /lh-magazine/wp-login.php HTTP/1.1" 200 4249 
"http://www.lahalle.com/lh-magazine/wp-login.php" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:45+01:00 rp-ch-01 nginx: 192.168.13.38 - - [01/Dec/2017:14:47:45 +0000] "POST /lh-magazine/wp-login.php HTTP/1.1" 200 4249 "http://www.lahalle.com/lh-magazine/wp-login.php" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:46+01:00 rp-ch-01 nginx: 192.168.13.38 - - [01/Dec/2017:14:47:46 +0000] "POST /lh-magazine/wp-login.php HTTP/1.1" 200 4249 "http://www.lahalle.com/lh-magazine/wp-login.php" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:48+01:00 rp-ch-01 nginx: 192.168.13.38 - - [01/Dec/2017:14:47:48 +0000] "POST /lh-magazine/wp-login.php HTTP/1.1" 200 4249 "http://www.lahalle.com/lh-magazine/wp-login.php" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" diff --git a/tests/scenario/03wpbf/labels b/tests/scenario/03wpbf/labels new file mode 100644 index 000000000..3a15bed50 --- /dev/null +++ b/tests/scenario/03wpbf/labels @@ -0,0 +1 @@ +type: nginx diff --git a/tests/scenario/03wpbf/parsers.yaml b/tests/scenario/03wpbf/parsers.yaml new file mode 100644 index 000000000..887543e30 --- /dev/null +++ b/tests/scenario/03wpbf/parsers.yaml @@ -0,0 +1,9 @@ + - filename: ./hub/parsers/s00-raw/crowdsecurity/syslog-logs.yaml + stage: s00-raw + - filename: ./hub/parsers/s01-parse/crowdsecurity/nginx-logs.yaml + stage: s01-parse + - filename: ./hub/parsers/s02-enrich/crowdsecurity/dateparse-enrich.yaml + stage: s02-enrich + - filename: ./hub/parsers/s02-enrich/crowdsecurity/http-logs.yaml + stage: s02-enrich + \ No newline at end of file diff --git a/tests/scenario/03wpbf/scenarios.yaml b/tests/scenario/03wpbf/scenarios.yaml new file mode 100644 index 000000000..33b628ee8 --- /dev/null +++ b/tests/scenario/03wpbf/scenarios.yaml @@ -0,0 +1,3 @@ + - filename: ./hub/scenarios/crowdsecurity/http-bf-wordpress_bf.yaml + + diff --git a/tests/scenario/03wpbf/success.sqlite b/tests/scenario/03wpbf/success.sqlite new file mode 100644 index 000000000..b94884ab7 --- /dev/null +++ b/tests/scenario/03wpbf/success.sqlite @@ -0,0 +1 @@ +select count(*) == 1 from signal_occurences where source_ip = "192.168.13.38" and scenario = "crowdsecurity/http-bf-wordpress_bf" diff --git a/tests/scenario/04smb/file.log b/tests/scenario/04smb/file.log new file mode 100644 index 000000000..90555ac0d --- /dev/null +++ b/tests/scenario/04smb/file.log @@ -0,0 +1,7 @@ +Dec 13 00:31:12 ip-172-31-11-1.us-west-1.compute.internal smb[2762]: Auth: [SMB2,(null)] user [domainname]\[Administrator] at [Fri, 13 Dec 2019 00:31:12.487033 UTC] with [NTLMv2] status [NT_STATUS_NO_SUCH_USER] workstation [LOCALPCNAME] remote host [ipv4:61.6.206.22:65132] mapped to [domainname]\[Administrator]. local host [ipv4:172.18.0.3:445] #015 +Dec 13 00:31:13 ip-172-31-11-1.us-west-1.compute.internal smb[2762]: Auth: [SMB2,(null)] user [domainname]\[Administrator] at [Fri, 13 Dec 2019 00:31:13.294397 UTC] with [NTLMv2] status [NT_STATUS_NO_SUCH_USER] workstation [LOCALPCNAME] remote host [ipv4:61.6.206.22:1391] mapped to [domainname]\[Administrator]. 
local host [ipv4:172.18.0.3:445] #015 +Dec 13 00:31:14 ip-172-31-11-1.us-west-1.compute.internal smb[2762]: Auth: [SMB2,(null)] user [domainname]\[Administrator] at [Fri, 13 Dec 2019 00:31:14.108036 UTC] with [NTLMv2] status [NT_STATUS_NO_SUCH_USER] workstation [LOCALPCNAME] remote host [ipv4:61.6.206.22:2154] mapped to [domainname]\[Administrator]. local host [ipv4:172.18.0.3:445] #015 +Dec 13 00:31:14 ip-172-31-11-1.us-west-1.compute.internal smb[2762]: Auth: [SMB2,(null)] user [domainname]\[Administrator] at [Fri, 13 Dec 2019 00:31:14.883233 UTC] with [NTLMv2] status [NT_STATUS_NO_SUCH_USER] workstation [LOCALPCNAME] remote host [ipv4:61.6.206.22:2893] mapped to [domainname]\[Administrator]. local host [ipv4:172.18.0.3:445] #015 +Dec 13 00:31:15 ip-172-31-11-1.us-west-1.compute.internal smb[2762]: Auth: [SMB2,(null)] user [domainname]\[Administrator] at [Fri, 13 Dec 2019 00:31:13.294397 UTC] with [NTLMv2] status [NT_STATUS_NO_SUCH_USER] workstation [LOCALPCNAME] remote host [ipv4:61.6.206.22:1391] mapped to [domainname]\[Administrator]. local host [ipv4:172.18.0.3:445] #015 +Dec 13 00:31:16 ip-172-31-11-1.us-west-1.compute.internal smb[2762]: Auth: [SMB2,(null)] user [domainname]\[Administrator] at [Fri, 13 Dec 2019 00:31:14.108036 UTC] with [NTLMv2] status [NT_STATUS_NO_SUCH_USER] workstation [LOCALPCNAME] remote host [ipv4:61.6.206.22:2154] mapped to [domainname]\[Administrator]. local host [ipv4:172.18.0.3:445] #015 +Dec 13 00:31:17 ip-172-31-11-1.us-west-1.compute.internal smb[2762]: Auth: [SMB2,(null)] user [domainname]\[Administrator] at [Fri, 13 Dec 2019 00:31:14.883233 UTC] with [NTLMv2] status [NT_STATUS_NO_SUCH_USER] workstation [LOCALPCNAME] remote host [ipv4:61.6.206.22:2893] mapped to [domainname]\[Administrator]. local host [ipv4:172.18.0.3:445] #015 diff --git a/tests/scenario/04smb/labels b/tests/scenario/04smb/labels new file mode 100644 index 000000000..c2988205b --- /dev/null +++ b/tests/scenario/04smb/labels @@ -0,0 +1 @@ +type: syslog diff --git a/tests/scenario/04smb/parsers.yaml b/tests/scenario/04smb/parsers.yaml new file mode 100644 index 000000000..6cdf52263 --- /dev/null +++ b/tests/scenario/04smb/parsers.yaml @@ -0,0 +1,6 @@ + - filename: ./hub/parsers/s00-raw/crowdsecurity/syslog-logs.yaml + stage: s00-raw + - filename: ./hub/parsers/s01-parse/crowdsecurity/smb-logs.yaml + stage: s01-parse + - filename: ./hub/parsers/s02-enrich/crowdsecurity/dateparse-enrich.yaml + stage: s02-enrich diff --git a/tests/scenario/04smb/scenarios.yaml b/tests/scenario/04smb/scenarios.yaml new file mode 100644 index 000000000..fe3a6166c --- /dev/null +++ b/tests/scenario/04smb/scenarios.yaml @@ -0,0 +1,4 @@ + - filename: ./hub/scenarios/crowdsecurity/smb-bf.yaml + + + diff --git a/tests/scenario/04smb/success.sqlite b/tests/scenario/04smb/success.sqlite new file mode 100644 index 000000000..c3edc79ae --- /dev/null +++ b/tests/scenario/04smb/success.sqlite @@ -0,0 +1 @@ +select count(*) == 1 from signal_occurences where source_ip = "61.6.206.22" and scenario = "crowdsecurity/smb-bf" diff --git a/tests/scenario/05mysql/file.log b/tests/scenario/05mysql/file.log new file mode 100644 index 000000000..54fb7e0b8 --- /dev/null +++ b/tests/scenario/05mysql/file.log @@ -0,0 +1,5 @@ +Dec 12 22:43:09 ip-172-31-11-1.us-west-1.compute.internal mysql[2762]: 2019-12-12T22:43:09.600659Z 120 [Note] Access denied for user 'root'@'106.3.44.207' (using password: YES) +Dec 12 22:43:10 ip-172-31-11-1.us-west-1.compute.internal mysql[2762]: 2019-12-12T22:43:10.408842Z 121 [Note] Access denied for user 
'root'@'106.3.44.207' (using password: YES) +Dec 12 22:43:11 ip-172-31-11-1.us-west-1.compute.internal mysql[2762]: 2019-12-12T22:43:11.218794Z 122 [Note] Access denied for user 'root'@'106.3.44.207' (using password: YES) +Dec 12 22:43:12 ip-172-31-11-1.us-west-1.compute.internal mysql[2762]: 2019-12-12T22:43:12.027695Z 123 [Note] Access denied for user 'root'@'106.3.44.207' (using password: YES) +Dec 12 22:43:12 ip-172-31-11-1.us-west-1.compute.internal mysql[2762]: 2019-12-12T22:43:12.841040Z 124 [Note] Access denied for user 'root'@'106.3.44.207' (using password: YES) \ No newline at end of file diff --git a/tests/scenario/05mysql/labels b/tests/scenario/05mysql/labels new file mode 100644 index 000000000..c2988205b --- /dev/null +++ b/tests/scenario/05mysql/labels @@ -0,0 +1 @@ +type: syslog diff --git a/tests/scenario/05mysql/parsers.yaml b/tests/scenario/05mysql/parsers.yaml new file mode 100644 index 000000000..524ed815b --- /dev/null +++ b/tests/scenario/05mysql/parsers.yaml @@ -0,0 +1,6 @@ + - filename: ./hub/parsers/s00-raw/crowdsecurity/syslog-logs.yaml + stage: s00-raw + - filename: ./hub/parsers/s01-parse/crowdsecurity/mysql-logs.yaml + stage: s01-parse + - filename: ./hub/parsers/s02-enrich/crowdsecurity/dateparse-enrich.yaml + stage: s02-enrich diff --git a/tests/scenario/05mysql/scenarios.yaml b/tests/scenario/05mysql/scenarios.yaml new file mode 100644 index 000000000..dcfb2c79b --- /dev/null +++ b/tests/scenario/05mysql/scenarios.yaml @@ -0,0 +1,5 @@ + - filename: ./hub/scenarios/crowdsecurity/mysql-bf.yaml + + + + diff --git a/tests/scenario/05mysql/success.sqlite b/tests/scenario/05mysql/success.sqlite new file mode 100644 index 000000000..9d62fbc34 --- /dev/null +++ b/tests/scenario/05mysql/success.sqlite @@ -0,0 +1 @@ +select count(*) == 1 from signal_occurences where source_ip = "106.3.44.207" and scenario = "crowdsecurity/mysql-bf" diff --git a/tests/scenario/06ssh_timemachine_blackhole/file.log b/tests/scenario/06ssh_timemachine_blackhole/file.log new file mode 100644 index 000000000..381fe21b9 --- /dev/null +++ b/tests/scenario/06ssh_timemachine_blackhole/file.log @@ -0,0 +1,23 @@ +2018-02-07T18:00:00+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2 +2018-02-07T18:00:00+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2 +2018-02-07T18:00:00+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2 +2018-02-07T18:00:00+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:00+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +#this one will overflow +2018-02-07T18:00:01+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +#these ones will be blackholed +2018-02-07T18:00:02+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:02+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:02+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:02+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:02+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:02+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:00:02+01:00 eqx10863 sshd[13952]: 
Failed password for root from 192.168.13.38 port 2377 ssh2 +#these ones won't +2018-02-07T18:02:01+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:02:01+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:02:01+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:02:01+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:02:01+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 +2018-02-07T18:02:01+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2 + diff --git a/tests/scenario/06ssh_timemachine_blackhole/labels b/tests/scenario/06ssh_timemachine_blackhole/labels new file mode 100644 index 000000000..c2988205b --- /dev/null +++ b/tests/scenario/06ssh_timemachine_blackhole/labels @@ -0,0 +1 @@ +type: syslog diff --git a/tests/scenario/06ssh_timemachine_blackhole/parsers.yaml b/tests/scenario/06ssh_timemachine_blackhole/parsers.yaml new file mode 100644 index 000000000..0f84306dc --- /dev/null +++ b/tests/scenario/06ssh_timemachine_blackhole/parsers.yaml @@ -0,0 +1,6 @@ + - filename: ./hub/parsers/s00-raw/crowdsecurity/syslog-logs.yaml + stage: s00-raw + - filename: ./hub/parsers/s01-parse/crowdsecurity/sshd-logs.yaml + stage: s01-parse + - filename: ./hub/parsers/s02-enrich/crowdsecurity/dateparse-enrich.yaml + stage: s02-enrich diff --git a/tests/scenario/06ssh_timemachine_blackhole/scenarios.yaml b/tests/scenario/06ssh_timemachine_blackhole/scenarios.yaml new file mode 100644 index 000000000..313977578 --- /dev/null +++ b/tests/scenario/06ssh_timemachine_blackhole/scenarios.yaml @@ -0,0 +1,6 @@ + - filename: ./hub/scenarios/crowdsecurity/ssh-bf.yaml + + + + + diff --git a/tests/scenario/06ssh_timemachine_blackhole/success.sqlite b/tests/scenario/06ssh_timemachine_blackhole/success.sqlite new file mode 100644 index 000000000..690dd400c --- /dev/null +++ b/tests/scenario/06ssh_timemachine_blackhole/success.sqlite @@ -0,0 +1 @@ +select count(*) == 2 from signal_occurences where source_ip = "192.168.13.38" and scenario = "crowdsecurity/ssh-bf" diff --git a/tests/scenario/07crawling/file.log b/tests/scenario/07crawling/file.log new file mode 100644 index 000000000..71de236d1 --- /dev/null +++ b/tests/scenario/07crawling/file.log @@ -0,0 +1,84 @@ +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page1 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page1" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page2 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page2" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page3 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page3" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page4 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page4" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" 
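
Reviewer note on the test layout: each tests/scenario/<name>/ directory pairs an input log (file.log) and its acquisition type (labels) with the parser and scenario pipeline to load (parsers.yaml, scenarios.yaml), while success.sqlite holds the SQL assertion run against the resulting database. For 06ssh_timemachine_blackhole the assertion expects exactly two rows in signal_occurences: the first six failed logins overflow the ssh-bf bucket, the burst at 18:00:02 falls inside the blackhole window, and the burst at 18:02:01 lands after it and overflows again. Below is a minimal sketch of that leaky-bucket-plus-blackhole accounting; the capacity (5), leak speed (10s) and blackhole duration (1m) are assumptions chosen to match the fixture's comments, and the authoritative values live in ./hub/scenarios/crowdsecurity/ssh-bf.yaml. The real engine discards the repeated overflow rather than the individual events; this sketch simplifies by dropping events during the blackhole window.

package main

import (
	"fmt"
	"time"
)

func main() {
	const capacity = 5                // assumed: the 6th event in the window overflows
	const leak = 10 * time.Second     // assumed leak speed
	const blackhole = 1 * time.Minute // assumed blackhole duration

	parse := func(s string) time.Time {
		t, _ := time.Parse(time.RFC3339, s)
		return t
	}
	// Event times condensed from the fixture's sshd "Failed password" lines.
	var events []time.Time
	for _, b := range []struct {
		ts string
		n  int
	}{
		{"2018-02-07T18:00:00+01:00", 5},
		{"2018-02-07T18:00:01+01:00", 1}, // "this one will overflow"
		{"2018-02-07T18:00:02+01:00", 7}, // "these ones will be blackholed"
		{"2018-02-07T18:02:01+01:00", 6}, // "these ones won't": second overflow
	} {
		for i := 0; i < b.n; i++ {
			events = append(events, parse(b.ts))
		}
	}

	level, overflows := 0, 0
	var last, blackholedUntil time.Time
	for _, ev := range events {
		if !last.IsZero() {
			level -= int(ev.Sub(last) / leak) // events leaked since the previous one
			if level < 0 {
				level = 0
			}
		}
		last = ev
		if ev.Before(blackholedUntil) {
			continue // source already reported, drop silently
		}
		level++
		if level > capacity {
			overflows++
			level = 0
			blackholedUntil = ev.Add(blackhole)
		}
	}
	fmt.Println("overflows:", overflows) // 2, matching success.sqlite's count(*) == 2
}

Run over the fixture's timestamps, this yields the two overflows that success.sqlite asserts for 192.168.13.38.
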
+2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page5 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page5" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page6 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page6" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page7 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page7" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page8 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page8" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page9 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page9" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page10 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page10" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page11 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page11" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page12 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page12" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page13 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page13" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page14 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page14" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page15 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page15" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page16 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page16" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page17 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page17" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page18 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page18" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page19 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page19" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page20 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page20" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page21 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page1" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page22 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page2" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page23 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page3" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page24 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page4" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page25 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page5" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page26 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page6" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page27 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page7" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page28 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page8" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page29 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page9" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page30 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page10" "Mozilla/5.0 (Windows NT 10.0; WOW64) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page31 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page11" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page32 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page12" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page33 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page13" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page34 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page14" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page35 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page15" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page36 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page16" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page37 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page17" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page38 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page18" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page39 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page19" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page40 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page20" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page41 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page20" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" + +## Those logs should not make an overflow +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page1 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page1" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page2 
HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page2" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page3 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page3" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page4 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page4" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page5 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page5" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page6 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page6" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page7 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page7" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page8 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page8" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page9 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page9" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:49:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:49:44 +0000] "GET /crawl_page10 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page10" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:49:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:49:44 +0000] "GET /crawl_page11 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page11" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:49:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:49:44 +0000] "GET /crawl_page12 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page12" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:49:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:49:44 +0000] "GET /crawl_page13 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page13" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:49:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:49:44 +0000] "GET /crawl_page14 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page14" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:49:44+01:00 mywebserver nginx: 192.168.13.40 - - 
[01/Dec/2017:14:49:44 +0000] "GET /crawl_page15 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page15" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:49:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:49:44 +0000] "GET /crawl_page16 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page16" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:50:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:50:44 +0000] "GET /crawl_page17 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page17" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:50:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:50:44 +0000] "GET /crawl_page18 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page18" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:50:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:50:44 +0000] "GET /crawl_page19 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page19" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:50:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:50:44 +0000] "GET /crawl_page20 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page20" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:50:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:50:44 +0000] "GET /crawl_page21 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page1" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:50:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:50:44 +0000] "GET /crawl_page22 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page2" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:50:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:50:44 +0000] "GET /crawl_page23 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page3" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:51:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:51:44 +0000] "GET /crawl_page24 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page4" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:51:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:51:44 +0000] "GET /crawl_page25 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page5" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:51:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:51:44 +0000] "GET /crawl_page26 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page6" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:51:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:51:44 +0000] "GET /crawl_page27 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page7" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" 
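
Reviewer note on the split in this fixture: both sources replay the same 41 crawl requests, but 192.168.13.38 sends them within a single second while 192.168.13.40 spreads them over roughly six minutes in bursts of at most nine hits, so only the first source should trip http-crawl-non_statics (success.sqlite checks for exactly one signal from 192.168.13.38). A back-of-the-envelope check, assuming the crawl bucket uses capacity 40 with a 0.5s leak speed, the values the 08consensus_base fixtures further down report for their 'aggresive_crawl' scenario; the hub's http-crawl-non_statics.yaml remains the authoritative source:

package main

import "fmt"

func main() {
	const capacity = 40  // assumed bucket capacity
	const leakPerSec = 2 // assumed: one event leaks every 0.5s

	// 192.168.13.38: 41 hits share one timestamp, so nothing leaks in between.
	fmt.Println("13.38 peak level:", 41, "overflow:", 41 > capacity)

	// 192.168.13.40: bursts of at most 9 hits separated by 60-120s gaps;
	// even the shorter gap leaks far more than a single burst can add.
	fmt.Println("events leaked in a 60s gap:", 60*leakPerSec)
}
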
+2017-12-01T14:51:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:51:44 +0000] "GET /crawl_page28 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page8" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:51:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:51:44 +0000] "GET /crawl_page29 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page9" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:51:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:51:44 +0000] "GET /crawl_page30 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page10" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:51:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:51:44 +0000] "GET /crawl_page31 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page11" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:51:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:51:44 +0000] "GET /crawl_page32 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page12" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:52:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:52:44 +0000] "GET /crawl_page33 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page13" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:52:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:52:44 +0000] "GET /crawl_page34 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page14" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:52:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:52:44 +0000] "GET /crawl_page35 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page15" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:52:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:52:44 +0000] "GET /crawl_page36 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page16" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:52:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:52:44 +0000] "GET /crawl_page37 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page17" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:53:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:53:44 +0000] "GET /crawl_page38 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page18" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:53:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:53:44 +0000] "GET /crawl_page39 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page19" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:53:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:53:44 +0000] "GET /crawl_page40 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page20" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, 
like Gecko) Chrome/51.0.2704.103 Safari/537.36" +2017-12-01T14:53:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:53:44 +0000] "GET /crawl_page41 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page20" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" diff --git a/tests/scenario/07crawling/labels b/tests/scenario/07crawling/labels new file mode 100644 index 000000000..3a15bed50 --- /dev/null +++ b/tests/scenario/07crawling/labels @@ -0,0 +1 @@ +type: nginx diff --git a/tests/scenario/07crawling/parsers.yaml b/tests/scenario/07crawling/parsers.yaml new file mode 100644 index 000000000..887543e30 --- /dev/null +++ b/tests/scenario/07crawling/parsers.yaml @@ -0,0 +1,9 @@ + - filename: ./hub/parsers/s00-raw/crowdsecurity/syslog-logs.yaml + stage: s00-raw + - filename: ./hub/parsers/s01-parse/crowdsecurity/nginx-logs.yaml + stage: s01-parse + - filename: ./hub/parsers/s02-enrich/crowdsecurity/dateparse-enrich.yaml + stage: s02-enrich + - filename: ./hub/parsers/s02-enrich/crowdsecurity/http-logs.yaml + stage: s02-enrich + \ No newline at end of file diff --git a/tests/scenario/07crawling/scenarios.yaml b/tests/scenario/07crawling/scenarios.yaml new file mode 100644 index 000000000..371c741ad --- /dev/null +++ b/tests/scenario/07crawling/scenarios.yaml @@ -0,0 +1,7 @@ + - filename: ./hub/scenarios/crowdsecurity/http-crawl-non_statics.yaml + + + + + + diff --git a/tests/scenario/07crawling/success.sqlite b/tests/scenario/07crawling/success.sqlite new file mode 100644 index 000000000..bb68aa884 --- /dev/null +++ b/tests/scenario/07crawling/success.sqlite @@ -0,0 +1 @@ +select count(*) == 1 from signal_occurences where source_ip = "192.168.13.38" and scenario = "crowdsecurity/http-crawl-non_statics" diff --git a/tests/scenario/08consensus_base/1/file.log b/tests/scenario/08consensus_base/1/file.log new file mode 100755 index 000000000..8fdf40d19 --- /dev/null +++ b/tests/scenario/08consensus_base/1/file.log @@ -0,0 +1,1701 @@ + +{ + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "7e159c83f45e4cabfe4c2d8653a24ac79506a703", + "scenario": "http_404-scan", + "bucket_id": "morning-sea", + "alert_message": "106.54.3.52 performed 'http_404-scan' (6 events over 2s) at 2020-01-02 15:31:32 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-02T15:31:30Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-02T19:31:32Z", + "StartIp": 1781924660, + "EndIp": 1781924660, + "IpText": "106.54.3.52", + "Reason": "ban on ip 106.54.3.52", + "Scenario": "", + "SignalOccurenceID": 985 + } + ], + "stop_at": "2020-01-02T15:31:32Z", + "Source_ip": "106.54.3.52", + "Source_range": "\u003cnil\u003e", + "Source_AutonomousSystemNumber": "0", + "Source_AutonomousSystemOrganization": "", + "Source_Country": "CN", + "Source_Latitude": 39.92890167236328, + "Source_Longitude": 116.38829803466797, + "sources": { + "106.54.3.52": { + "Ip": "106.54.3.52", + "Range": { + "IP": "", + "Mask": null + }, + "AutonomousSystemNumber": "0", + "AutonomousSystemOrganization": "", + "Country": "CN", + "Latitude": 39.92890167236328, + "Longitude": 116.38829803466797, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": 
"0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "6cb069c62a51317feca844ed141e5f1cb61ed1c9", + "scenario": "http_404-scan", + "bucket_id": "purple-star", + "alert_message": "139.199.192.143 performed 'http_404-scan' (6 events over 3s) at 2020-01-01 18:27:32 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-01T18:27:29Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-01T22:27:32Z", + "StartIp": 2345123983, + "EndIp": 2345123983, + "IpText": "139.199.192.143", + "Reason": "ban on ip 139.199.192.143", + "Scenario": "", + "SignalOccurenceID": 986 + } + ], + "stop_at": "2020-01-01T18:27:32Z", + "Source_ip": "139.199.192.143", + "Source_range": "139.199.0.0/16", + "Source_AutonomousSystemNumber": "45090", + "Source_AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Source_Country": "CN", + "Source_Latitude": 39.92890167236328, + "Source_Longitude": 116.38829803466797, + "sources": { + "139.199.192.143": { + "Ip": "139.199.192.143", + "Range": { + "IP": "139.199.0.0", + "Mask": "//8AAA==" + }, + "AutonomousSystemNumber": "45090", + "AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Country": "CN", + "Latitude": 39.92890167236328, + "Longitude": 116.38829803466797, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "04cd7cbe460be2f36d193041c486da7fdffc9056", + "scenario": "aggresive_crawl", + "bucket_id": "restless-tree", + "alert_message": "139.199.192.143 performed 'aggresive_crawl' (101 events over 30s) at 2020-01-01 18:27:59 +0000 UTC", + "events_count": 101, + "start_at": "2020-01-01T18:27:29Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-01T22:27:59Z", + "StartIp": 2345123983, + "EndIp": 2345123983, + "IpText": "139.199.192.143", + "Reason": "ban on ip 139.199.192.143", + "Scenario": "", + "SignalOccurenceID": 987 + } + ], + "stop_at": "2020-01-01T18:27:59Z", + "Source_ip": "139.199.192.143", + "Source_range": "139.199.0.0/16", + "Source_AutonomousSystemNumber": "45090", + "Source_AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Source_Country": "CN", + "Source_Latitude": 39.92890167236328, + "Source_Longitude": 116.38829803466797, + "sources": { + "139.199.192.143": { + "Ip": "139.199.192.143", + "Range": { + "IP": "139.199.0.0", + "Mask": "//8AAA==" + }, + "AutonomousSystemNumber": "45090", + "AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Country": "CN", + "Latitude": 39.92890167236328, + "Longitude": 116.38829803466797, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + 
"StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "04cd7cbe460be2f36d193041c486da7fdffc9056", + "scenario": "aggresive_crawl", + "bucket_id": "divine-rain", + "alert_message": "139.199.192.143 performed 'aggresive_crawl' (195 events over 1m17s) at 2020-01-01 18:29:35 +0000 UTC", + "events_count": 195, + "start_at": "2020-01-01T18:28:18Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-01T22:29:35Z", + "StartIp": 2345123983, + "EndIp": 2345123983, + "IpText": "139.199.192.143", + "Reason": "ban on ip 139.199.192.143", + "Scenario": "", + "SignalOccurenceID": 988 + } + ], + "stop_at": "2020-01-01T18:29:35Z", + "Source_ip": "139.199.192.143", + "Source_range": "139.199.0.0/16", + "Source_AutonomousSystemNumber": "45090", + "Source_AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Source_Country": "CN", + "Source_Latitude": 39.92890167236328, + "Source_Longitude": 116.38829803466797, + "sources": { + "139.199.192.143": { + "Ip": "139.199.192.143", + "Range": { + "IP": "139.199.0.0", + "Mask": "//8AAA==" + }, + "AutonomousSystemNumber": "45090", + "AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Country": "CN", + "Latitude": 39.92890167236328, + "Longitude": 116.38829803466797, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "04cd7cbe460be2f36d193041c486da7fdffc9056", + "scenario": "aggresive_crawl", + "bucket_id": "twilight-mountain", + "alert_message": "139.199.192.143 performed 'aggresive_crawl' (89 events over 24s) at 2020-01-01 18:30:56 +0000 UTC", + "events_count": 89, + "start_at": "2020-01-01T18:30:32Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-01T22:30:56Z", + "StartIp": 2345123983, + "EndIp": 2345123983, + "IpText": "139.199.192.143", + "Reason": "ban on ip 139.199.192.143", + "Scenario": "", + "SignalOccurenceID": 989 + } + ], + "stop_at": "2020-01-01T18:30:56Z", + "Source_ip": "139.199.192.143", + "Source_range": "139.199.0.0/16", + "Source_AutonomousSystemNumber": "45090", + "Source_AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Source_Country": "CN", + "Source_Latitude": 39.92890167236328, + "Source_Longitude": 116.38829803466797, + "sources": { + "139.199.192.143": { + "Ip": "139.199.192.143", + "Range": { + "IP": "139.199.0.0", + "Mask": "//8AAA==" + }, + "AutonomousSystemNumber": "45090", + "AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Country": "CN", + "Latitude": 39.92890167236328, + "Longitude": 116.38829803466797, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", 
+ "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "04cd7cbe460be2f36d193041c486da7fdffc9056", + "scenario": "aggresive_crawl", + "bucket_id": "holy-violet", + "alert_message": "139.199.192.143 performed 'aggresive_crawl' (181 events over 1m10s) at 2020-01-01 18:32:07 +0000 UTC", + "events_count": 181, + "start_at": "2020-01-01T18:30:57Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-01T22:32:07Z", + "StartIp": 2345123983, + "EndIp": 2345123983, + "IpText": "139.199.192.143", + "Reason": "ban on ip 139.199.192.143", + "Scenario": "", + "SignalOccurenceID": 990 + } + ], + "stop_at": "2020-01-01T18:32:07Z", + "Source_ip": "139.199.192.143", + "Source_range": "139.199.0.0/16", + "Source_AutonomousSystemNumber": "45090", + "Source_AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Source_Country": "CN", + "Source_Latitude": 39.92890167236328, + "Source_Longitude": 116.38829803466797, + "sources": { + "139.199.192.143": { + "Ip": "139.199.192.143", + "Range": { + "IP": "139.199.0.0", + "Mask": "//8AAA==" + }, + "AutonomousSystemNumber": "45090", + "AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Country": "CN", + "Latitude": 39.92890167236328, + "Longitude": 116.38829803466797, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "6aedd2bf688e9a4315f3a0852e23d6257af56a6d", + "scenario": "http_404-scan", + "bucket_id": "delicate-wind", + "alert_message": "118.25.109.174 performed 'http_404-scan' (6 events over 3s) at 2020-01-02 06:20:42 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-02T06:20:39Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-02T10:20:42Z", + "StartIp": 1981377966, + "EndIp": 1981377966, + "IpText": "118.25.109.174", + "Reason": "ban on ip 118.25.109.174", + "Scenario": "", + "SignalOccurenceID": 991 + } + ], + "stop_at": "2020-01-02T06:20:42Z", + "Source_ip": "118.25.109.174", + "Source_range": "118.24.0.0/15", + "Source_AutonomousSystemNumber": "45090", + "Source_AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Source_Country": "CN", + "Source_Latitude": 39.92890167236328, + "Source_Longitude": 116.38829803466797, + "sources": { + "118.25.109.174": { + "Ip": "118.25.109.174", + "Range": { + "IP": "118.24.0.0", + "Mask": "//4AAA==" + }, + "AutonomousSystemNumber": "45090", + "AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Country": "CN", + "Latitude": 39.92890167236328, + "Longitude": 116.38829803466797, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + 
"Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "d55d24200351af8d4831cd7e88087b7bc5e02aca", + "scenario": "http_404-scan", + "bucket_id": "misty-waterfall", + "alert_message": "207.38.89.99 performed 'http_404-scan' (6 events over 1s) at 2019-12-31 07:48:07 +0000 UTC", + "events_count": 6, + "start_at": "2019-12-31T07:48:06Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2019-12-31T11:48:07Z", + "StartIp": 3475396963, + "EndIp": 3475396963, + "IpText": "207.38.89.99", + "Reason": "ban on ip 207.38.89.99", + "Scenario": "", + "SignalOccurenceID": 992 + } + ], + "stop_at": "2019-12-31T07:48:07Z", + "Source_ip": "207.38.89.99", + "Source_range": "207.38.80.0/20", + "Source_AutonomousSystemNumber": "30083", + "Source_AutonomousSystemOrganization": "HEG US Inc.", + "Source_Country": "US", + "Source_Latitude": 38.63119888305664, + "Source_Longitude": -90.19219970703125, + "sources": { + "207.38.89.99": { + "Ip": "207.38.89.99", + "Range": { + "IP": "207.38.80.0", + "Mask": "///wAA==" + }, + "AutonomousSystemNumber": "30083", + "AutonomousSystemOrganization": "HEG US Inc.", + "Country": "US", + "Latitude": 38.63119888305664, + "Longitude": -90.19219970703125, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "38523b23fb81133eaf1c2b21083175c942e76883", + "scenario": "aggresive_crawl", + "bucket_id": "restless-haze", + "alert_message": "207.38.89.99 performed 'aggresive_crawl' (53 events over 6s) at 2019-12-31 07:48:12 +0000 UTC", + "events_count": 53, + "start_at": "2019-12-31T07:48:06Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2019-12-31T11:48:12Z", + "StartIp": 3475396963, + "EndIp": 3475396963, + "IpText": "207.38.89.99", + "Reason": "ban on ip 207.38.89.99", + "Scenario": "", + "SignalOccurenceID": 993 + } + ], + "stop_at": "2019-12-31T07:48:12Z", + "Source_ip": "207.38.89.99", + "Source_range": "207.38.80.0/20", + "Source_AutonomousSystemNumber": "30083", + "Source_AutonomousSystemOrganization": "HEG US Inc.", + "Source_Country": "US", + "Source_Latitude": 38.63119888305664, + "Source_Longitude": -90.19219970703125, + "sources": { + "207.38.89.99": { + "Ip": "207.38.89.99", + "Range": { + "IP": "207.38.80.0", + "Mask": "///wAA==" + }, + "AutonomousSystemNumber": "30083", + "AutonomousSystemOrganization": "HEG US Inc.", + "Country": "US", + "Latitude": 38.63119888305664, + "Longitude": -90.19219970703125, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" 
+ }, + "Overflow": { + "MapKey": "38523b23fb81133eaf1c2b21083175c942e76883", + "scenario": "aggresive_crawl", + "bucket_id": "ancient-forest", + "alert_message": "207.38.89.99 performed 'aggresive_crawl' (51 events over 5s) at 2019-12-31 07:49:16 +0000 UTC", + "events_count": 51, + "start_at": "2019-12-31T07:49:11Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2019-12-31T11:49:16Z", + "StartIp": 3475396963, + "EndIp": 3475396963, + "IpText": "207.38.89.99", + "Reason": "ban on ip 207.38.89.99", + "Scenario": "", + "SignalOccurenceID": 994 + } + ], + "stop_at": "2019-12-31T07:49:16Z", + "Source_ip": "207.38.89.99", + "Source_range": "207.38.80.0/20", + "Source_AutonomousSystemNumber": "30083", + "Source_AutonomousSystemOrganization": "HEG US Inc.", + "Source_Country": "US", + "Source_Latitude": 38.63119888305664, + "Source_Longitude": -90.19219970703125, + "sources": { + "207.38.89.99": { + "Ip": "207.38.89.99", + "Range": { + "IP": "207.38.80.0", + "Mask": "///wAA==" + }, + "AutonomousSystemNumber": "30083", + "AutonomousSystemOrganization": "HEG US Inc.", + "Country": "US", + "Latitude": 38.63119888305664, + "Longitude": -90.19219970703125, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "57097e2f13de9a441098679dd1ba632d75bc5726", + "scenario": "http_404-scan", + "bucket_id": "hidden-cherry", + "alert_message": "51.159.56.89 performed 'http_404-scan' (6 events over 0s) at 2020-01-12 20:12:33 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-12T20:12:33Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-13T00:12:33Z", + "StartIp": 866072665, + "EndIp": 866072665, + "IpText": "51.159.56.89", + "Reason": "ban on ip 51.159.56.89", + "Scenario": "", + "SignalOccurenceID": 995 + } + ], + "stop_at": "2020-01-12T20:12:33Z", + "Source_ip": "51.159.56.89", + "Source_range": "51.158.0.0/15", + "Source_AutonomousSystemNumber": "12876", + "Source_AutonomousSystemOrganization": "Online S.a.s.", + "Source_Country": "FR", + "Source_Latitude": 48.86669921875, + "Source_Longitude": 2.3333001136779785, + "sources": { + "51.159.56.89": { + "Ip": "51.159.56.89", + "Range": { + "IP": "51.158.0.0", + "Mask": "//4AAA==" + }, + "AutonomousSystemNumber": "12876", + "AutonomousSystemOrganization": "Online S.a.s.", + "Country": "FR", + "Latitude": 48.86669921875, + "Longitude": 2.3333001136779785, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "8329d169b66b77c1ffb1476ee6be6157df0fb01c", + "scenario": "aggresive_crawl", + "bucket_id": "summer-voice", + "alert_message": "51.159.56.89 performed 'aggresive_crawl' (57 events over 8s) 
at 2020-01-12 20:12:41 +0000 UTC", + "events_count": 57, + "start_at": "2020-01-12T20:12:33Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-13T00:12:41Z", + "StartIp": 866072665, + "EndIp": 866072665, + "IpText": "51.159.56.89", + "Reason": "ban on ip 51.159.56.89", + "Scenario": "", + "SignalOccurenceID": 996 + } + ], + "stop_at": "2020-01-12T20:12:41Z", + "Source_ip": "51.159.56.89", + "Source_range": "51.158.0.0/15", + "Source_AutonomousSystemNumber": "12876", + "Source_AutonomousSystemOrganization": "Online S.a.s.", + "Source_Country": "FR", + "Source_Latitude": 48.86669921875, + "Source_Longitude": 2.3333001136779785, + "sources": { + "51.159.56.89": { + "Ip": "51.159.56.89", + "Range": { + "IP": "51.158.0.0", + "Mask": "//4AAA==" + }, + "AutonomousSystemNumber": "12876", + "AutonomousSystemOrganization": "Online S.a.s.", + "Country": "FR", + "Latitude": 48.86669921875, + "Longitude": 2.3333001136779785, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "e3670eedea41bad31bd62d4bcc3b11e0c0a26373", + "scenario": "http_404-scan", + "bucket_id": "quiet-sunset", + "alert_message": "167.172.50.134 performed 'http_404-scan' (6 events over 1s) at 2020-01-11 06:46:02 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-11T06:46:01Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-11T10:46:02Z", + "StartIp": 2813080198, + "EndIp": 2813080198, + "IpText": "167.172.50.134", + "Reason": "ban on ip 167.172.50.134", + "Scenario": "", + "SignalOccurenceID": 997 + } + ], + "stop_at": "2020-01-11T06:46:02Z", + "Source_ip": "167.172.50.134", + "Source_range": "\u003cnil\u003e", + "Source_AutonomousSystemNumber": "0", + "Source_AutonomousSystemOrganization": "", + "Source_Country": "GB", + "Source_Latitude": 51.91669845581055, + "Source_Longitude": -0.2167000025510788, + "sources": { + "167.172.50.134": { + "Ip": "167.172.50.134", + "Range": { + "IP": "", + "Mask": null + }, + "AutonomousSystemNumber": "0", + "AutonomousSystemOrganization": "", + "Country": "GB", + "Latitude": 51.91669845581055, + "Longitude": -0.2167000025510788, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "fe7c4addc743ea4a3fbbf8abc4768c38a815fb04", + "scenario": "http_404-scan", + "bucket_id": "divine-butterfly", + "alert_message": "103.212.97.45 performed 'http_404-scan' (6 events over 5s) at 2020-01-08 16:22:09 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-08T16:22:04Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-08T20:22:09Z", + "StartIp": 1741971757, + "EndIp": 1741971757, + 
"IpText": "103.212.97.45", + "Reason": "ban on ip 103.212.97.45", + "Scenario": "", + "SignalOccurenceID": 998 + } + ], + "stop_at": "2020-01-08T16:22:09Z", + "Source_ip": "103.212.97.45", + "Source_range": "103.212.96.0/22", + "Source_AutonomousSystemNumber": "45753", + "Source_AutonomousSystemOrganization": "NETSEC", + "Source_Country": "HK", + "Source_Latitude": 22.283300399780273, + "Source_Longitude": 114.1500015258789, + "sources": { + "103.212.97.45": { + "Ip": "103.212.97.45", + "Range": { + "IP": "103.212.96.0", + "Mask": "///8AA==" + }, + "AutonomousSystemNumber": "45753", + "AutonomousSystemOrganization": "NETSEC", + "Country": "HK", + "Latitude": 22.283300399780273, + "Longitude": 114.1500015258789, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "5a6ac7d4e195547d2b404da4a0d9b6f9cd50b4a9", + "scenario": "aggresive_crawl", + "bucket_id": "old-dawn", + "alert_message": "103.212.97.45 performed 'aggresive_crawl' (232 events over 1m46s) at 2020-01-08 16:23:50 +0000 UTC", + "events_count": 232, + "start_at": "2020-01-08T16:22:04Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-08T20:23:50Z", + "StartIp": 1741971757, + "EndIp": 1741971757, + "IpText": "103.212.97.45", + "Reason": "ban on ip 103.212.97.45", + "Scenario": "", + "SignalOccurenceID": 999 + } + ], + "stop_at": "2020-01-08T16:23:50Z", + "Source_ip": "103.212.97.45", + "Source_range": "103.212.96.0/22", + "Source_AutonomousSystemNumber": "45753", + "Source_AutonomousSystemOrganization": "NETSEC", + "Source_Country": "HK", + "Source_Latitude": 22.283300399780273, + "Source_Longitude": 114.1500015258789, + "sources": { + "103.212.97.45": { + "Ip": "103.212.97.45", + "Range": { + "IP": "103.212.96.0", + "Mask": "///8AA==" + }, + "AutonomousSystemNumber": "45753", + "AutonomousSystemOrganization": "NETSEC", + "Country": "HK", + "Latitude": 22.283300399780273, + "Longitude": 114.1500015258789, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "5a6ac7d4e195547d2b404da4a0d9b6f9cd50b4a9", + "scenario": "aggresive_crawl", + "bucket_id": "weathered-wood", + "alert_message": "103.212.97.45 performed 'aggresive_crawl' (76 events over 18s) at 2020-01-08 16:24:50 +0000 UTC", + "events_count": 76, + "start_at": "2020-01-08T16:24:32Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-08T20:24:50Z", + "StartIp": 1741971757, + "EndIp": 1741971757, + "IpText": "103.212.97.45", + "Reason": "ban on ip 103.212.97.45", + "Scenario": "", + "SignalOccurenceID": 1000 + } + ], + "stop_at": "2020-01-08T16:24:50Z", + "Source_ip": "103.212.97.45", + "Source_range": 
"103.212.96.0/22", + "Source_AutonomousSystemNumber": "45753", + "Source_AutonomousSystemOrganization": "NETSEC", + "Source_Country": "HK", + "Source_Latitude": 22.283300399780273, + "Source_Longitude": 114.1500015258789, + "sources": { + "103.212.97.45": { + "Ip": "103.212.97.45", + "Range": { + "IP": "103.212.96.0", + "Mask": "///8AA==" + }, + "AutonomousSystemNumber": "45753", + "AutonomousSystemOrganization": "NETSEC", + "Country": "HK", + "Latitude": 22.283300399780273, + "Longitude": 114.1500015258789, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "5a6ac7d4e195547d2b404da4a0d9b6f9cd50b4a9", + "scenario": "aggresive_crawl", + "bucket_id": "wandering-dawn", + "alert_message": "103.212.97.45 performed 'aggresive_crawl' (175 events over 1m7s) at 2020-01-08 16:26:21 +0000 UTC", + "events_count": 175, + "start_at": "2020-01-08T16:25:14Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-08T20:26:21Z", + "StartIp": 1741971757, + "EndIp": 1741971757, + "IpText": "103.212.97.45", + "Reason": "ban on ip 103.212.97.45", + "Scenario": "", + "SignalOccurenceID": 1001 + } + ], + "stop_at": "2020-01-08T16:26:21Z", + "Source_ip": "103.212.97.45", + "Source_range": "103.212.96.0/22", + "Source_AutonomousSystemNumber": "45753", + "Source_AutonomousSystemOrganization": "NETSEC", + "Source_Country": "HK", + "Source_Latitude": 22.283300399780273, + "Source_Longitude": 114.1500015258789, + "sources": { + "103.212.97.45": { + "Ip": "103.212.97.45", + "Range": { + "IP": "103.212.96.0", + "Mask": "///8AA==" + }, + "AutonomousSystemNumber": "45753", + "AutonomousSystemOrganization": "NETSEC", + "Country": "HK", + "Latitude": 22.283300399780273, + "Longitude": 114.1500015258789, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "fe7c4addc743ea4a3fbbf8abc4768c38a815fb04", + "scenario": "http_404-scan", + "bucket_id": "wispy-frog", + "alert_message": "103.212.97.45 performed 'http_404-scan' (6 events over 3s) at 2020-01-08 16:27:12 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-08T16:27:09Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-08T20:27:12Z", + "StartIp": 1741971757, + "EndIp": 1741971757, + "IpText": "103.212.97.45", + "Reason": "ban on ip 103.212.97.45", + "Scenario": "", + "SignalOccurenceID": 1002 + } + ], + "stop_at": "2020-01-08T16:27:12Z", + "Source_ip": "103.212.97.45", + "Source_range": "103.212.96.0/22", + "Source_AutonomousSystemNumber": "45753", + "Source_AutonomousSystemOrganization": "NETSEC", + "Source_Country": "HK", + "Source_Latitude": 22.283300399780273, + "Source_Longitude": 
114.1500015258789, + "sources": { + "103.212.97.45": { + "Ip": "103.212.97.45", + "Range": { + "IP": "103.212.96.0", + "Mask": "///8AA==" + }, + "AutonomousSystemNumber": "45753", + "AutonomousSystemOrganization": "NETSEC", + "Country": "HK", + "Latitude": 22.283300399780273, + "Longitude": 114.1500015258789, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "0a2b19cb243f6607e4d95c45eb979424efa1f838", + "scenario": "http_404-scan", + "bucket_id": "restless-dream", + "alert_message": "35.180.132.238 performed 'http_404-scan' (6 events over 0s) at 2020-01-06 15:36:09 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-06T15:36:09Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-06T19:36:09Z", + "StartIp": 599033070, + "EndIp": 599033070, + "IpText": "35.180.132.238", + "Reason": "ban on ip 35.180.132.238", + "Scenario": "", + "SignalOccurenceID": 1003 + } + ], + "stop_at": "2020-01-06T15:36:09Z", + "Source_ip": "35.180.132.238", + "Source_range": "35.180.0.0/16", + "Source_AutonomousSystemNumber": "16509", + "Source_AutonomousSystemOrganization": "Amazon.com, Inc.", + "Source_Country": "FR", + "Source_Latitude": 48.86669921875, + "Source_Longitude": 2.3333001136779785, + "sources": { + "35.180.132.238": { + "Ip": "35.180.132.238", + "Range": { + "IP": "35.180.0.0", + "Mask": "//8AAA==" + }, + "AutonomousSystemNumber": "16509", + "AutonomousSystemOrganization": "Amazon.com, Inc.", + "Country": "FR", + "Latitude": 48.86669921875, + "Longitude": 2.3333001136779785, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "76779a7c22da5b031227d205fdc53a1d5c2e0940", + "scenario": "aggresive_crawl", + "bucket_id": "delicate-dust", + "alert_message": "35.180.132.238 performed 'aggresive_crawl' (47 events over 3s) at 2020-01-06 15:36:12 +0000 UTC", + "events_count": 47, + "start_at": "2020-01-06T15:36:09Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-06T19:36:12Z", + "StartIp": 599033070, + "EndIp": 599033070, + "IpText": "35.180.132.238", + "Reason": "ban on ip 35.180.132.238", + "Scenario": "", + "SignalOccurenceID": 1004 + } + ], + "stop_at": "2020-01-06T15:36:12Z", + "Source_ip": "35.180.132.238", + "Source_range": "35.180.0.0/16", + "Source_AutonomousSystemNumber": "16509", + "Source_AutonomousSystemOrganization": "Amazon.com, Inc.", + "Source_Country": "FR", + "Source_Latitude": 48.86669921875, + "Source_Longitude": 2.3333001136779785, + "sources": { + "35.180.132.238": { + "Ip": "35.180.132.238", + "Range": { + "IP": "35.180.0.0", + "Mask": "//8AAA==" + }, + "AutonomousSystemNumber": "16509", + 
"AutonomousSystemOrganization": "Amazon.com, Inc.", + "Country": "FR", + "Latitude": 48.86669921875, + "Longitude": 2.3333001136779785, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "a0c56f23985d1f8fcb844afd95b40c79b6a95d84", + "scenario": "http_404-scan", + "bucket_id": "small-sky", + "alert_message": "129.211.41.26 performed 'http_404-scan' (6 events over 2s) at 2020-01-06 18:34:21 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-06T18:34:19Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-06T22:34:21Z", + "StartIp": 2178099482, + "EndIp": 2178099482, + "IpText": "129.211.41.26", + "Reason": "ban on ip 129.211.41.26", + "Scenario": "", + "SignalOccurenceID": 1005 + } + ], + "stop_at": "2020-01-06T18:34:21Z", + "Source_ip": "129.211.41.26", + "Source_range": "129.211.0.0/16", + "Source_AutonomousSystemNumber": "7091", + "Source_AutonomousSystemOrganization": "ViaNet Communications", + "Source_Country": "CN", + "Source_Latitude": 39.92890167236328, + "Source_Longitude": 116.38829803466797, + "sources": { + "129.211.41.26": { + "Ip": "129.211.41.26", + "Range": { + "IP": "129.211.0.0", + "Mask": "//8AAA==" + }, + "AutonomousSystemNumber": "7091", + "AutonomousSystemOrganization": "ViaNet Communications", + "Country": "CN", + "Latitude": 39.92890167236328, + "Longitude": 116.38829803466797, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "0a2b19cb243f6607e4d95c45eb979424efa1f838", + "scenario": "http_404-scan", + "bucket_id": "cool-rain", + "alert_message": "35.180.132.238 performed 'http_404-scan' (10 events over 2h58m14s) at 2020-01-06 18:34:25 +0000 UTC", + "events_count": 10, + "start_at": "2020-01-06T15:36:11Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-06T22:34:25Z", + "StartIp": 599033070, + "EndIp": 599033070, + "IpText": "35.180.132.238", + "Reason": "ban on ip 35.180.132.238", + "Scenario": "", + "SignalOccurenceID": 1006 + } + ], + "stop_at": "2020-01-06T18:34:25Z", + "Source_ip": "35.180.132.238", + "Source_range": "35.180.0.0/16", + "Source_AutonomousSystemNumber": "16509", + "Source_AutonomousSystemOrganization": "Amazon.com, Inc.", + "Source_Country": "FR", + "Source_Latitude": 48.86669921875, + "Source_Longitude": 2.3333001136779785, + "sources": { + "35.180.132.238": { + "Ip": "35.180.132.238", + "Range": { + "IP": "35.180.0.0", + "Mask": "//8AAA==" + }, + "AutonomousSystemNumber": "16509", + "AutonomousSystemOrganization": "Amazon.com, Inc.", + "Country": "FR", + "Latitude": 48.86669921875, + "Longitude": 2.3333001136779785, + "Flags": null + } + }, + "capacity": 5, + 
"leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "ca3945158c65616ddf95a814778f47da10c6cb6b", + "scenario": "http_404-scan", + "bucket_id": "long-wildflower", + "alert_message": "180.96.14.25 performed 'http_404-scan' (9 events over 72h37m58s) at 2020-01-07 04:11:11 +0000 UTC", + "events_count": 9, + "start_at": "2020-01-04T03:33:13Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-07T08:11:11Z", + "StartIp": 3026193945, + "EndIp": 3026193945, + "IpText": "180.96.14.25", + "Reason": "ban on ip 180.96.14.25", + "Scenario": "", + "SignalOccurenceID": 1007 + } + ], + "stop_at": "2020-01-07T04:11:11Z", + "Source_ip": "180.96.14.25", + "Source_range": "180.96.8.0/21", + "Source_AutonomousSystemNumber": "23650", + "Source_AutonomousSystemOrganization": "AS Number for CHINANET jiangsu province backbone", + "Source_Country": "CN", + "Source_Latitude": 32.06169891357422, + "Source_Longitude": 118.77780151367188, + "sources": { + "180.96.14.25": { + "Ip": "180.96.14.25", + "Range": { + "IP": "180.96.8.0", + "Mask": "///4AA==" + }, + "AutonomousSystemNumber": "23650", + "AutonomousSystemOrganization": "AS Number for CHINANET jiangsu province backbone", + "Country": "CN", + "Latitude": 32.06169891357422, + "Longitude": 118.77780151367188, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "574814d8651d7500a6325c696067497d4d051274", + "scenario": "http_404-scan", + "bucket_id": "black-shadow", + "alert_message": "176.122.121.249 performed 'http_404-scan' (6 events over 3s) at 2020-01-05 19:15:57 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-05T19:15:54Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-05T23:15:57Z", + "StartIp": 2960816633, + "EndIp": 2960816633, + "IpText": "176.122.121.249", + "Reason": "ban on ip 176.122.121.249", + "Scenario": "", + "SignalOccurenceID": 1008 + } + ], + "stop_at": "2020-01-05T19:15:57Z", + "Source_ip": "176.122.121.249", + "Source_range": "176.122.120.0/21", + "Source_AutonomousSystemNumber": "50581", + "Source_AutonomousSystemOrganization": "Ukraine telecommunication group Ltd.", + "Source_Country": "UA", + "Source_Latitude": 48.4630012512207, + "Source_Longitude": 35.03900146484375, + "sources": { + "176.122.121.249": { + "Ip": "176.122.121.249", + "Range": { + "IP": "176.122.120.0", + "Mask": "///4AA==" + }, + "AutonomousSystemNumber": "50581", + "AutonomousSystemOrganization": "Ukraine telecommunication group Ltd.", + "Country": "UA", + "Latitude": 48.4630012512207, + "Longitude": 35.03900146484375, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + 
"remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "94f52cd832ed322d3bd788565170d5bdabed0f71", + "scenario": "http_404-scan", + "bucket_id": "lively-breeze", + "alert_message": "31.222.187.197 performed 'http_404-scan' (6 events over 0s) at 2020-01-14 00:44:14 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-14T00:44:14Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-14T04:44:14Z", + "StartIp": 534690757, + "EndIp": 534690757, + "IpText": "31.222.187.197", + "Reason": "ban on ip 31.222.187.197", + "Scenario": "", + "SignalOccurenceID": 1009 + } + ], + "stop_at": "2020-01-14T00:44:14Z", + "Source_ip": "31.222.187.197", + "Source_range": "31.222.128.0/18", + "Source_AutonomousSystemNumber": "15395", + "Source_AutonomousSystemOrganization": "Rackspace Ltd.", + "Source_Country": "GB", + "Source_Latitude": 51.49639892578125, + "Source_Longitude": -0.12240000069141388, + "sources": { + "31.222.187.197": { + "Ip": "31.222.187.197", + "Range": { + "IP": "31.222.128.0", + "Mask": "///AAA==" + }, + "AutonomousSystemNumber": "15395", + "AutonomousSystemOrganization": "Rackspace Ltd.", + "Country": "GB", + "Latitude": 51.49639892578125, + "Longitude": -0.12240000069141388, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": false + } diff --git a/tests/scenario/08consensus_base/1/parsers.yaml b/tests/scenario/08consensus_base/1/parsers.yaml new file mode 100644 index 000000000..6e1549cdd --- /dev/null +++ b/tests/scenario/08consensus_base/1/parsers.yaml @@ -0,0 +1,2 @@ + - filename: ./hub/parsers/s00-raw/crowdsecurity/enrich.yaml + stage: s00-raw diff --git a/tests/scenario/08consensus_base/1/scenarios.yaml b/tests/scenario/08consensus_base/1/scenarios.yaml new file mode 100644 index 000000000..9eb8f2d70 --- /dev/null +++ b/tests/scenario/08consensus_base/1/scenarios.yaml @@ -0,0 +1,6 @@ + - filename: ./hub/scenarios/crowdsecurity/basic-consensus.yaml + + + + + diff --git a/tests/scenario/08consensus_base/1/success.sqlite b/tests/scenario/08consensus_base/1/success.sqlite new file mode 100644 index 000000000..72d5f4b97 --- /dev/null +++ b/tests/scenario/08consensus_base/1/success.sqlite @@ -0,0 +1,12 @@ +select count(*) == 1 from signal_occurences where source_ip = "139.199.192.143" and scenario = "specialized_consensus" +select count(*) == 1 from signal_occurences where source_ip = "139.199.192.143" and scenario = "base_consensus" +select count(*) == 1 from signal_occurences where source_ip = "207.38.89.99" and scenario = "base_consensus" +select count(*) == 1 from signal_occurences where source_ip = "207.38.89.99" and scenario = "specialized_consensus" +select count(*) == 1 from signal_occurences where source_ip = "51.159.56.89" and scenario = "base_consensus" +select count(*) == 1 from signal_occurences where source_ip = "103.212.97.45" and scenario = "base_consensus" +select count(*) == 1 from signal_occurences where source_ip = "103.212.97.45" and scenario = "specialized_consensus" +select 
count(*) == 1 from signal_occurences where source_ip = "35.180.132.238" and scenario = "specialized_consensus" +select count(*) == 1 from signal_occurences where source_ip = "35.180.132.238" and scenario = "base_consensus" + + + diff --git a/tests/scenario/08consensus_base/2/file.log b/tests/scenario/08consensus_base/2/file.log new file mode 100755 index 000000000..cca46fb77 --- /dev/null +++ b/tests/scenario/08consensus_base/2/file.log @@ -0,0 +1,70 @@ + +{ + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "7e159c83f45e4cabfe4c2d8653a24ac79506a703", + "scenario": "http_404-scan", + "bucket_id": "morning-sea", + "alert_message": "31.222.187.197 performed 'http_404-scan' (6 events over 2s) at 2020-01-02 15:31:32 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-02T15:31:30Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-02T19:31:32Z", + "StartIp": 1781924660, + "EndIp": 1781924660, + "IpText": "31.222.187.197", + "Reason": "ban on ip 31.222.187.197", + "Scenario": "", + "SignalOccurenceID": 985 + } + ], + "stop_at": "2020-01-14T06:44:14Z", + "Source_ip": "31.222.187.197", + "Source_range": "\u003cnil\u003e", + "Source_AutonomousSystemNumber": "0", + "Source_AutonomousSystemOrganization": "", + "Source_Country": "CN", + "Source_Latitude": 39.92890167236328, + "Source_Longitude": 116.38829803466797, + "sources": { + "31.222.187.197": { + "Ip": "31.222.187.197", + "Range": { + "IP": "", + "Mask": null + }, + "AutonomousSystemNumber": "0", + "AutonomousSystemOrganization": "", + "Country": "CN", + "Latitude": 39.92890167236328, + "Longitude": 116.38829803466797, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + \ No newline at end of file diff --git a/tests/scenario/08consensus_base/2/parsers.yaml b/tests/scenario/08consensus_base/2/parsers.yaml new file mode 100644 index 000000000..6e1549cdd --- /dev/null +++ b/tests/scenario/08consensus_base/2/parsers.yaml @@ -0,0 +1,2 @@ + - filename: ./hub/parsers/s00-raw/crowdsecurity/enrich.yaml + stage: s00-raw diff --git a/tests/scenario/08consensus_base/2/scenarios.yaml b/tests/scenario/08consensus_base/2/scenarios.yaml new file mode 100644 index 000000000..9eb8f2d70 --- /dev/null +++ b/tests/scenario/08consensus_base/2/scenarios.yaml @@ -0,0 +1,6 @@ + - filename: ./hub/scenarios/crowdsecurity/basic-consensus.yaml + + + + + diff --git a/tests/scenario/08consensus_base/2/success.sqlite b/tests/scenario/08consensus_base/2/success.sqlite new file mode 100644 index 000000000..10da3a573 --- /dev/null +++ b/tests/scenario/08consensus_base/2/success.sqlite @@ -0,0 +1,7 @@ +select count(*) == 1 from signal_occurences where source_ip = "31.222.187.197" and scenario = "base_consensus" +select count(*) == 1 from signal_occurences where source_ip = "31.222.187.197" and scenario = "specialized_consensus" + + + + + diff --git a/tests/scenario/09consensus_trust/1/file.log b/tests/scenario/09consensus_trust/1/file.log new file mode 100755 index 000000000..c8ae05234 --- /dev/null +++ b/tests/scenario/09consensus_trust/1/file.log @@ -0,0 +1,1701 @@ + +{ + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", 
+ "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "7e159c83f45e4cabfe4c2d8653a24ac79506a703", + "scenario": "http_404-scan", + "bucket_id": "morning-sea", + "alert_message": "106.54.3.52 performed 'http_404-scan' (6 events over 2s) at 2020-01-02 15:31:32 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-02T15:31:30Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-02T19:31:32Z", + "StartIp": 1781924660, + "EndIp": 1781924660, + "IpText": "106.54.3.52", + "Reason": "ban on ip 106.54.3.52", + "Scenario": "", + "SignalOccurenceID": 985 + } + ], + "stop_at": "2020-01-02T15:31:32Z", + "Source_ip": "106.54.3.52", + "Source_range": "\u003cnil\u003e", + "Source_AutonomousSystemNumber": "0", + "Source_AutonomousSystemOrganization": "", + "Source_Country": "CN", + "Source_Latitude": 39.92890167236328, + "Source_Longitude": 116.38829803466797, + "sources": { + "106.54.3.52": { + "Ip": "106.54.3.52", + "Range": { + "IP": "", + "Mask": null + }, + "AutonomousSystemNumber": "0", + "AutonomousSystemOrganization": "", + "Country": "CN", + "Latitude": 39.92890167236328, + "Longitude": 116.38829803466797, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "6cb069c62a51317feca844ed141e5f1cb61ed1c9", + "scenario": "http_404-scan", + "bucket_id": "purple-star", + "alert_message": "139.199.192.143 performed 'http_404-scan' (6 events over 3s) at 2020-01-01 18:27:32 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-01T18:27:29Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-01T22:27:32Z", + "StartIp": 2345123983, + "EndIp": 2345123983, + "IpText": "139.199.192.143", + "Reason": "ban on ip 139.199.192.143", + "Scenario": "", + "SignalOccurenceID": 986 + } + ], + "stop_at": "2020-01-01T18:27:32Z", + "Source_ip": "139.199.192.143", + "Source_range": "139.199.0.0/16", + "Source_AutonomousSystemNumber": "45090", + "Source_AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Source_Country": "CN", + "Source_Latitude": 39.92890167236328, + "Source_Longitude": 116.38829803466797, + "sources": { + "139.199.192.143": { + "Ip": "139.199.192.143", + "Range": { + "IP": "139.199.0.0", + "Mask": "//8AAA==" + }, + "AutonomousSystemNumber": "45090", + "AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Country": "CN", + "Latitude": 39.92890167236328, + "Longitude": 116.38829803466797, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": 
"04cd7cbe460be2f36d193041c486da7fdffc9056", + "scenario": "aggresive_crawl", + "bucket_id": "restless-tree", + "alert_message": "139.199.192.143 performed 'aggresive_crawl' (101 events over 30s) at 2020-01-01 18:27:59 +0000 UTC", + "events_count": 101, + "start_at": "2020-01-01T18:27:29Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-01T22:27:59Z", + "StartIp": 2345123983, + "EndIp": 2345123983, + "IpText": "139.199.192.143", + "Reason": "ban on ip 139.199.192.143", + "Scenario": "", + "SignalOccurenceID": 987 + } + ], + "stop_at": "2020-01-01T18:27:59Z", + "Source_ip": "139.199.192.143", + "Source_range": "139.199.0.0/16", + "Source_AutonomousSystemNumber": "45090", + "Source_AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Source_Country": "CN", + "Source_Latitude": 39.92890167236328, + "Source_Longitude": 116.38829803466797, + "sources": { + "139.199.192.143": { + "Ip": "139.199.192.143", + "Range": { + "IP": "139.199.0.0", + "Mask": "//8AAA==" + }, + "AutonomousSystemNumber": "45090", + "AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Country": "CN", + "Latitude": 39.92890167236328, + "Longitude": 116.38829803466797, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "04cd7cbe460be2f36d193041c486da7fdffc9056", + "scenario": "aggresive_crawl", + "bucket_id": "divine-rain", + "alert_message": "139.199.192.143 performed 'aggresive_crawl' (195 events over 1m17s) at 2020-01-01 18:29:35 +0000 UTC", + "events_count": 195, + "start_at": "2020-01-01T18:28:18Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-01T22:29:35Z", + "StartIp": 2345123983, + "EndIp": 2345123983, + "IpText": "139.199.192.143", + "Reason": "ban on ip 139.199.192.143", + "Scenario": "", + "SignalOccurenceID": 988 + } + ], + "stop_at": "2020-01-01T18:29:35Z", + "Source_ip": "139.199.192.143", + "Source_range": "139.199.0.0/16", + "Source_AutonomousSystemNumber": "45090", + "Source_AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Source_Country": "CN", + "Source_Latitude": 39.92890167236328, + "Source_Longitude": 116.38829803466797, + "sources": { + "139.199.192.143": { + "Ip": "139.199.192.143", + "Range": { + "IP": "139.199.0.0", + "Mask": "//8AAA==" + }, + "AutonomousSystemNumber": "45090", + "AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Country": "CN", + "Latitude": 39.92890167236328, + "Longitude": 116.38829803466797, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": 
"04cd7cbe460be2f36d193041c486da7fdffc9056", + "scenario": "aggresive_crawl", + "bucket_id": "twilight-mountain", + "alert_message": "139.199.192.143 performed 'aggresive_crawl' (89 events over 24s) at 2020-01-01 18:30:56 +0000 UTC", + "events_count": 89, + "start_at": "2020-01-01T18:30:32Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-01T22:30:56Z", + "StartIp": 2345123983, + "EndIp": 2345123983, + "IpText": "139.199.192.143", + "Reason": "ban on ip 139.199.192.143", + "Scenario": "", + "SignalOccurenceID": 989 + } + ], + "stop_at": "2020-01-01T18:30:56Z", + "Source_ip": "139.199.192.143", + "Source_range": "139.199.0.0/16", + "Source_AutonomousSystemNumber": "45090", + "Source_AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Source_Country": "CN", + "Source_Latitude": 39.92890167236328, + "Source_Longitude": 116.38829803466797, + "sources": { + "139.199.192.143": { + "Ip": "139.199.192.143", + "Range": { + "IP": "139.199.0.0", + "Mask": "//8AAA==" + }, + "AutonomousSystemNumber": "45090", + "AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Country": "CN", + "Latitude": 39.92890167236328, + "Longitude": 116.38829803466797, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "04cd7cbe460be2f36d193041c486da7fdffc9056", + "scenario": "aggresive_crawl", + "bucket_id": "holy-violet", + "alert_message": "139.199.192.143 performed 'aggresive_crawl' (181 events over 1m10s) at 2020-01-01 18:32:07 +0000 UTC", + "events_count": 181, + "start_at": "2020-01-01T18:30:57Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-01T22:32:07Z", + "StartIp": 2345123983, + "EndIp": 2345123983, + "IpText": "139.199.192.143", + "Reason": "ban on ip 139.199.192.143", + "Scenario": "", + "SignalOccurenceID": 990 + } + ], + "stop_at": "2020-01-01T18:32:07Z", + "Source_ip": "139.199.192.143", + "Source_range": "139.199.0.0/16", + "Source_AutonomousSystemNumber": "45090", + "Source_AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Source_Country": "CN", + "Source_Latitude": 39.92890167236328, + "Source_Longitude": 116.38829803466797, + "sources": { + "139.199.192.143": { + "Ip": "139.199.192.143", + "Range": { + "IP": "139.199.0.0", + "Mask": "//8AAA==" + }, + "AutonomousSystemNumber": "45090", + "AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Country": "CN", + "Latitude": 39.92890167236328, + "Longitude": 116.38829803466797, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": 
"6aedd2bf688e9a4315f3a0852e23d6257af56a6d", + "scenario": "http_404-scan", + "bucket_id": "delicate-wind", + "alert_message": "118.25.109.174 performed 'http_404-scan' (6 events over 3s) at 2020-01-02 06:20:42 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-02T06:20:39Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-02T10:20:42Z", + "StartIp": 1981377966, + "EndIp": 1981377966, + "IpText": "118.25.109.174", + "Reason": "ban on ip 118.25.109.174", + "Scenario": "", + "SignalOccurenceID": 991 + } + ], + "stop_at": "2020-01-02T06:20:42Z", + "Source_ip": "118.25.109.174", + "Source_range": "118.24.0.0/15", + "Source_AutonomousSystemNumber": "45090", + "Source_AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Source_Country": "CN", + "Source_Latitude": 39.92890167236328, + "Source_Longitude": 116.38829803466797, + "sources": { + "118.25.109.174": { + "Ip": "118.25.109.174", + "Range": { + "IP": "118.24.0.0", + "Mask": "//4AAA==" + }, + "AutonomousSystemNumber": "45090", + "AutonomousSystemOrganization": "Shenzhen Tencent Computer Systems Company Limited", + "Country": "CN", + "Latitude": 39.92890167236328, + "Longitude": 116.38829803466797, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "d55d24200351af8d4831cd7e88087b7bc5e02aca", + "scenario": "http_404-scan", + "bucket_id": "misty-waterfall", + "alert_message": "207.38.89.99 performed 'http_404-scan' (6 events over 1s) at 2019-12-31 07:48:07 +0000 UTC", + "events_count": 6, + "start_at": "2019-12-31T07:48:06Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2019-12-31T11:48:07Z", + "StartIp": 3475396963, + "EndIp": 3475396963, + "IpText": "207.38.89.99", + "Reason": "ban on ip 207.38.89.99", + "Scenario": "", + "SignalOccurenceID": 992 + } + ], + "stop_at": "2019-12-31T07:48:07Z", + "Source_ip": "207.38.89.99", + "Source_range": "207.38.80.0/20", + "Source_AutonomousSystemNumber": "30083", + "Source_AutonomousSystemOrganization": "HEG US Inc.", + "Source_Country": "US", + "Source_Latitude": 38.63119888305664, + "Source_Longitude": -90.19219970703125, + "sources": { + "207.38.89.99": { + "Ip": "207.38.89.99", + "Range": { + "IP": "207.38.80.0", + "Mask": "///wAA==" + }, + "AutonomousSystemNumber": "30083", + "AutonomousSystemOrganization": "HEG US Inc.", + "Country": "US", + "Latitude": 38.63119888305664, + "Longitude": -90.19219970703125, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "38523b23fb81133eaf1c2b21083175c942e76883", + "scenario": "aggresive_crawl", + "bucket_id": "restless-haze", + "alert_message": 
"207.38.89.99 performed 'aggresive_crawl' (53 events over 6s) at 2019-12-31 07:48:12 +0000 UTC", + "events_count": 53, + "start_at": "2019-12-31T07:48:06Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2019-12-31T11:48:12Z", + "StartIp": 3475396963, + "EndIp": 3475396963, + "IpText": "207.38.89.99", + "Reason": "ban on ip 207.38.89.99", + "Scenario": "", + "SignalOccurenceID": 993 + } + ], + "stop_at": "2019-12-31T07:48:12Z", + "Source_ip": "207.38.89.99", + "Source_range": "207.38.80.0/20", + "Source_AutonomousSystemNumber": "30083", + "Source_AutonomousSystemOrganization": "HEG US Inc.", + "Source_Country": "US", + "Source_Latitude": 38.63119888305664, + "Source_Longitude": -90.19219970703125, + "sources": { + "207.38.89.99": { + "Ip": "207.38.89.99", + "Range": { + "IP": "207.38.80.0", + "Mask": "///wAA==" + }, + "AutonomousSystemNumber": "30083", + "AutonomousSystemOrganization": "HEG US Inc.", + "Country": "US", + "Latitude": 38.63119888305664, + "Longitude": -90.19219970703125, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "38523b23fb81133eaf1c2b21083175c942e76883", + "scenario": "aggresive_crawl", + "bucket_id": "ancient-forest", + "alert_message": "207.38.89.99 performed 'aggresive_crawl' (51 events over 5s) at 2019-12-31 07:49:16 +0000 UTC", + "events_count": 51, + "start_at": "2019-12-31T07:49:11Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2019-12-31T11:49:16Z", + "StartIp": 3475396963, + "EndIp": 3475396963, + "IpText": "207.38.89.99", + "Reason": "ban on ip 207.38.89.99", + "Scenario": "", + "SignalOccurenceID": 994 + } + ], + "stop_at": "2019-12-31T07:49:16Z", + "Source_ip": "207.38.89.99", + "Source_range": "207.38.80.0/20", + "Source_AutonomousSystemNumber": "30083", + "Source_AutonomousSystemOrganization": "HEG US Inc.", + "Source_Country": "US", + "Source_Latitude": 38.63119888305664, + "Source_Longitude": -90.19219970703125, + "sources": { + "207.38.89.99": { + "Ip": "207.38.89.99", + "Range": { + "IP": "207.38.80.0", + "Mask": "///wAA==" + }, + "AutonomousSystemNumber": "30083", + "AutonomousSystemOrganization": "HEG US Inc.", + "Country": "US", + "Latitude": 38.63119888305664, + "Longitude": -90.19219970703125, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "57097e2f13de9a441098679dd1ba632d75bc5726", + "scenario": "http_404-scan", + "bucket_id": "hidden-cherry", + "alert_message": "51.159.56.89 performed 'http_404-scan' (6 events over 0s) at 2020-01-12 20:12:33 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-12T20:12:33Z", + "ban_applications": [ + { + "MeasureType": "ban", + 
"MeasureExtra": "", + "Until": "2020-01-13T00:12:33Z", + "StartIp": 866072665, + "EndIp": 866072665, + "IpText": "51.159.56.89", + "Reason": "ban on ip 51.159.56.89", + "Scenario": "", + "SignalOccurenceID": 995 + } + ], + "stop_at": "2020-01-12T20:12:33Z", + "Source_ip": "51.159.56.89", + "Source_range": "51.158.0.0/15", + "Source_AutonomousSystemNumber": "12876", + "Source_AutonomousSystemOrganization": "Online S.a.s.", + "Source_Country": "FR", + "Source_Latitude": 48.86669921875, + "Source_Longitude": 2.3333001136779785, + "sources": { + "51.159.56.89": { + "Ip": "51.159.56.89", + "Range": { + "IP": "51.158.0.0", + "Mask": "//4AAA==" + }, + "AutonomousSystemNumber": "12876", + "AutonomousSystemOrganization": "Online S.a.s.", + "Country": "FR", + "Latitude": 48.86669921875, + "Longitude": 2.3333001136779785, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "8329d169b66b77c1ffb1476ee6be6157df0fb01c", + "scenario": "aggresive_crawl", + "bucket_id": "summer-voice", + "alert_message": "51.159.56.89 performed 'aggresive_crawl' (57 events over 8s) at 2020-01-12 20:12:41 +0000 UTC", + "events_count": 57, + "start_at": "2020-01-12T20:12:33Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-13T00:12:41Z", + "StartIp": 866072665, + "EndIp": 866072665, + "IpText": "51.159.56.89", + "Reason": "ban on ip 51.159.56.89", + "Scenario": "", + "SignalOccurenceID": 996 + } + ], + "stop_at": "2020-01-12T20:12:41Z", + "Source_ip": "51.159.56.89", + "Source_range": "51.158.0.0/15", + "Source_AutonomousSystemNumber": "12876", + "Source_AutonomousSystemOrganization": "Online S.a.s.", + "Source_Country": "FR", + "Source_Latitude": 48.86669921875, + "Source_Longitude": 2.3333001136779785, + "sources": { + "51.159.56.89": { + "Ip": "51.159.56.89", + "Range": { + "IP": "51.158.0.0", + "Mask": "//4AAA==" + }, + "AutonomousSystemNumber": "12876", + "AutonomousSystemOrganization": "Online S.a.s.", + "Country": "FR", + "Latitude": 48.86669921875, + "Longitude": 2.3333001136779785, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "e3670eedea41bad31bd62d4bcc3b11e0c0a26373", + "scenario": "http_404-scan", + "bucket_id": "quiet-sunset", + "alert_message": "167.172.50.134 performed 'http_404-scan' (6 events over 1s) at 2020-01-11 06:46:02 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-11T06:46:01Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-11T10:46:02Z", + "StartIp": 2813080198, + "EndIp": 2813080198, + "IpText": "167.172.50.134", + "Reason": "ban on ip 167.172.50.134", + "Scenario": "", + "SignalOccurenceID": 997 + } + ], + 
"stop_at": "2020-01-11T06:46:02Z", + "Source_ip": "167.172.50.134", + "Source_range": "\u003cnil\u003e", + "Source_AutonomousSystemNumber": "0", + "Source_AutonomousSystemOrganization": "", + "Source_Country": "GB", + "Source_Latitude": 51.91669845581055, + "Source_Longitude": -0.2167000025510788, + "sources": { + "167.172.50.134": { + "Ip": "167.172.50.134", + "Range": { + "IP": "", + "Mask": null + }, + "AutonomousSystemNumber": "0", + "AutonomousSystemOrganization": "", + "Country": "GB", + "Latitude": 51.91669845581055, + "Longitude": -0.2167000025510788, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "fe7c4addc743ea4a3fbbf8abc4768c38a815fb04", + "scenario": "http_404-scan", + "bucket_id": "divine-butterfly", + "alert_message": "103.212.97.45 performed 'http_404-scan' (6 events over 5s) at 2020-01-08 16:22:09 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-08T16:22:04Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-08T20:22:09Z", + "StartIp": 1741971757, + "EndIp": 1741971757, + "IpText": "103.212.97.45", + "Reason": "ban on ip 103.212.97.45", + "Scenario": "", + "SignalOccurenceID": 998 + } + ], + "stop_at": "2020-01-08T16:22:09Z", + "Source_ip": "103.212.97.45", + "Source_range": "103.212.96.0/22", + "Source_AutonomousSystemNumber": "45753", + "Source_AutonomousSystemOrganization": "NETSEC", + "Source_Country": "HK", + "Source_Latitude": 22.283300399780273, + "Source_Longitude": 114.1500015258789, + "sources": { + "103.212.97.45": { + "Ip": "103.212.97.45", + "Range": { + "IP": "103.212.96.0", + "Mask": "///8AA==" + }, + "AutonomousSystemNumber": "45753", + "AutonomousSystemOrganization": "NETSEC", + "Country": "HK", + "Latitude": 22.283300399780273, + "Longitude": 114.1500015258789, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "5a6ac7d4e195547d2b404da4a0d9b6f9cd50b4a9", + "scenario": "aggresive_crawl", + "bucket_id": "old-dawn", + "alert_message": "103.212.97.45 performed 'aggresive_crawl' (232 events over 1m46s) at 2020-01-08 16:23:50 +0000 UTC", + "events_count": 232, + "start_at": "2020-01-08T16:22:04Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-08T20:23:50Z", + "StartIp": 1741971757, + "EndIp": 1741971757, + "IpText": "103.212.97.45", + "Reason": "ban on ip 103.212.97.45", + "Scenario": "", + "SignalOccurenceID": 999 + } + ], + "stop_at": "2020-01-08T16:23:50Z", + "Source_ip": "103.212.97.45", + "Source_range": "103.212.96.0/22", + "Source_AutonomousSystemNumber": "45753", + "Source_AutonomousSystemOrganization": "NETSEC", + "Source_Country": "HK", + "Source_Latitude": 
22.283300399780273, + "Source_Longitude": 114.1500015258789, + "sources": { + "103.212.97.45": { + "Ip": "103.212.97.45", + "Range": { + "IP": "103.212.96.0", + "Mask": "///8AA==" + }, + "AutonomousSystemNumber": "45753", + "AutonomousSystemOrganization": "NETSEC", + "Country": "HK", + "Latitude": 22.283300399780273, + "Longitude": 114.1500015258789, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "5a6ac7d4e195547d2b404da4a0d9b6f9cd50b4a9", + "scenario": "aggresive_crawl", + "bucket_id": "weathered-wood", + "alert_message": "103.212.97.45 performed 'aggresive_crawl' (76 events over 18s) at 2020-01-08 16:24:50 +0000 UTC", + "events_count": 76, + "start_at": "2020-01-08T16:24:32Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-08T20:24:50Z", + "StartIp": 1741971757, + "EndIp": 1741971757, + "IpText": "103.212.97.45", + "Reason": "ban on ip 103.212.97.45", + "Scenario": "", + "SignalOccurenceID": 1000 + } + ], + "stop_at": "2020-01-08T16:24:50Z", + "Source_ip": "103.212.97.45", + "Source_range": "103.212.96.0/22", + "Source_AutonomousSystemNumber": "45753", + "Source_AutonomousSystemOrganization": "NETSEC", + "Source_Country": "HK", + "Source_Latitude": 22.283300399780273, + "Source_Longitude": 114.1500015258789, + "sources": { + "103.212.97.45": { + "Ip": "103.212.97.45", + "Range": { + "IP": "103.212.96.0", + "Mask": "///8AA==" + }, + "AutonomousSystemNumber": "45753", + "AutonomousSystemOrganization": "NETSEC", + "Country": "HK", + "Latitude": 22.283300399780273, + "Longitude": 114.1500015258789, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "5a6ac7d4e195547d2b404da4a0d9b6f9cd50b4a9", + "scenario": "aggresive_crawl", + "bucket_id": "wandering-dawn", + "alert_message": "103.212.97.45 performed 'aggresive_crawl' (175 events over 1m7s) at 2020-01-08 16:26:21 +0000 UTC", + "events_count": 175, + "start_at": "2020-01-08T16:25:14Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-08T20:26:21Z", + "StartIp": 1741971757, + "EndIp": 1741971757, + "IpText": "103.212.97.45", + "Reason": "ban on ip 103.212.97.45", + "Scenario": "", + "SignalOccurenceID": 1001 + } + ], + "stop_at": "2020-01-08T16:26:21Z", + "Source_ip": "103.212.97.45", + "Source_range": "103.212.96.0/22", + "Source_AutonomousSystemNumber": "45753", + "Source_AutonomousSystemOrganization": "NETSEC", + "Source_Country": "HK", + "Source_Latitude": 22.283300399780273, + "Source_Longitude": 114.1500015258789, + "sources": { + "103.212.97.45": { + "Ip": "103.212.97.45", + "Range": { + "IP": "103.212.96.0", + "Mask": "///8AA==" + }, + 
"AutonomousSystemNumber": "45753", + "AutonomousSystemOrganization": "NETSEC", + "Country": "HK", + "Latitude": 22.283300399780273, + "Longitude": 114.1500015258789, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "fe7c4addc743ea4a3fbbf8abc4768c38a815fb04", + "scenario": "http_404-scan", + "bucket_id": "wispy-frog", + "alert_message": "103.212.97.45 performed 'http_404-scan' (6 events over 3s) at 2020-01-08 16:27:12 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-08T16:27:09Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-08T20:27:12Z", + "StartIp": 1741971757, + "EndIp": 1741971757, + "IpText": "103.212.97.45", + "Reason": "ban on ip 103.212.97.45", + "Scenario": "", + "SignalOccurenceID": 1002 + } + ], + "stop_at": "2020-01-08T16:27:12Z", + "Source_ip": "103.212.97.45", + "Source_range": "103.212.96.0/22", + "Source_AutonomousSystemNumber": "45753", + "Source_AutonomousSystemOrganization": "NETSEC", + "Source_Country": "HK", + "Source_Latitude": 22.283300399780273, + "Source_Longitude": 114.1500015258789, + "sources": { + "103.212.97.45": { + "Ip": "103.212.97.45", + "Range": { + "IP": "103.212.96.0", + "Mask": "///8AA==" + }, + "AutonomousSystemNumber": "45753", + "AutonomousSystemOrganization": "NETSEC", + "Country": "HK", + "Latitude": 22.283300399780273, + "Longitude": 114.1500015258789, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "0a2b19cb243f6607e4d95c45eb979424efa1f838", + "scenario": "http_404-scan", + "bucket_id": "restless-dream", + "alert_message": "35.180.132.238 performed 'http_404-scan' (6 events over 0s) at 2020-01-06 15:36:09 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-06T15:36:09Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-06T19:36:09Z", + "StartIp": 599033070, + "EndIp": 599033070, + "IpText": "35.180.132.238", + "Reason": "ban on ip 35.180.132.238", + "Scenario": "", + "SignalOccurenceID": 1003 + } + ], + "stop_at": "2020-01-06T15:36:09Z", + "Source_ip": "35.180.132.238", + "Source_range": "35.180.0.0/16", + "Source_AutonomousSystemNumber": "16509", + "Source_AutonomousSystemOrganization": "Amazon.com, Inc.", + "Source_Country": "FR", + "Source_Latitude": 48.86669921875, + "Source_Longitude": 2.3333001136779785, + "sources": { + "35.180.132.238": { + "Ip": "35.180.132.238", + "Range": { + "IP": "35.180.0.0", + "Mask": "//8AAA==" + }, + "AutonomousSystemNumber": "16509", + "AutonomousSystemOrganization": "Amazon.com, Inc.", + "Country": "FR", + "Latitude": 48.86669921875, + "Longitude": 2.3333001136779785, + "Flags": null + } + }, + "capacity": 5, + 
"leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "76779a7c22da5b031227d205fdc53a1d5c2e0940", + "scenario": "aggresive_crawl", + "bucket_id": "delicate-dust", + "alert_message": "35.180.132.238 performed 'aggresive_crawl' (47 events over 3s) at 2020-01-06 15:36:12 +0000 UTC", + "events_count": 47, + "start_at": "2020-01-06T15:36:09Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-06T19:36:12Z", + "StartIp": 599033070, + "EndIp": 599033070, + "IpText": "35.180.132.238", + "Reason": "ban on ip 35.180.132.238", + "Scenario": "", + "SignalOccurenceID": 1004 + } + ], + "stop_at": "2020-01-06T15:36:12Z", + "Source_ip": "35.180.132.238", + "Source_range": "35.180.0.0/16", + "Source_AutonomousSystemNumber": "16509", + "Source_AutonomousSystemOrganization": "Amazon.com, Inc.", + "Source_Country": "FR", + "Source_Latitude": 48.86669921875, + "Source_Longitude": 2.3333001136779785, + "sources": { + "35.180.132.238": { + "Ip": "35.180.132.238", + "Range": { + "IP": "35.180.0.0", + "Mask": "//8AAA==" + }, + "AutonomousSystemNumber": "16509", + "AutonomousSystemOrganization": "Amazon.com, Inc.", + "Country": "FR", + "Latitude": 48.86669921875, + "Longitude": 2.3333001136779785, + "Flags": null + } + }, + "capacity": 40, + "leak_speed": 500000000, + "Reprocess": false, + "Labels": { + "remediation": "true", + "service": "http", + "type": "crawl" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "a0c56f23985d1f8fcb844afd95b40c79b6a95d84", + "scenario": "http_404-scan", + "bucket_id": "small-sky", + "alert_message": "129.211.41.26 performed 'http_404-scan' (6 events over 2s) at 2020-01-06 18:34:21 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-06T18:34:19Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-06T22:34:21Z", + "StartIp": 2178099482, + "EndIp": 2178099482, + "IpText": "129.211.41.26", + "Reason": "ban on ip 129.211.41.26", + "Scenario": "", + "SignalOccurenceID": 1005 + } + ], + "stop_at": "2020-01-06T18:34:21Z", + "Source_ip": "129.211.41.26", + "Source_range": "129.211.0.0/16", + "Source_AutonomousSystemNumber": "7091", + "Source_AutonomousSystemOrganization": "ViaNet Communications", + "Source_Country": "CN", + "Source_Latitude": 39.92890167236328, + "Source_Longitude": 116.38829803466797, + "sources": { + "129.211.41.26": { + "Ip": "129.211.41.26", + "Range": { + "IP": "129.211.0.0", + "Mask": "//8AAA==" + }, + "AutonomousSystemNumber": "7091", + "AutonomousSystemOrganization": "ViaNet Communications", + "Country": "CN", + "Latitude": 39.92890167236328, + "Longitude": 116.38829803466797, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": 
"", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "0a2b19cb243f6607e4d95c45eb979424efa1f838", + "scenario": "http_404-scan", + "bucket_id": "cool-rain", + "alert_message": "35.180.132.238 performed 'http_404-scan' (10 events over 2h58m14s) at 2020-01-06 18:34:25 +0000 UTC", + "events_count": 10, + "start_at": "2020-01-06T15:36:11Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-06T22:34:25Z", + "StartIp": 599033070, + "EndIp": 599033070, + "IpText": "35.180.132.238", + "Reason": "ban on ip 35.180.132.238", + "Scenario": "", + "SignalOccurenceID": 1006 + } + ], + "stop_at": "2020-01-06T18:34:25Z", + "Source_ip": "35.180.132.238", + "Source_range": "35.180.0.0/16", + "Source_AutonomousSystemNumber": "16509", + "Source_AutonomousSystemOrganization": "Amazon.com, Inc.", + "Source_Country": "FR", + "Source_Latitude": 48.86669921875, + "Source_Longitude": 2.3333001136779785, + "sources": { + "35.180.132.238": { + "Ip": "35.180.132.238", + "Range": { + "IP": "35.180.0.0", + "Mask": "//8AAA==" + }, + "AutonomousSystemNumber": "16509", + "AutonomousSystemOrganization": "Amazon.com, Inc.", + "Country": "FR", + "Latitude": 48.86669921875, + "Longitude": 2.3333001136779785, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "ca3945158c65616ddf95a814778f47da10c6cb6b", + "scenario": "http_404-scan", + "bucket_id": "long-wildflower", + "alert_message": "180.96.14.25 performed 'http_404-scan' (9 events over 72h37m58s) at 2020-01-07 04:11:11 +0000 UTC", + "events_count": 9, + "start_at": "2020-01-04T03:33:13Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-07T08:11:11Z", + "StartIp": 3026193945, + "EndIp": 3026193945, + "IpText": "180.96.14.25", + "Reason": "ban on ip 180.96.14.25", + "Scenario": "", + "SignalOccurenceID": 1007 + } + ], + "stop_at": "2020-01-07T04:11:11Z", + "Source_ip": "180.96.14.25", + "Source_range": "180.96.8.0/21", + "Source_AutonomousSystemNumber": "23650", + "Source_AutonomousSystemOrganization": "AS Number for CHINANET jiangsu province backbone", + "Source_Country": "CN", + "Source_Latitude": 32.06169891357422, + "Source_Longitude": 118.77780151367188, + "sources": { + "180.96.14.25": { + "Ip": "180.96.14.25", + "Range": { + "IP": "180.96.8.0", + "Mask": "///4AA==" + }, + "AutonomousSystemNumber": "23650", + "AutonomousSystemOrganization": "AS Number for CHINANET jiangsu province backbone", + "Country": "CN", + "Latitude": 32.06169891357422, + "Longitude": 118.77780151367188, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + 
"Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "4", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "574814d8651d7500a6325c696067497d4d051274", + "scenario": "http_404-scan", + "bucket_id": "black-shadow", + "alert_message": "176.122.121.249 performed 'http_404-scan' (6 events over 3s) at 2020-01-05 19:15:57 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-05T19:15:54Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-05T23:15:57Z", + "StartIp": 2960816633, + "EndIp": 2960816633, + "IpText": "176.122.121.249", + "Reason": "ban on ip 176.122.121.249", + "Scenario": "", + "SignalOccurenceID": 1008 + } + ], + "stop_at": "2020-01-05T19:15:57Z", + "Source_ip": "176.122.121.249", + "Source_range": "176.122.120.0/21", + "Source_AutonomousSystemNumber": "50581", + "Source_AutonomousSystemOrganization": "Ukraine telecommunication group Ltd.", + "Source_Country": "UA", + "Source_Latitude": 48.4630012512207, + "Source_Longitude": 35.03900146484375, + "sources": { + "176.122.121.249": { + "Ip": "176.122.121.249", + "Range": { + "IP": "176.122.120.0", + "Mask": "///4AA==" + }, + "AutonomousSystemNumber": "50581", + "AutonomousSystemOrganization": "Ukraine telecommunication group Ltd.", + "Country": "UA", + "Latitude": 48.4630012512207, + "Longitude": 35.03900146484375, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + { + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine2", + "trust_factor": "2", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "94f52cd832ed322d3bd788565170d5bdabed0f71", + "scenario": "http_404-scan", + "bucket_id": "lively-breeze", + "alert_message": "31.222.187.197 performed 'http_404-scan' (6 events over 0s) at 2020-01-14 00:44:14 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-14T00:44:14Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-14T04:44:14Z", + "StartIp": 534690757, + "EndIp": 534690757, + "IpText": "31.222.187.197", + "Reason": "ban on ip 31.222.187.197", + "Scenario": "", + "SignalOccurenceID": 1009 + } + ], + "stop_at": "2020-01-14T00:44:14Z", + "Source_ip": "31.222.187.197", + "Source_range": "31.222.128.0/18", + "Source_AutonomousSystemNumber": "15395", + "Source_AutonomousSystemOrganization": "Rackspace Ltd.", + "Source_Country": "GB", + "Source_Latitude": 51.49639892578125, + "Source_Longitude": -0.12240000069141388, + "sources": { + "31.222.187.197": { + "Ip": "31.222.187.197", + "Range": { + "IP": "31.222.128.0", + "Mask": "///AAA==" + }, + "AutonomousSystemNumber": "15395", + "AutonomousSystemOrganization": "Rackspace Ltd.", + "Country": "GB", + "Latitude": 51.49639892578125, + "Longitude": -0.12240000069141388, + "Flags": null + } + }, + "capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": false + } diff --git a/tests/scenario/09consensus_trust/1/parsers.yaml b/tests/scenario/09consensus_trust/1/parsers.yaml new file mode 100644 index 000000000..6e1549cdd --- /dev/null +++ 
b/tests/scenario/09consensus_trust/1/parsers.yaml @@ -0,0 +1,2 @@ + - filename: ./hub/parsers/s00-raw/crowdsecurity/enrich.yaml + stage: s00-raw diff --git a/tests/scenario/09consensus_trust/1/scenarios.yaml b/tests/scenario/09consensus_trust/1/scenarios.yaml new file mode 100644 index 000000000..b97099b94 --- /dev/null +++ b/tests/scenario/09consensus_trust/1/scenarios.yaml @@ -0,0 +1,6 @@ + - filename: ./hub/scenarios/crowdsecurity/consensus-trust-factor.yaml + + + + + diff --git a/tests/scenario/09consensus_trust/1/success.sqlite b/tests/scenario/09consensus_trust/1/success.sqlite new file mode 100644 index 000000000..138120855 --- /dev/null +++ b/tests/scenario/09consensus_trust/1/success.sqlite @@ -0,0 +1,11 @@ +select count(*) == 1 from signal_occurences where source_ip = "139.199.192.143" and scenario = "consensus/strong_trust+diff_scenario" +select count(*) == 1 from signal_occurences where source_ip = "139.199.192.143" and scenario = "consensus/strong_trust+same_scenario" +select count(*) == 1 from signal_occurences where source_ip = "207.38.89.99" and scenario = "consensus/strong_trust+diff_scenario" +select count(*) == 1 from signal_occurences where source_ip = "207.38.89.99" and scenario = "consensus/strong_trust+same_scenario" +select count(*) == 1 from signal_occurences where source_ip = "51.159.56.89" and scenario = "consensus/strong_trust+diff_scenario" +select count(*) == 1 from signal_occurences where source_ip = "103.212.97.45" and scenario = "consensus/strong_trust+diff_scenario" +select count(*) == 1 from signal_occurences where source_ip = "103.212.97.45" and scenario = "consensus/strong_trust+same_scenario" +select count(*) == 1 from signal_occurences where source_ip = "35.180.132.238" and scenario = "consensus/strong_trust+diff_scenario" +select count(*) == 1 from signal_occurences where source_ip = "35.180.132.238" and scenario = "consensus/strong_trust+same_scenario" + + diff --git a/tests/scenario/09consensus_trust/2/file.log b/tests/scenario/09consensus_trust/2/file.log new file mode 100755 index 000000000..706e381c3 --- /dev/null +++ b/tests/scenario/09consensus_trust/2/file.log @@ -0,0 +1,70 @@ + +{ + "Type": 0, + "ExpectMode": 0, + "Whitelisted": false, + "Stage": "", + "Enriched": { + "machine_uuid": "user1_machine1", + "trust_factor": "1", + "user_uuid": "1", + "watcher_ip": "1.2.3.4" + }, + "Overflow": { + "MapKey": "7e159c83f45e4cabfe4c2d8653a24ac79506a703", + "scenario": "http_404-scan", + "bucket_id": "morning-sea", + "alert_message": "31.222.187.197 performed 'http_404-scan' (6 events over 2s) at 2020-01-02 15:31:32 +0000 UTC", + "events_count": 6, + "start_at": "2020-01-02T15:31:30Z", + "ban_applications": [ + { + "MeasureType": "ban", + "MeasureExtra": "", + "Until": "2020-01-02T19:31:32Z", + "StartIp": 1781924660, + "EndIp": 1781924660, + "IpText": "31.222.187.197", + "Reason": "ban on ip 31.222.187.197", + "Scenario": "", + "SignalOccurenceID": 985 + } + ], + "stop_at": "2020-01-14T06:44:14Z", + "Source_ip": "31.222.187.197", + "Source_range": "\u003cnil\u003e", + "Source_AutonomousSystemNumber": "0", + "Source_AutonomousSystemOrganization": "", + "Source_Country": "CN", + "Source_Latitude": 39.92890167236328, + "Source_Longitude": 116.38829803466797, + "sources": { + "31.222.187.197": { + "Ip": "31.222.187.197", + "Range": { + "IP": "", + "Mask": null + }, + "AutonomousSystemNumber": "0", + "AutonomousSystemOrganization": "", + "Country": "CN", + "Latitude": 39.92890167236328, + "Longitude": 116.38829803466797, + "Flags": null + } + }, + 
"capacity": 5, + "leak_speed": 10000000000, + "Reprocess": true, + "Labels": { + "remediation": "true", + "service": "http", + "type": "scan" + } + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "", + "Process": true + } + \ No newline at end of file diff --git a/tests/scenario/09consensus_trust/2/parsers.yaml b/tests/scenario/09consensus_trust/2/parsers.yaml new file mode 100644 index 000000000..6e1549cdd --- /dev/null +++ b/tests/scenario/09consensus_trust/2/parsers.yaml @@ -0,0 +1,2 @@ + - filename: ./hub/parsers/s00-raw/crowdsecurity/enrich.yaml + stage: s00-raw diff --git a/tests/scenario/09consensus_trust/2/scenarios.yaml b/tests/scenario/09consensus_trust/2/scenarios.yaml new file mode 100644 index 000000000..b97099b94 --- /dev/null +++ b/tests/scenario/09consensus_trust/2/scenarios.yaml @@ -0,0 +1,6 @@ + - filename: ./hub/scenarios/crowdsecurity/consensus-trust-factor.yaml + + + + + diff --git a/tests/scenario/09consensus_trust/2/success.sqlite b/tests/scenario/09consensus_trust/2/success.sqlite new file mode 100644 index 000000000..10da3a573 --- /dev/null +++ b/tests/scenario/09consensus_trust/2/success.sqlite @@ -0,0 +1,7 @@ +select count(*) == 1 from signal_occurences where source_ip = "31.222.187.197" and scenario = "base_consensus" +select count(*) == 1 from signal_occurences where source_ip = "31.222.187.197" and scenario = "specialized_consensus" + + + + + diff --git a/tests/scenario/README.md b/tests/scenario/README.md new file mode 100644 index 000000000..b34530df9 --- /dev/null +++ b/tests/scenario/README.md @@ -0,0 +1,37 @@ +# scenario tests + +``` +$ make build +$ cd tests/.../ +$ git clone git@github.com:JohnDoeCrowdSec/hub.git hub +$ ./cracra.sh -all +``` + +For the tests to run : + - crowdsec must be built + - ./hub/ must be a valid hub directory (ie `git clone git@github.com:JohnDoeCrowdSec/hub.git hub`) + +Each test is a directory starting by `0` containing : + - a logfile `file.log` + - a list of enabled parsers `parsers.yaml` + - a list of enabled scenarios `scenarios.yaml` + - a `success.sqlite` file that is a list of sqlite commands that must run successfuly + - a `label` file containing the label of the input file (ie. `type:syslog` or `prog_name:nginx`) + +A test is successfull when the agent, started with said parsers.yaml,scenarios.yaml,postoverflows.yaml produces a sqlite database conform to success.sqlite after being injected with the `file.log` in time-machine mode. + +## parsers.yaml + +As tests are run using time-machine mode, the `timemachine.yaml` parsers is mandatory or you will be getting errors. + +``` +$ cat 01ssh/parsers.yaml + - filename: ./hub/parsers/s00-raw/crowdsec/syslog-parse.yaml + stage: s00-raw + - filename: ./hub/parsers/s01-parse/crowdsec/sshd-logs.yaml + stage: s01-parse + - filename: ./hub/parsers/s02-enrich/crowdsec/timemachine.yaml + stage: s02-enrich +``` + +postoverflows and scenarios follows the same logic. 
diff --git a/tests/scenario/backend/sqlite.yaml b/tests/scenario/backend/sqlite.yaml new file mode 100644 index 000000000..6c1821be5 --- /dev/null +++ b/tests/scenario/backend/sqlite.yaml @@ -0,0 +1,5 @@ +name: sqlite +path: ./plugins/backend/sqlite.so +config: + db_path: ./test.db + flush: true \ No newline at end of file diff --git a/tests/scenario/cracra.sh b/tests/scenario/cracra.sh new file mode 100755 index 000000000..cb3be8f6e --- /dev/null +++ b/tests/scenario/cracra.sh @@ -0,0 +1,106 @@ +#!/bin/bash + +CWCMD="../../cmd/crowdsec/crowdsec" +PLUGINS_FOLDER="../../plugins" +PLUGINS_FOLDER_BACKEND="./plugins/backend/" + +dostuff() { + + STEP=${1} + + + if [[ "${STEP}" == *consensus_* ]] ; then + cat > ./acquis.yaml << EOF +mode: cat +type: bin +filename: ${STEP}/file.log +labels: + type: consensus +EOF + +EXTRA="" +if [ -f "./buckets_state.json" ] ; then + echo "Reusing existing bucket state" + EXTRA="-restore-state ./buckets_state.json" +else + echo "Creating new bucket state" +fi; + +${CWCMD} -c ./dev.yaml -acquis ./acquis.yaml ${EXTRA} -custom-config "parser:${STEP}/parsers.yaml,scenario:${STEP}/scenarios.yaml" -dump-state + + else + + +SCENAR=${1} +FILE_LABELS=$(cat ${SCENAR}"/labels" 2>/dev/null) + +rm -f "./test.db" +cat > ./acquis.yaml << EOF +mode: cat +filename: ${SCENAR}/file.log +labels: + ${FILE_LABELS} +EOF + +${CWCMD} -c ./dev.yaml -acquis ./acquis.yaml -custom-config "parser:${SCENAR}/parsers.yaml,scenario:${SCENAR}/scenarios.yaml" +fi; + +success=0 +echo "Checking results" +# check results +while read sqq ; do + if [ -z "${sqq}" ] ; then + continue + fi; + success=$((${success}+1)) + + if [ `echo ${sqq} | sqlite3 ./test.db` -eq "1" ] ; then + echo "OK : ${sqq}" ; + else + echo "FAILED : ${1} ${sqq}"; + echo "IN logs : ${1}/file.log" + echo "Expected : ${1}/success.sqlite" + echo "Failed sql query : ${sqq}" + echo "Full log : out.log" + exit 1 + fi +done < "${1}/success.sqlite" + + +echo "Done testing, ${success} tests run" + +} + +# Still hacky, but build the plugins and move them into ./plugins +CWD=$(pwd) +cd ../.. +bash ./scripts/build_plugins.sh +cd $CWD +mkdir -p "$PLUGINS_FOLDER_BACKEND" +cp -r ../../plugins/backend/*.so "$PLUGINS_FOLDER_BACKEND" +# End of hacky part + +### + +if [ -z "${1}" ] ; then + echo "${0} [-all|/path/to/test]" + echo " /path/to/test : path to test directory (i.e. ./01ssh/)" + echo " -all : run all tests" + echo " **./hub/** must be an up-to-date hub directory/symlink (i.e. a hub clone)" + exit; +fi; + +case ${1} in + "-all") + for i in `find . -mindepth 1 -type d -iname "0*"` ; + do + echo "Testing ${i}"; + dostuff $i ; + done + ;; + *) + echo "Testing ${1}"; + dostuff $1 ; + ;; +esac + diff --git a/tests/scenario/dev.yaml b/tests/scenario/dev.yaml new file mode 100644 index 000000000..7e78ab7dd --- /dev/null +++ b/tests/scenario/dev.yaml @@ -0,0 +1,12 @@ +working_dir: "."
+data_dir: "../../data/" +config_dir: "../../config/" +pid_dir: "./" +log_dir: "./" +log_mode: "stdout" +log_level: info +profiling: false +sqlite_path: "./test.db" +apimode: false +plugin: + backend: "./backend/" diff --git a/tests/scenario/test.db b/tests/scenario/test.db new file mode 100644 index 000000000..1cea4bf09 Binary files /dev/null and b/tests/scenario/test.db differ diff --git a/wizard.sh b/wizard.sh new file mode 100755 index 000000000..67195f73d --- /dev/null +++ b/wizard.sh @@ -0,0 +1,573 @@ +#!/usr/bin/env bash + +set -o pipefail +#set -x + + +RED='\033[0;31m' +BLUE='\033[0;34m' +GREEN='\033[0;32m' +YELLOW='\033[0;33m' +NC='\033[0m' + +SILENT="false" + +CROWDSEC_RUN_DIR="/var/run" +CROWDSEC_LIB_DIR="/var/lib/crowdsec" +CROWDSEC_DATA_DIR="${CROWDSEC_LIB_DIR}/data" +CROWDSEC_PLUGIN_DIR="${CROWDSEC_LIB_DIR}/plugins" +CROWDSEC_PLUGIN_BACKEND_DIR="${CROWDSEC_PLUGIN_DIR}/backend" +CROWDSEC_DB_PATH="${CROWDSEC_DATA_DIR}/crowdsec.db" +CROWDSEC_CONFIG_PATH="/etc/crowdsec" +CROWDSEC_CONFIG_PATH="${CROWDSEC_CONFIG_PATH}/crowdsec" +CROWDSEC_LOG_FILE="/var/log/crowdsec.log" +CROWDSEC_BACKEND_FOLDER="/etc/crowdsec/plugins/backend" + +CROWDSEC_BIN="./cmd/crowdsec/crowdsec" +CSCLI_BIN="./cmd/crowdsec-cli/cscli" + +CROWDSEC_BIN_INSTALLED="/usr/local/bin/crowdsec" +CSCLI_BIN_INSTALLED="/usr/local/bin/cscli" + +ACQUIS_PATH="${CROWDSEC_CONFIG_PATH}" +TMP_ACQUIS_FILE="tmp-acquis.yaml" +ACQUIS_TARGET="${ACQUIS_PATH}/acquis.yaml" + +setup_cron_pull() { + cp ./config/crowdsec_pull /etc/cron.d/ +} + + +PID_DIR="/var/run" +SYSTEMD_PATH_FILE="/etc/systemd/system/crowdsec.service" + +PATTERNS_FOLDER="config/patterns" +PATTERNS_PATH="${CROWDSEC_CONFIG_PATH}/patterns/" + +ACTION="" + +DEBUG_MODE="false" + +SUPPORTED_SERVICES='apache2 +nginx +sshd +mysql +telnet +smb +' + +BACKUP_DIR=$(mktemp -d) + + +log_info() { + msg=$1 + date=$(date +%x:%X) + echo -e "[${date}][${BLUE}INF${NC}] crowdsec_wizard: ${msg}" +} + +log_err() { + msg=$1 + date=$(date +%x:%X) + echo -e "[${date}][${RED}ERR${NC}] crowdsec_wizard: ${msg}" 1>&2 +} + + +log_dbg() { + if [[ ${DEBUG_MODE} == "true" ]]; then + msg=$1 + date=$(date +%x:%X) + echo -e "[${date}][${YELLOW}DBG${NC}] crowdsec_wizard: ${msg}" 1>&2 + fi +} + +detect_services () { + DETECTED_SERVICES=() + HMENU=() + #list systemd services + SYSTEMD_SERVICES=`systemctl --state=enabled list-unit-files '*.service' | cut -d ' ' -f1` + #raw ps + PSAX=`ps ax -o comm=` + for SVC in ${SUPPORTED_SERVICES} ; do + log_info "Checking if service '${SVC}' is running (ps+systemd)" + for SRC in "${SYSTEMD_SERVICES}" "${PSAX}" ; do + echo ${SRC} | grep ${SVC} >/dev/null + if [ $? -eq 0 ]; then + DETECTED_SERVICES+=(${SVC}) + HMENU+=(${SVC} "on") + log_info "Found '${SVC}' running" + break; + fi; + done; + done; + if [[ ${OSTYPE} == "linux-gnu" ]]; then + DETECTED_SERVICES+=("linux") + HMENU+=("linux" "on") + else + log_info "Not a GNU/Linux system, skipping the generic 'linux' service" + fi; + + if [[ ${SILENT} == "false" ]]; then + #we put whiptail results in an array, notice the dark magic fd redirection + DETECTED_SERVICES=($(whiptail --separate-output --noitem --ok-button Continue --title "Services to monitor" --checklist "Detected services, uncheck to ignore. Ignored services won't be monitored." 18 70 10 ${HMENU[@]} 3>&1 1>&2 2>&3)) + if [ $?
-eq 1 ]; then + log_err "user bailed out at services selection" + exit 1; + fi; + fi; +} + + +declare -A log_input_tags +log_input_tags[apache2]='type: apache2' +log_input_tags[nginx]='type: nginx' +log_input_tags[sshd]='type: syslog' +log_input_tags[rsyslog]='type: syslog' +log_input_tags[telnet]='type: telnet' +log_input_tags[mysql]='type: mysql' +log_input_tags[smb]='type: smb' +log_input_tags[linux]="type: syslog" + +declare -A log_locations +log_locations[apache2]='/var/log/apache2/*.log,/var/log/*httpd*.log' +log_locations[nginx]='/var/log/nginx/*.log' +log_locations[sshd]='/var/log/auth.log,/var/log/sshd.log,/var/log/secure' +log_locations[rsyslog]='/var/log/syslog' +log_locations[telnet]='/var/log/telnetd*.log' +log_locations[mysql]='/var/log/mysqld*.log' +log_locations[smb]='/var/log/samba*.log' +log_locations[linux]='/var/log/syslog,/var/log/kern.log,/var/log/messages' + +#$1 is a service name, such as those in SUPPORTED_SERVICES +find_logs_for() { + ret="" + x=${1} + #we have trailing and starting quotes because of whiptail + SVC="${x%\"}" + SVC="${SVC#\"}" + DETECTED_LOGFILES=() + HMENU=() + #log_info "Searching logs for ${SVC} : ${log_locations[${SVC}]}" + + + #split the line into an array with ',' separator + OIFS=${IFS} + IFS=',' read -r -a a <<< "${log_locations[${SVC}]}," + IFS=${OIFS} + #readarray -td, a <<<"${log_locations[${SVC}]},"; unset 'a[-1]'; + for poss_path in "${a[@]}"; do + #Split /var/log/nginx/*.log into '/var/log/nginx' and '*.log' so we can use find + path=${poss_path%/*} + fname=${poss_path##*/} + candidates=`find "${path}" -type f -mtime -5 -ctime -5 -name "$fname"` + #We have some candidates, add them + for final_file in ${candidates} ; do + log_info "Found log file for '${SVC}': ${final_file}" + DETECTED_LOGFILES+=(${final_file}) + HMENU+=(${final_file} "on") + done; + done; + + if [[ ${SILENT} == "false" ]]; then + DETECTED_LOGFILES=($(whiptail --separate-output --noitem --ok-button Continue --title "Log files to process for ${SVC}" --checklist "Detected logfiles for ${SVC}, uncheck to ignore" 18 70 10 ${HMENU[@]} 3>&1 1>&2 2>&3)) + if [ $? -eq 1 ]; then + log_err "user bailed out at log file selection" + exit 1; + fi; + fi +} + + +in_array() { + str=$1 + shift + array=("$@") + for element in "${array[@]}"; do + if [[ ${str} == *${element}* ]]; then + return 0 + fi + done + return 1 +} + +install_collection() { + HMENU=() + readarray -t AVAILABLE_COLLECTION < <(${CSCLI_BIN_INSTALLED} list collections -o raw -a) + COLLECTION_TO_INSTALL=() + if [[ ${SILENT} == "false" ]]; then + for collect_info in "${AVAILABLE_COLLECTION[@]}"; do + #echo "collection raw : ${collect_info}" >> out.txt + collection="$(echo ${collect_info} | cut -d " " -f1)" + description="$(echo ${collect_info} | cut -d " " -f2-)" + in_array $collection "${DETECTED_SERVICES[@]}" + if [[ $? == 0 ]]; then + HMENU+=("${collection}" "${description}" "ON") + else + if [[ ${collection} == "linux" ]]; then + HMENU+=("${collection}" "${description}" "ON") + else + HMENU+=("${collection}" "${description}" "OFF") + fi + fi + done + COLLECTION_TO_INSTALL=($(whiptail --separate-output --ok-button Continue --title "Crowdsec collections" --checklist "Available collections in crowdsec, try to pick one that fits your profile. Collections contain parsers and scenarios to protect your system." 20 120 10 "${HMENU[@]}" 3>&1 1>&2 2>&3)) + if [ $?
-eq 1 ]; then + log_err "user bailed out at collection selection" + exit 1; + fi; + else + for collection in "${DETECTED_SERVICES[@]}"; do + COLLECTION_TO_INSTALL+=(${collection}) + done + fi + + for collection in "${COLLECTION_TO_INSTALL[@]}"; do + log_info "Installing collection '${collection}'" + ${CSCLI_BIN_INSTALLED} install collection "${collection}" > /dev/null 2>&1 || log_err "failed to install collection '${collection}'" + done + + + + ${CSCLI_BIN_INSTALLED} install parser "crowdsecurity/whitelists" > /dev/null 2>&1 || log_err "failed to install parser crowdsecurity/whitelists" + if [[ ${SILENT} == "false" ]]; then + whiptail --msgbox "As a safety measure, the 'crowdsecurity/whitelists' parser was installed. It prevents private IP addresses from being banned; feel free to remove it at any time." 20 50 + fi +} + + + +#$1 is the service name, $... is the list of candidate logs (from find_logs_for) +genyaml() { + local service="${1}" + shift + local files=("${@}") + + echo "#Generated acquisition file - wizard.sh (service: ${service}) / files : ${files[@]}" >> ${TMP_ACQUIS_FILE} + + echo "filenames:" >> ${TMP_ACQUIS_FILE} + for fd in ${files[@]}; do + echo " - ${fd}" >> ${TMP_ACQUIS_FILE} + done + echo "labels:" >> ${TMP_ACQUIS_FILE} + echo " "${log_input_tags[${service}]} >> ${TMP_ACQUIS_FILE} + echo "---" >> ${TMP_ACQUIS_FILE} + log_info "Acquisition file generated" +} + +genacquisition() { + log_info "Found the following services: ${DETECTED_SERVICES[*]}" + for PSVG in ${DETECTED_SERVICES[@]} ; do + find_logs_for ${PSVG} + if [[ ${#DETECTED_LOGFILES[@]} -gt 0 ]] ; then + genyaml ${PSVG} ${DETECTED_LOGFILES[@]} + fi; + done +} + +#install crowdsec and cscli +install_crowdsec() { + mkdir -p "${CROWDSEC_DATA_DIR}" + find data -type f -exec install -Dm 755 {} "${CROWDSEC_LIB_DIR}/{}" \; || exit + (cd config && find patterns -type f -exec install -Dm 755 "{}" "${CROWDSEC_CONFIG_PATH}/{}" \; && cd ../) || exit + mkdir -p "${CROWDSEC_CONFIG_PATH}/scenarios" || exit + mkdir -p "${CROWDSEC_CONFIG_PATH}/postoverflows" || exit + mkdir -p "${CROWDSEC_CONFIG_PATH}/collections" || exit + mkdir -p "${CROWDSEC_CONFIG_PATH}/patterns" || exit + + mkdir -p "${CROWDSEC_BACKEND_FOLDER}" || exit + mkdir -p "${CROWDSEC_PLUGIN_BACKEND_DIR}" || exit + + (cd ./plugins && find .
-type f -name "*.so" -exec install -Dm 644 {} "${CROWDSEC_PLUGIN_DIR}/{}" \; && cd ../) || exit + cp -r ./config/plugins/backend/* "${CROWDSEC_BACKEND_FOLDER}" || exit + + install -v -m 755 -D ./config/prod.yaml "${CROWDSEC_CONFIG_PATH}" || exit + install -v -m 755 -D ./config/dev.yaml "${CROWDSEC_CONFIG_PATH}" || exit + install -v -m 755 -D ./config/acquis.yaml "${CROWDSEC_CONFIG_PATH}" || exit + install -v -m 755 -D ./config/profiles.yaml "${CROWDSEC_CONFIG_PATH}" || exit + install -v -m 600 -D ./config/api.yaml "${CROWDSEC_CONFIG_PATH}" || exit + mkdir -p ${PID_DIR} || exit + PID=${PID_DIR} DATA=${CROWDSEC_DATA_DIR} CFG=${CROWDSEC_CONFIG_PATH} envsubst < ./config/prod.yaml > ${CROWDSEC_CONFIG_PATH}"/default.yaml" + CFG=${CROWDSEC_CONFIG_PATH} PID=${PID_DIR} BIN=${CROWDSEC_BIN_INSTALLED} envsubst < ./config/crowdsec.service > "${SYSTEMD_PATH_FILE}" + install_bins + systemctl daemon-reload + log_info "Generating default cscli config" + configure_cli +} + +update_bins() { + log_info "Saving api credentials" + ${CSCLI_BIN_INSTALLED} api credentials > ${BACKUP_DIR}/api-credentials.back || log_err "Unable to save credentials" + cat ${CROWDSEC_CONFIG_PATH}/api.yaml + log_info "Only upgrading binaries" + delete_bins + install_bins + log_info "Upgrade finished" + echo "#restored credentials" >> ${CROWDSEC_CONFIG_PATH}/api.yaml || log_err "unable to write to api file" + cat ${BACKUP_DIR}/api-credentials.back >> ${CROWDSEC_CONFIG_PATH}/api.yaml || log_err "unable to write to api file" + + systemctl restart crowdsec +} + +update_full() { + + if [[ ! -f "$CROWDSEC_BIN" ]]; then + log_err "Crowdsec binary '$CROWDSEC_BIN' not found. Please build it with 'make build'" && exit 1 + fi + if [[ ! -f "$CSCLI_BIN" ]]; then + log_err "Cscli binary '$CSCLI_BIN' not found.
Please build it with 'make build'" && exit 1 + fi + + log_info "Backing up existing configuration" + ${CSCLI_BIN} backup save ${BACKUP_DIR} + log_info "Cleaning up existing crowdsec configuration" + uninstall_crowdsec + log_info "Installing crowdsec" + install_crowdsec + log_info "Restoring configuration" + ${CSCLI_BIN} backup restore ${BACKUP_DIR} + log_info "Finished, restarting" + systemctl restart crowdsec || log_err "Failed to restart crowdsec" +} + +install_bins() { + log_info "Installing crowdsec binaries" + install -v -m 755 -D "${CROWDSEC_BIN}" "${CROWDSEC_BIN_INSTALLED}" || exit + install -v -m 755 -D "${CSCLI_BIN}" "${CSCLI_BIN_INSTALLED}" || exit +} + +delete_bins() { + log_info "Removing crowdsec binaries" + rm -f ${CROWDSEC_BIN_INSTALLED} + rm -f ${CSCLI_BIN_INSTALLED} +} + +# uninstall crowdsec and cscli +uninstall_crowdsec() { + systemctl stop crowdsec.service + ${CSCLI_BIN} dashboard stop --remove + delete_bins + rm -rf ${CROWDSEC_CONFIG_PATH} || echo "" + rm -f ${CROWDSEC_LOG_FILE} || echo "" + rm -f ${CROWDSEC_DB_PATH} || echo "" + rm -rf ${CROWDSEC_LIB_DIR} || echo "" + rm -f ${SYSTEMD_PATH_FILE} || echo "" + log_info "crowdsec successfully uninstalled" +} + +# configure crowdsec configuration path and backend folder for cscli +configure_cli() { + ${CSCLI_BIN_INSTALLED} config installdir "$CROWDSEC_CONFIG_PATH" || log_err "unable to configure ${CSCLI_BIN_INSTALLED} crowdsec configuration path" + ${CSCLI_BIN_INSTALLED} config backend "$CROWDSEC_BACKEND_FOLDER" || log_err "unable to configure ${CSCLI_BIN_INSTALLED} backend folder" + +} + + + +main() { + if [[ "$1" == "backup_to_dir" ]]; + then + backup_to_dir + return + fi + + if [[ "$1" == "restore_from_dir" ]]; + then + if ! [ $(id -u) = 0 ]; then + log_err "Please run it as root" + exit 1 + fi + restore_from_dir + return + fi + + if [[ "$1" == "binupgrade" ]]; + then + if ! [ $(id -u) = 0 ]; then + log_err "Please run it as root" + exit 1 + fi + update_bins + return + fi + + if [[ "$1" == "upgrade" ]]; + then + if ! [ $(id -u) = 0 ]; then + log_err "Please run it as root" + exit 1 + fi + update_full + return + fi + + if [[ "$1" == "uninstall" ]]; + then + if ! [ $(id -u) = 0 ]; then + log_err "Please run it as root" + exit 1 + fi + uninstall_crowdsec + return + fi + + + if [[ "$1" == "bininstall" ]]; + then + if ! [ $(id -u) = 0 ]; then + log_err "Please run it as root" + exit 1 + fi + log_info "installing crowdsec" + install_crowdsec + return + fi + + + if [[ "$1" == "install" ]]; + then + if ! [ $(id -u) = 0 ]; then + log_err "Please run it as root" + exit 1 + fi + + ## Run 'make build' before installing (as non-root) so the binaries exist, then install crowdsec as root + log_info "installing crowdsec" + install_crowdsec + log_info "configuring ${CSCLI_BIN_INSTALLED}" + configure_cli + ${CSCLI_BIN_INSTALLED} update > /dev/null 2>&1 || { log_err "failed to update crowdsec hub, exiting"; exit 1; } + + # detect running services + detect_services + if ! [ ${#DETECTED_SERVICES[@]} -gt 0 ] ; then + log_err "No detected or selected services, stopping." + exit 1 + fi; + + # Generate acquisition file and move it to the right folder + genacquisition + mv "${TMP_ACQUIS_FILE}" "${ACQUIS_TARGET}" + + # Install collections according to detected services + log_info "Installing needed collections ..."
+ install_collection + + # install patterns/ folder + log_info "Installing patterns" + mkdir -p "${PATTERNS_PATH}" + cp "./${PATTERNS_FOLDER}/"* "${PATTERNS_PATH}/" + + + # api register + ${CSCLI_BIN_INSTALLED} api register >> /etc/crowdsec/crowdsec/api.yaml || ${CSCLI_BIN_INSTALLED} api reset >> /etc/crowdsec/crowdsec/api.yaml || log_err "unable to register, skipping crowdsec api registration" + log_info "Crowdsec api registered" + + + (systemctl start crowdsec && log_info "crowdsec started") || log_err "unable to start crowdsec. exiting" + + ${CSCLI_BIN_INSTALLED} api pull + # Set the cscli api pull cronjob + setup_cron_pull + + return + fi + + if [[ "$1" == "detect" ]]; + then + rm -f "${TMP_ACQUIS_FILE}" + detect_services + if [[ ${#DETECTED_SERVICES[@]} -eq 0 ]] ; then + log_err "No detected or selected services, stopping." + exit + fi; + log_info "Found ${#DETECTED_SERVICES[@]} supported services running:" + genacquisition + cat "${TMP_ACQUIS_FILE}" + rm "${TMP_ACQUIS_FILE}" + return + fi + +} + +usage() { + echo "Usage:" + echo " ./wizard.sh -h Display this help message." + echo " ./wizard.sh -d|--detect Detect running services and associated log files" + echo " ./wizard.sh -i|--install Assisted installation of crowdsec/cscli and collections" + echo " ./wizard.sh --bininstall Install binaries and empty config, no wizard." + echo " ./wizard.sh --uninstall Uninstall crowdsec/cscli" + echo " ./wizard.sh --binupgrade Upgrade crowdsec/cscli binaries" + echo " ./wizard.sh --upgrade Perform a full upgrade and try to migrate configs" + echo " ./wizard.sh --unattended Install in unattended mode: no questions are asked and defaults are used" + echo " ./wizard.sh -r|--restore Restore saved configurations from ${BACKUP_DIR} to ${CROWDSEC_CONFIG_PATH}" + echo " ./wizard.sh -b|--backup Backup existing configurations to ${BACKUP_DIR}" + + exit 0 +} + +if [[ $# -eq 0 ]]; then +usage +fi + +while [[ $# -gt 0 ]] +do + key="${1}" + case ${key} in + --uninstall) + ACTION="uninstall" + shift #past argument + ;; + --binupgrade) + ACTION="binupgrade" + shift #past argument + ;; + --upgrade) + ACTION="upgrade" + shift #past argument + ;; + -i|--install) + ACTION="install" + shift # past argument + ;; + --bininstall) + ACTION="bininstall" + shift # past argument + ;; + -b|--backup) + ACTION="backup_to_dir" + shift # past argument + ;; + -r|--restore) + ACTION="restore_from_dir" + shift # past argument + ;; + -d|--detect) + ACTION="detect" + shift # past argument + ;; + --unattended) + SILENT="true" + ACTION="install" + shift + ;; + -v|--verbose) + DEBUG_MODE="true" + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) # unknown option + log_err "Unknown argument ${key}." + usage + exit 1 + ;; + esac +done + + +main "${ACTION}"
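+ +# For reference, a typical first-time setup with this wizard might look like +# the following (illustrative only; it assumes the repository builds cleanly +# with 'make build' and that the install step is run as root, as the script +# itself requires): +# +# $ make build # build crowdsec and cscli as a regular user +# $ sudo ./wizard.sh -i # then run the assisted install as root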