fix conflict

This commit is contained in:
Thibault bui Koechlin 2020-07-15 11:14:45 +02:00
commit 1f8b375d9f
34 changed files with 882 additions and 128 deletions

View file

@ -35,6 +35,39 @@ One of the advantages of Crowdsec when compared to other solutions is its crowde
Besides detecting and stopping attacks in real time based on your logs, it allows you to preemptively block known bad actors from accessing your information system.
## Install it !
Find the [latest release](https://github.com/crowdsecurity/crowdsec/releases/latest)
Ensure you have dependencies :
<details open>
<summary>for Debian based distributions</summary>
```bash
apt-get install bash gettext whiptail curl wget
```
</details>
<details>
<summary>for RedHat based distributions</summary>
```bash
yum install bash gettext newt curl wget
```
</details>
```bash
curl -s https://api.github.com/repos/crowdsecurity/crowdsec/releases/latest | grep browser_download_url| cut -d '"' -f 4 | wget -i -
tar xvzf crowdsec-release.tgz
cd crowdsec-v*
sudo ./wizard.sh -i
```
## Key points
### Fast assisted installation, no technical barrier

View file

@ -102,7 +102,6 @@ func pullTOP() error {
if _, ok := item["scenario"]; !ok {
continue
}
item["scenario"] = fmt.Sprintf("api: %s", item["scenario"])
if _, ok := item["action"]; !ok {
continue

View file

@ -20,7 +20,11 @@ import (
var remediationType string
var atTime string
var all bool
//user supplied filters
var ipFilter, rangeFilter, reasonFilter, countryFilter, asFilter string
var displayLimit int
var displayAPI, displayALL bool
func simpleBanToSignal(targetIP string, reason string, expirationStr string, action string, asName string, asNum string, country string, banSource string) (types.SignalOccurence, error) {
var signalOcc types.SignalOccurence
@ -84,6 +88,102 @@ func simpleBanToSignal(targetIP string, reason string, expirationStr string, act
return signalOcc, nil
}
// filterBans applies the user-supplied CLI filters (--ip, --range, --reason,
// --country, --as) to the list of ban records fetched from the database and
// returns only the records that satisfy every active filter.
//
// Each ban is a map of string fields as returned by the backend; the fields
// consulted here are "iptext", "reason", "cn" and "as".
// It returns an error only when the --range filter itself cannot be parsed;
// malformed ranges stored in the database are logged and tolerated.
func filterBans(bans []map[string]string) ([]map[string]string, error) {
	var retBans []map[string]string
	for _, ban := range bans {
		var banIP net.IP
		var banRange *net.IPNet
		var err error
		keep := true
		if ban["iptext"] != "" {
			if strings.Contains(ban["iptext"], "/") {
				log.Debugf("%s is a range", ban["iptext"])
				banIP, banRange, err = net.ParseCIDR(ban["iptext"])
				if err != nil {
					// best effort : a bad record shouldn't abort the whole listing
					log.Warningf("failed to parse range '%s' from database : %s", ban["iptext"], err)
				}
			} else {
				log.Debugf("%s is IP", ban["iptext"])
				banIP = net.ParseIP(ban["iptext"])
			}
		}
		if ipFilter != "" {
			filterBinIP := net.ParseIP(ipFilter)
			if banRange != nil {
				// the record itself is a range : keep it if it contains the filtered IP
				if banRange.Contains(filterBinIP) {
					log.Debugf("[keep] ip filter is set, and range contains ip")
					keep = true
				} else {
					log.Debugf("[discard] ip filter is set, and range doesn't contain ip")
					keep = false
				}
			} else {
				if ipFilter == ban["iptext"] {
					log.Debugf("[keep] (ip) %s == %s", ipFilter, ban["iptext"])
					keep = true
				} else {
					log.Debugf("[discard] (ip) %s == %s", ipFilter, ban["iptext"])
					keep = false
				}
			}
		}
		if rangeFilter != "" {
			_, filterBinRange, err := net.ParseCIDR(rangeFilter)
			if err != nil {
				return nil, fmt.Errorf("failed to parse range '%s' : %s", rangeFilter, err)
			}
			if filterBinRange.Contains(banIP) {
				log.Debugf("[keep] range filter %s contains %s", rangeFilter, banIP.String())
				keep = true
			} else {
				log.Debugf("[discard] range filter %s doesn't contain %s", rangeFilter, banIP.String())
				keep = false
			}
		}
		if reasonFilter != "" {
			// substring match so that 'http-probing' matches 'crowdsecurity/http-probing'
			if strings.Contains(ban["reason"], reasonFilter) {
				log.Debugf("[keep] reason filter %s matches %s", reasonFilter, ban["reason"])
				keep = true
			} else {
				log.Debugf("[discard] reason filter %s doesn't match %s", reasonFilter, ban["reason"])
				keep = false
			}
		}
		if countryFilter != "" {
			if ban["cn"] == countryFilter {
				log.Debugf("[keep] country filter %s matches %s", countryFilter, ban["cn"])
				keep = true
			} else {
				// fix: discard message previously said "matches" on the non-matching path
				log.Debugf("[discard] country filter %s doesn't match %s", countryFilter, ban["cn"])
				keep = false
			}
		}
		if asFilter != "" {
			if strings.Contains(ban["as"], asFilter) {
				log.Debugf("[keep] AS filter %s matches %s", asFilter, ban["as"])
				keep = true
			} else {
				log.Debugf("[discard] AS filter %s doesn't match %s", asFilter, ban["as"])
				keep = false
			}
		}
		if keep {
			retBans = append(retBans, ban)
		} else {
			log.Debugf("[discard] discard %v", ban)
		}
	}
	return retBans, nil
}
func BanList() error {
at := time.Now()
if atTime != "" {
@ -96,6 +196,10 @@ func BanList() error {
if err != nil {
return fmt.Errorf("unable to get records from Database : %v", err)
}
ret, err = filterBans(ret)
if err != nil {
log.Errorf("Error while filtering : %s", err)
}
if config.output == "raw" {
fmt.Printf("source,ip,reason,bans,action,country,as,events_count,expiration\n")
for _, rm := range ret {
@ -113,10 +217,9 @@ func BanList() error {
table.SetHeader([]string{"Source", "Ip", "Reason", "Bans", "Action", "Country", "AS", "Events", "Expiration"})
dispcount := 0
totcount := 0
apicount := 0
for _, rm := range ret {
if !all && rm["source"] == "api" {
if !displayAPI && rm["source"] == "api" {
apicount++
if _, ok := uniqAS[rm["as"]]; !ok {
uniqAS[rm["as"]] = true
@ -124,27 +227,55 @@ func BanList() error {
if _, ok := uniqCN[rm["cn"]]; !ok {
uniqCN[rm["cn"]] = true
}
continue
}
if dispcount < 20 {
table.Append([]string{rm["source"], rm["iptext"], rm["reason"], rm["bancount"], rm["action"], rm["cn"], rm["as"], rm["events_count"], rm["until"]})
if displayALL {
if rm["source"] == "api" {
if displayAPI {
table.Append([]string{rm["source"], rm["iptext"], rm["reason"], rm["bancount"], rm["action"], rm["cn"], rm["as"], rm["events_count"], rm["until"]})
dispcount++
continue
}
} else {
table.Append([]string{rm["source"], rm["iptext"], rm["reason"], rm["bancount"], rm["action"], rm["cn"], rm["as"], rm["events_count"], rm["until"]})
dispcount++
continue
}
} else if dispcount < displayLimit {
if displayAPI {
if rm["source"] == "api" {
table.Append([]string{rm["source"], rm["iptext"], rm["reason"], rm["bancount"], rm["action"], rm["cn"], rm["as"], rm["events_count"], rm["until"]})
dispcount++
continue
}
} else {
if rm["source"] != "api" {
table.Append([]string{rm["source"], rm["iptext"], rm["reason"], rm["bancount"], rm["action"], rm["cn"], rm["as"], rm["events_count"], rm["until"]})
dispcount++
continue
}
}
}
totcount++
dispcount++
}
if dispcount > 0 {
if !all {
fmt.Printf("%d local decisions:\n", totcount)
if !displayAPI {
fmt.Printf("%d local decisions:\n", dispcount)
} else if displayAPI && !displayALL {
fmt.Printf("%d decision from API\n", dispcount)
} else if displayALL && displayAPI {
fmt.Printf("%d decision from crowdsec and API\n", dispcount)
}
table.Render() // Send output
if dispcount > 20 {
if dispcount > displayLimit && !displayALL {
fmt.Printf("Additional records stripped.\n")
}
} else {
fmt.Printf("No local decisions.\n")
if displayAPI {
fmt.Println("No API decisions")
} else {
fmt.Println("No local decisions")
}
}
if !all {
if !displayAPI {
fmt.Printf("And %d records from API, %d distinct AS, %d distinct countries\n", apicount, len(uniqAS), len(uniqCN))
}
}
@ -167,7 +298,7 @@ func BanAdd(target string, duration string, reason string, action string) error
if err != nil {
return err
}
log.Infof("Wrote ban to database.")
log.Infof("%s %s for %s (%s)", action, target, duration, reason)
return nil
}
@ -225,7 +356,11 @@ cscli ban add range 1.2.3.0/24 24h "the whole range"`,
Run: func(cmd *cobra.Command, args []string) {
reason := strings.Join(args[2:], " ")
if err := BanAdd(args[0], args[1], reason, remediationType); err != nil {
<<<<<<< HEAD
log.Fatalf("failed to add ban to database : %v", err)
=======
log.Fatalf("failed to add ban to sqlite : %v", err)
>>>>>>> master
}
},
}
@ -239,7 +374,11 @@ cscli ban add range 1.2.3.0/24 24h "the whole range"`,
Run: func(cmd *cobra.Command, args []string) {
reason := strings.Join(args[2:], " ")
if err := BanAdd(args[0], args[1], reason, remediationType); err != nil {
<<<<<<< HEAD
log.Fatalf("failed to add ban to database : %v", err)
=======
log.Fatalf("failed to add ban to sqlite : %v", err)
>>>>>>> master
}
},
}
@ -301,7 +440,8 @@ cscli ban del range 1.2.3.0/24`,
Short: "List local or api bans/remediations",
Long: `List the bans, by default only local decisions.
If --all/-a is specified, api-provided bans will be displayed too.
If --all/-a is specified, bans will be displayed without limit (--limit).
Default limit is 50.
Time can be specified with --at and support a variety of date formats:
- Jan 2 15:04:05
@ -312,6 +452,10 @@ Time can be specified with --at and support a variety of date formats:
- 2006-01-02
- 2006-01-02 15:04
`,
Example: `ban list --range 0.0.0.0/0 : will list all
ban list --country CN
ban list --reason crowdsecurity/http-probing
ban list --as OVH`,
Args: cobra.ExactArgs(0),
Run: func(cmd *cobra.Command, args []string) {
if err := BanList(); err != nil {
@ -320,7 +464,15 @@ Time can be specified with --at and support a variety of date formats:
},
}
cmdBanList.PersistentFlags().StringVar(&atTime, "at", "", "List bans at given time")
cmdBanList.PersistentFlags().BoolVarP(&all, "all", "a", false, "List as well bans received from API")
cmdBanList.PersistentFlags().BoolVarP(&displayALL, "all", "a", false, "List bans without limit")
cmdBanList.PersistentFlags().BoolVarP(&displayAPI, "api", "", false, "List as well bans received from API")
cmdBanList.PersistentFlags().StringVar(&ipFilter, "ip", "", "List bans for given IP")
cmdBanList.PersistentFlags().StringVar(&rangeFilter, "range", "", "List bans belonging to given range")
cmdBanList.PersistentFlags().StringVar(&reasonFilter, "reason", "", "List bans containing given reason")
cmdBanList.PersistentFlags().StringVar(&countryFilter, "country", "", "List bans belonging to given country code")
cmdBanList.PersistentFlags().StringVar(&asFilter, "as", "", "List bans belonging to given AS name")
cmdBanList.PersistentFlags().IntVar(&displayLimit, "limit", 50, "Limit of bans to display (default 50)")
cmdBan.AddCommand(cmdBanList)
return cmdBan
}

View file

@ -71,7 +71,7 @@ you should [update cscli](./cscli_update.md).
var cmdInstallParser = &cobra.Command{
Use: "parser [config]",
Short: "Install given log parser",
Short: "Install given parser",
Long: `Fetch and install given parser from hub`,
Example: `cscli install parser crowdsec/xxx`,
Args: cobra.MinimumNArgs(1),
@ -79,7 +79,9 @@ you should [update cscli](./cscli_update.md).
if err := cwhub.GetHubIdx(); err != nil {
log.Fatalf("failed to get Hub index : %v", err)
}
InstallItem(args[0], cwhub.PARSERS)
for _, name := range args {
InstallItem(name, cwhub.PARSERS)
}
},
}
cmdInstall.AddCommand(cmdInstallParser)
@ -93,7 +95,9 @@ you should [update cscli](./cscli_update.md).
if err := cwhub.GetHubIdx(); err != nil {
log.Fatalf("failed to get Hub index : %v", err)
}
InstallItem(args[0], cwhub.SCENARIOS)
for _, name := range args {
InstallItem(name, cwhub.SCENARIOS)
}
},
}
cmdInstall.AddCommand(cmdInstallScenario)
@ -108,7 +112,9 @@ you should [update cscli](./cscli_update.md).
if err := cwhub.GetHubIdx(); err != nil {
log.Fatalf("failed to get Hub index : %v", err)
}
InstallItem(args[0], cwhub.COLLECTIONS)
for _, name := range args {
InstallItem(name, cwhub.COLLECTIONS)
}
},
}
cmdInstall.AddCommand(cmdInstallCollection)
@ -124,7 +130,9 @@ As a reminder, postoverflows are parsing configuration that will occur after the
if err := cwhub.GetHubIdx(); err != nil {
log.Fatalf("failed to get Hub index : %v", err)
}
InstallItem(args[0], cwhub.PARSERS_OVFLW)
for _, name := range args {
InstallItem(name, cwhub.PARSERS_OVFLW)
}
},
}
cmdInstall.AddCommand(cmdInstallPostoverflow)

View file

@ -71,15 +71,13 @@ func NewRemoveCmd() *cobra.Command {
log.Fatalf("Failed to get Hub index : %v", err)
}
if remove_all && len(args) == 0 {
if remove_all {
RemoveMany(cwhub.PARSERS, "")
} else if len(args) == 1 {
RemoveMany(cwhub.PARSERS, args[0])
} else {
_ = cmd.Help()
return
for _, name := range args {
RemoveMany(cwhub.PARSERS, name)
}
}
//fmt.Println("remove/disable parser: " + strings.Join(args, " "))
},
}
cmdRemove.AddCommand(cmdRemoveParser)
@ -92,13 +90,12 @@ func NewRemoveCmd() *cobra.Command {
if err := cwhub.GetHubIdx(); err != nil {
log.Fatalf("Failed to get Hub index : %v", err)
}
if remove_all && len(args) == 0 {
if remove_all {
RemoveMany(cwhub.SCENARIOS, "")
} else if len(args) == 1 {
RemoveMany(cwhub.SCENARIOS, args[0])
} else {
_ = cmd.Help()
return
for _, name := range args {
RemoveMany(cwhub.SCENARIOS, name)
}
}
},
}
@ -112,13 +109,12 @@ func NewRemoveCmd() *cobra.Command {
if err := cwhub.GetHubIdx(); err != nil {
log.Fatalf("Failed to get Hub index : %v", err)
}
if remove_all && len(args) == 0 {
if remove_all {
RemoveMany(cwhub.COLLECTIONS, "")
} else if len(args) == 1 {
RemoveMany(cwhub.COLLECTIONS, args[0])
} else {
_ = cmd.Help()
return
for _, name := range args {
RemoveMany(cwhub.COLLECTIONS, name)
}
}
},
}
@ -133,13 +129,12 @@ func NewRemoveCmd() *cobra.Command {
if err := cwhub.GetHubIdx(); err != nil {
log.Fatalf("Failed to get Hub index : %v", err)
}
if remove_all && len(args) == 0 {
if remove_all {
RemoveMany(cwhub.PARSERS_OVFLW, "")
} else if len(args) == 1 {
RemoveMany(cwhub.PARSERS_OVFLW, args[0])
} else {
_ = cmd.Help()
return
for _, name := range args {
RemoveMany(cwhub.PARSERS_OVFLW, name)
}
}
},
}

View file

@ -124,14 +124,14 @@ cscli upgrade --force # Overwrite tainted configuration
if err := cwhub.GetHubIdx(); err != nil {
log.Fatalf("Failed to get Hub index : %v", err)
}
if len(args) == 1 {
UpgradeConfig(cwhub.PARSERS, args[0])
//UpgradeConfig(cwhub.PARSERS_OVFLW, "")
} else if upgrade_all {
if upgrade_all {
UpgradeConfig(cwhub.PARSERS, "")
} else {
_ = cmd.Help()
for _, name := range args {
UpgradeConfig(cwhub.PARSERS, name)
}
}
},
}
cmdUpgrade.AddCommand(cmdUpgradeParser)
@ -146,12 +146,12 @@ cscli upgrade --force # Overwrite tainted configuration
if err := cwhub.GetHubIdx(); err != nil {
log.Fatalf("Failed to get Hub index : %v", err)
}
if len(args) == 1 {
UpgradeConfig(cwhub.SCENARIOS, args[0])
} else if upgrade_all {
if upgrade_all {
UpgradeConfig(cwhub.SCENARIOS, "")
} else {
_ = cmd.Help()
for _, name := range args {
UpgradeConfig(cwhub.SCENARIOS, name)
}
}
},
}
@ -168,12 +168,12 @@ cscli upgrade --force # Overwrite tainted configuration
if err := cwhub.GetHubIdx(); err != nil {
log.Fatalf("Failed to get Hub index : %v", err)
}
if len(args) == 1 {
UpgradeConfig(cwhub.COLLECTIONS, args[0])
} else if upgrade_all {
if upgrade_all {
UpgradeConfig(cwhub.COLLECTIONS, "")
} else {
_ = cmd.Help()
for _, name := range args {
UpgradeConfig(cwhub.COLLECTIONS, name)
}
}
},
}
@ -191,12 +191,12 @@ cscli upgrade --force # Overwrite tainted configuration
if err := cwhub.GetHubIdx(); err != nil {
log.Fatalf("Failed to get Hub index : %v", err)
}
if len(args) == 1 {
UpgradeConfig(cwhub.PARSERS_OVFLW, args[0])
} else if upgrade_all {
if upgrade_all {
UpgradeConfig(cwhub.PARSERS_OVFLW, "")
} else {
_ = cmd.Help()
for _, name := range args {
UpgradeConfig(cwhub.PARSERS_OVFLW, name)
}
}
},
}

View file

@ -10,6 +10,7 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/acquisition"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
"github.com/crowdsecurity/crowdsec/pkg/cwversion"
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
"github.com/crowdsecurity/crowdsec/pkg/outputs"
"github.com/crowdsecurity/crowdsec/pkg/parser"
@ -282,6 +283,11 @@ func main() {
go runTachymeter(cConfig.HTTPListen)
}
err = exprhelpers.Init()
if err != nil {
log.Fatalf("Failed to init expr helpers : %s", err)
}
// Start loading configs
if err := LoadParsers(cConfig); err != nil {
log.Fatalf("Failed to load parsers: %s", err)

View file

@ -1,6 +1,8 @@
package main
import (
"fmt"
log "github.com/sirupsen/logrus"
"time"
@ -40,6 +42,12 @@ LOOP:
input <- event
}
/* process post overflow parser nodes */
event, err := parser.Parse(poctx, event, ponodes)
if err != nil {
return fmt.Errorf("postoverflow failed : %s", err)
}
if event.Overflow.Scenario == "" && event.Overflow.MapKey != "" {
//log.Infof("Deleting expired entry %s", event.Overflow.MapKey)
buckets.Bucket_map.Delete(event.Overflow.MapKey)

View file

@ -0,0 +1,4 @@
name: sqlite
path: /usr/local/lib/crowdsec/plugins/backend/sqlite.so
config:
db_path: /var/lib/crowdsec/data/crowdsec.db

View file

@ -1,5 +1,5 @@
profile: default_remediation
filter: "sig.Labels.remediation == 'true'"
filter: "sig.Labels.remediation == 'true' && not sig.Whitelisted"
api: true # If no api: specified, will use the default config in default.yaml
remediation:
ban: true
@ -16,3 +16,11 @@ api: false
outputs:
- plugin: database # If we do not want to push, we can remove this line and the next one
store: false
---
profile: send_false_positif_to_API
filter: "sig.Whitelisted == true && sig.Labels.remediation == 'true'"
#remediation is empty, meaning no remediation is taken
api: true
outputs:
- plugin: sqlite # If we do not want to push, we can remove this line and the next one
store: false

Binary file not shown.

Before

Width:  |  Height:  |  Size: 37 KiB

After

Width:  |  Height:  |  Size: 37 KiB

View file

@ -27,6 +27,14 @@ Besides detecting and stopping attacks in real time based on your logs, it allow
![Architecture](assets/images/crowdsec_architecture.png)
## Core concepts
{{crowdsec.name}} relies on {{parsers.htmlname}} to normalize and enrich logs, and {{scenarios.htmlname}} to detect attacks, often bundled together in {{collections.htmlname}} to form a coherent configuration set. For example the collection [`crowdsecurity/nginx`](https://hub.crowdsec.net/author/crowdsecurity/collections/nginx) contains all the necessary parsers and scenarios to deal with nginx logs and the common attacks that can be seen on http servers.
All of those are represented as YAML files, that can be found, shared and kept up-to-date thanks to the {{hub.htmlname}}, or [easily hand-crafted](/write_configurations/scenarios/) to address specific needs.
## Moving forward
To learn more about {{crowdsec.name}} and give it a try, please see :

View file

@ -151,10 +151,14 @@ It is meant to help understanding parser node behaviour by providing contextual
filter: expression
```
`filter` must be a valid {{expr.htmlname}} expression that will be evaluated against the {{event.name}}.
`filter` must be a valid {{expr.htmlname}} expression that will be evaluated against the {{event.htmlname}}.
If `filter` evaluation returns true or is absent, node will be processed.
If `filter` returns `false` or a non-boolean, node won't be processed.
Here is the [expr documentation](https://github.com/antonmedv/expr/tree/master/docs).
Examples :
- `filter: "evt.Meta.foo == 'test'"`
@ -278,6 +282,30 @@ statics:
expression: evt.Meta.target_field + ' this_is' + ' a dynamic expression'
```
### data
```
data:
- source_url: https://URL/TO/FILE
dest_file: LOCAL_FILENAME
[type: regexp]
```
`data` allows user to specify an external source of data.
This section is only relevant when `cscli` is used to install parser from hub, as it will download the `source_url` and store it to `dest_file`. When the parser is not installed from the hub, {{crowdsec.name}} won't download the URL, but the file must exist for the parser to be loaded correctly.
If `type` is set to `regexp`, the content of the file must be one valid (re2) regular expression per line.
Those regexps will be compiled and kept in cache.
```yaml
name: crowdsecurity/cdn-whitelist
...
data:
- source_url: https://www.cloudflare.com/ips-v4
dest_file: cloudflare_ips.txt
```
## Parser concepts

View file

@ -87,12 +87,16 @@ The name must be unique (and will define the scenario's name in the hub), and th
### filter
```yaml
filter: evt.Meta.log_type == 'telnet_new_session'
filter: expression
```
`filter` must be a valid {{expr.htmlname}} expression that will be evaluated against the {{event.htmlname}}.
an {{expr.htmlname}} that must return true if the event is eligible for the bucket.
If `filter` evaluation returns true or is absent, the event will be poured into the bucket.
If `filter` returns `false` or a non-boolean, the event will be skipped for this bucket.
Here is the [expr documentation](https://github.com/antonmedv/expr/tree/master/docs).
Examples :
@ -343,3 +347,28 @@ overflow_filter: any(queue.Queue, { .Enriched.IsInEU == "true" })
If this expression is present and returns false, the overflow will be discarded.
### data
```
data:
- source_url: https://URL/TO/FILE
dest_file: LOCAL_FILENAME
[type: regexp]
```
`data` allows user to specify an external source of data.
This section is only relevant when `cscli` is used to install scenario from hub, as it will download the `source_url` and store it to `dest_file`. When the scenario is not installed from the hub, {{crowdsec.name}} won't download the URL, but the file must exist for the scenario to be loaded correctly.
If `type` is set to `regexp`, the content of the file must be one valid (re2) regular expression per line.
Those regexps will be compiled and kept in cache.
```yaml
name: crowdsecurity/cdn-whitelist
...
data:
- source_url: https://www.cloudflare.com/ips-v4
dest_file: cloudflare_ips.txt
```

View file

@ -0,0 +1,52 @@
# Expressions
> {{expr.htmlname}} : Expression evaluation engine for Go: fast, non-Turing complete, dynamic typing, static typing
Several places of {{crowdsec.name}}'s configuration use {{expr.htmlname}} :
- {{filter.Htmlname}} that are used to determine events eligibility in {{parsers.htmlname}} and {{scenarios.htmlname}} or `profiles`
- {{statics.Htmlname}} use expr in the `expression` directive, to compute complex values
- {{whitelists.Htmlname}} rely on `expression` directive to allow more complex whitelists filters
To learn more about {{expr.htmlname}}, [check the github page of the project](https://github.com/antonmedv/expr/blob/master/docs/Language-Definition.md).
In order to make its use in {{crowdsec.name}} more efficient, we added a few helpers that are documented below.
## Atof(string) float64
Parses a string representation of a float number to an actual float number (binding on `strconv.ParseFloat`)
> Atof(evt.Parsed.tcp_port)
## JsonExtract(JsonBlob, FieldName) string
Extract the `FieldName` from the `JsonBlob` and returns it as a string. (binding on [jsonparser](https://github.com/buger/jsonparser/))
> JsonExtract(evt.Parsed.some_json_blob, "foo.bar[0].one_item")
## File(FileName) []string
Returns the content of `FileName` as an array of string, while providing cache mechanism.
> evt.Parsed.some_field in File('some_patterns.txt')
> any(File('rdns_seo_bots.txt'), { evt.Enriched.reverse_dns endsWith #})
## RegexpInFile(StringToMatch, FileName) bool
Returns `true` if the `StringToMatch` is matched by one of the expressions contained in `FileName` (uses RE2 regexp engine).
> RegexpInFile( evt.Enriched.reverse_dns, 'my_legit_seo_whitelists.txt')
## Upper(string) string
Returns the uppercase version of the string
> Upper("yop")
## IpInRange(IPStr, RangeStr) bool
Returns true if the IP `IPStr` is contained in the IP range `RangeStr` (uses `net.ParseCIDR`)
> IpInRange("1.2.3.4", "1.2.3.0/24")

View file

@ -3,6 +3,12 @@
!!! info
Please ensure that you have working env or setup test environment before writing your parser.
!!! warning "Parser dependency"
The crowdsecurity/syslog-logs parser is needed by the core parsing
engine. Deleting or modifying it could result in {{crowdsec.name}}
being unable to parse logs, so this should be done very carefully.
> In the current example, we'll write a parser for the logs produced by `iptables` (netfilter) with the `-j LOG` target.
> This document aims at detailing the process of writing and testing new parsers.
@ -410,4 +416,4 @@ statics:
- meta: http_path
expression: "evt.Parsed.request"
```
</details> -->
</details> -->

View file

@ -1,15 +1,28 @@
## Where are whitelists
# What are whitelists
Whitelists are, as for most configuration, YAML files, and allow you to "discard" signals based on :
Whitelists are special parsers that allow you to "discard" events, and can exist at two different steps :
- ip address or the fact that it belongs to a specific range
- a {{expr.name}} expression
- *Parser whitelists* : Allows you to discard an event at parse time, so that it never hits the buckets.
- *PostOverflow whitelists* : Those are whitelists that are checked *after* the overflow happens. It is usually best for whitelisting process that can be expensive (such as performing reverse DNS on an IP, or performing a `whois` of an IP).
Here is an example :
!!! info
While the whitelists are the same for parser or postoverflows, beware that field names might change.
Source ip is usually in `evt.Meta.source_ip` when it's a log, but `evt.Overflow.Source_ip` when it's an overflow
The whitelist can be based on several criteria :
- specific ip address : if the event/overflow IP is the same, event is whitelisted
- ip ranges : if the event/overflow IP belongs to this range, event is whitelisted
- a list of {{expr.htmlname}} expressions : if any expression returns true, event is whitelisted
Here is an example showcasing configuration :
```yaml
name: crowdsecurity/my-whitelists
description: "Whitelist events from my ipv4 addresses"
#it's a normal parser, so we can restrict its scope with filter
filter: "1 == 1"
whitelist:
reason: "my ipv4 ranges"
ip:
@ -19,67 +32,75 @@ whitelist:
- "10.0.0.0/8"
- "172.16.0.0/12"
expression:
- "'mycorp.com' in evt.Meta.source_ip_rdns"
#beware, this one will work *only* if you enabled the reverse dns (crowdsecurity/rdns) enrichment postoverflow parser
- evt.Enriched.reverse_dns endsWith ".mycoolorg.com."
#this one will work *only* if you enabled the geoip (crowdsecurity/geoip-enrich) enrichment parser
- evt.Enriched.IsoCode == 'FR'
```
## Hands on
Let's assume we have a setup with a `crowdsecurity/base-http-scenarios` scenario enabled and no whitelists.
# Whitelists in parsing
When a whitelist is present in parsing `/etc/crowdsec/config/parsers/...`, it will be checked/discarded before being poured to any bucket. These whitelists intentionally generate no logs and are useful to discard noisy false positive sources.
## Whitelist by ip
Let's assume we have a setup with a `crowdsecurity/nginx` collection enabled and no whitelists.
Thus, if I "attack" myself :
```bash
nikto -host 127.0.0.1
nikto -host myfqdn.com
```
my own IP will be flagged as being an attacker :
```bash
$ tail -f /var/log/crowdsec.log
time="07-05-2020 09:23:03" level=warning msg="127.0.0.1 triggered a 4h0m0s ip ban remediation for [crowdsecurity/http-scan-uniques_404]" bucket_id=old-surf event_time="2020-05-07 09:23:03.322277347 +0200 CEST m=+57172.732939890" scenario=crowdsecurity/http-scan-uniques_404 source_ip=127.0.0.1
time="07-05-2020 09:23:03" level=warning msg="127.0.0.1 triggered a 4h0m0s ip ban remediation for [crowdsecurity/http-crawl-non_statics]" bucket_id=lingering-sun event_time="2020-05-07 09:23:03.345341864 +0200 CEST m=+57172.756004380" scenario=crowdsecurity/http-crawl-non_statics source_ip=127.0.0.1
ime="07-07-2020 16:13:16" level=warning msg="80.x.x.x triggered a 4h0m0s ip ban remediation for [crowdsecurity/http-bad-user-agent]" bucket_id=cool-smoke event_time="2020-07-07 16:13:16.579581642 +0200 CEST m=+358819.413561109" scenario=crowdsecurity/http-bad-user-agent source_ip=80.x.x.x
time="07-07-2020 16:13:16" level=warning msg="80.x.x.x triggered a 4h0m0s ip ban remediation for [crowdsecurity/http-probing]" bucket_id=green-silence event_time="2020-07-07 16:13:16.737579458 +0200 CEST m=+358819.571558901" scenario=crowdsecurity/http-probing source_ip=80.x.x.x
time="07-07-2020 16:13:17" level=warning msg="80.x.x.x triggered a 4h0m0s ip ban remediation for [crowdsecurity/http-crawl-non_statics]" bucket_id=purple-snowflake event_time="2020-07-07 16:13:17.353641625 +0200 CEST m=+358820.187621068" scenario=crowdsecurity/http-crawl-non_statics source_ip=80.x.x.x
time="07-07-2020 16:13:18" level=warning msg="80.x.x.x triggered a 4h0m0s ip ban remediation for [crowdsecurity/http-sensitive-files]" bucket_id=small-hill event_time="2020-07-07 16:13:18.005919055 +0200 CEST m=+358820.839898498" scenario=crowdsecurity/http-sensitive-files source_ip=80.x.x.x
^C
$ {{cli.bin}} ban list
1 local decisions:
+--------+-----------+-------------------------------------+------+--------+---------+----+--------+------------+
| SOURCE | IP | REASON | BANS | ACTION | COUNTRY | AS | EVENTS | EXPIRATION |
+--------+-----------+-------------------------------------+------+--------+---------+----+--------+------------+
| local | 127.0.0.1 | crowdsecurity/http-scan-uniques_404 | 2 | ban | | 0 | 47 | 3h55m57s |
+--------+-----------+-------------------------------------+------+--------+---------+----+--------+------------+
4 local decisions:
+--------+---------------+-----------------------------------+------+--------+---------+---------------------------+--------+------------+
| SOURCE | IP | REASON | BANS | ACTION | COUNTRY | AS | EVENTS | EXPIRATION |
+--------+---------------+-----------------------------------+------+--------+---------+---------------------------+--------+------------+
| local | 80.x.x.x | crowdsecurity/http-bad-user-agent | 4 | ban | FR | 21502 SFR SA | 60 | 3h59m3s |
...
```
## Create the whitelist by IP
Let's create a `/etc/crowdsec/crowdsec/parsers/s02-enrich/whitelists.yaml` file with the following content :
### Create the whitelist by IP
Let's create a `/etc/crowdsec/crowdsec/parsers/s02-enrich/mywhitelists.yaml` file with the following content :
```yaml
name: crowdsecurity/whitelists
description: "Whitelist events from private ipv4 addresses"
description: "Whitelist events from my ip addresses"
whitelist:
reason: "private ipv4 ranges"
ip:
- "127.0.0.1"
reason: "my ip ranges"
ip:
- "80.x.x.x"
```
and restart {{crowdsec.name}} : `sudo systemctl restart {{crowdsec.name}}`
and reload {{crowdsec.name}} : `sudo systemctl restart crowdsec`
## Test the whitelist
### Test the whitelist
Thus, if we restart our attack :
```bash
nikto -host 127.0.0.1
nikto -host myfqdn.com
```
And we don't get bans, instead :
And we don't get bans :
```bash
$ tail -f /var/log/crowdsec.log
...
time="07-05-2020 09:30:13" level=info msg="Event from [127.0.0.1] is whitelisted by Ips !" filter= name=lively-firefly stage=s02-enrich
...
^C
$ {{cli.bin}} ban list
No local decisions.
@ -87,11 +108,12 @@ And 21 records from API, 15 distinct AS, 12 distinct countries
```
Here, we don't get *any* logs, as the events have been discarded at parsing time.
## Create whitelist by expression
Now, let's make something more tricky : let's whitelist a **specific** user-agent (of course, it's just an example, don't do this at home !).
Now, let's make something more tricky : let's whitelist a **specific** user-agent (of course, it's just an example, don't do this at home !). The [hub's taxonomy](https://hub.crowdsec.net/fields) will help us find which data is present in which field.
Let's change our whitelist to :
@ -109,7 +131,7 @@ again, let's restart {{crowdsec.name}} !
For the record, I edited nikto's configuration to use 'MySecretUserAgent' as user-agent, and thus :
```bash
nikto -host 127.0.0.1
nikto -host myfqdn.com
```
```bash
@ -120,3 +142,43 @@ time="07-05-2020 09:39:09" level=info msg="Event is whitelisted by Expr !" filte
```
# Whitelist in PostOverflows
Whitelists in PostOverflows are applied *after* the bucket overflow happens.
It has the advantage of being triggered only once we are about to take decision about an IP or Range, and thus happens a lot less often.
A good example is the [crowdsecurity/whitelist-good-actors](https://hub.crowdsec.net/author/crowdsecurity/collections/whitelist-good-actors) collection.
But let's craft ours based on our previous example !
First of all, install the [crowdsecurity/rdns postoverflow](https://hub.crowdsec.net/author/crowdsecurity/configurations/rdns) : it will be in charge of enriching overflows with reverse dns information of the offending IP.
Let's put the following file in `/etc/crowdsec/config/postoverflows/s01-whitelists/mywhitelists.yaml` :
```yaml
name: me/my_cool_whitelist
description: lets whitelist our own reverse dns
whitelist:
reason: dont ban my ISP
expression:
#this is the reverse of my ip, you can get it by performing a "host" command on your public IP for example
- evt.Enriched.reverse_dns endsWith '.asnieres.rev.numericable.fr.'
```
After reloading {{crowdsec.name}}, and launching (again!) nikto :
```bash
nikto -host myfqdn.com
```
```bash
$ tail -f /var/log/crowdsec.log
time="07-07-2020 17:11:09" level=info msg="Ban for 80.x.x.x whitelisted, reason [dont ban my ISP]" id=cold-sunset name=me/my_cool_whitelist stage=s01
time="07-07-2020 17:11:09" level=info msg="node warning : no remediation" bucket_id=blue-cloud event_time="2020-07-07 17:11:09.175068053 +0200 CEST m=+2308.040825320" scenario=crowdsecurity/http-probing source_ip=80.x.x.x
time="07-07-2020 17:11:09" level=info msg="Processing Overflow with no decisions 80.x.x.x performed 'crowdsecurity/http-probing' (11 events over 313.983994ms) at 2020-07-07 17:11:09.175068053 +0200 CEST m=+2308.040825320" bucket_id=blue-cloud event_time="2020-07-07 17:11:09.175068053 +0200 CEST m=+2308.040825320" scenario=crowdsecurity/http-probing source_ip=80.x.x.x
...
```
This time, we can see that logs are being produced when the event is discarded.

View file

@ -17,6 +17,7 @@ nav:
- Cheat Sheets:
- Ban Management: cheat_sheets/ban-mgmt.md
- Configuration Management: cheat_sheets/config-mgmt.md
- Hub's taxonomy: https://hub.crowdsec.net/fields
- Observability:
- Overview: observability/overview.md
- Logs: observability/logs.md
@ -31,7 +32,8 @@ nav:
- Acquisition: write_configurations/acquisition.md
- Parsers: write_configurations/parsers.md
- Scenarios: write_configurations/scenarios.md
- Whitelist: write_configurations/whitelist.md
- Whitelists: write_configurations/whitelist.md
- Expressions: write_configurations/expressions.md
- Blockers:
- Overview : blockers/index.md
- Nginx:
@ -204,6 +206,11 @@ extra:
Name: Overflow
htmlname: "[overflow](/getting_started/glossary/#overflow-or-signaloccurence)"
Htmlname: "[Overflow](/getting_started/glossary/#overflow-or-signaloccurence)"
whitelists:
name: whitelists
Name: Whitelists
htmlname: "[whitelists](/write_configurations/whitelist/)"
Htmlname: "[Whitelists](/write_configurations/whitelist/)"
signal:
name: signal
Name: Signal

View file

@ -813,9 +813,6 @@ func HubStatus(itype string, name string, list_all bool) []map[string]string {
log.Errorf("type %s doesn't exist", itype)
return nil
}
if list_all {
log.Printf("only enabled ones")
}
var mli []map[string]string
/*remember, you do it for the user :)*/

View file

@ -5,6 +5,7 @@ import (
"testing"
"github.com/antonmedv/expr"
"github.com/stretchr/testify/require"
"gotest.tools/assert"
)
@ -113,3 +114,22 @@ func TestFile(t *testing.T) {
assert.Equal(t, test.result, result)
}
}
// TestIpInRange verifies that the IpInRange helper can be exposed to the
// expression engine : the filter is compiled with the helper in its
// environment, evaluated, and must report the IP as inside the CIDR.
func TestIpInRange(t *testing.T) {
	env := map[string]interface{}{
		"ip":        "192.168.0.1",
		"ipRange":   "192.168.0.0/24",
		"IpInRange": IpInRange,
	}

	code := "IpInRange(ip, ipRange)"
	log.Printf("Running filter : %s", code)

	program, compileErr := expr.Compile(code, expr.Env(env))
	require.NoError(t, compileErr)

	output, runErr := expr.Run(program, env)
	require.NoError(t, runErr)
	require.Equal(t, true, output)
}

View file

@ -3,6 +3,7 @@ package exprhelpers
import (
"bufio"
"fmt"
"net"
"os"
"path"
"regexp"
@ -36,6 +37,7 @@ func GetExprEnv(ctx map[string]interface{}) map[string]interface{} {
"File": File,
"RegexpInFile": RegexpInFile,
"Upper": Upper,
"IpInRange": IpInRange,
}
for k, v := range ctx {
ExprLib[k] = v
@ -50,6 +52,7 @@ func Init() error {
}
func FileInit(fileFolder string, filename string, fileType string) error {
log.Debugf("init (folder:%s) (file:%s) (type:%s)", fileFolder, filename, fileType)
filepath := path.Join(fileFolder, filename)
file, err := os.Open(filepath)
if err != nil {
@ -65,6 +68,9 @@ func FileInit(fileFolder string, filename string, fileType string) error {
}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
if strings.HasPrefix(scanner.Text(), "#") { // allow comments
continue
}
switch fileType {
case "regex", "regexp":
dataFileRegex[filename] = append(dataFileRegex[filename], regexp.MustCompile(scanner.Text()))
@ -85,7 +91,7 @@ func File(filename string) []string {
if _, ok := dataFile[filename]; ok {
return dataFile[filename]
}
log.Errorf("file '%s' not found for expr library", filename)
log.Errorf("file '%s' (type:string) not found in expr library", filename)
return []string{}
}
@ -97,7 +103,27 @@ func RegexpInFile(data string, filename string) bool {
}
}
} else {
log.Errorf("file '%s' not found for expr library", filename)
log.Errorf("file '%s' (type:regexp) not found in expr library", filename)
}
return false
}
// IpInRange reports whether ip (a literal IPv4/IPv6 address) belongs to the
// CIDR network ipRange. Invalid input is logged and treated as "not in range".
func IpInRange(ip string, ipRange string) bool {
	parsedIP := net.ParseIP(ip)
	if parsedIP == nil {
		log.Errorf("'%s' is not a valid IP", ip)
		return false
	}

	_, network, err := net.ParseCIDR(ipRange)
	if err != nil {
		log.Errorf("'%s' is not a valid IP Range", ipRange)
		return false
	}

	return network.Contains(parsedIP)
}

View file

@ -10,6 +10,7 @@ import (
"testing"
"time"
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
"github.com/crowdsecurity/crowdsec/pkg/parser"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/davecgh/go-spew/spew"
@ -25,6 +26,10 @@ type TestFile struct {
func TestBucket(t *testing.T) {
var envSetting = os.Getenv("TEST_ONLY")
err := exprhelpers.Init()
if err != nil {
log.Fatalf("exprhelpers init failed: %s", err)
}
if envSetting != "" {
if err := testOneBucket(t, envSetting); err != nil {

View file

@ -112,10 +112,6 @@ func LoadBuckets(files []string, dataFolder string) ([]BucketFactory, chan types
)
var seed namegenerator.Generator = namegenerator.NewNameGenerator(time.Now().UTC().UnixNano())
err := exprhelpers.Init()
if err != nil {
return nil, nil, err
}
response = make(chan types.Event, 1)
for _, f := range files {

View file

@ -176,7 +176,7 @@ func (o *Output) ProcessOutput(sig types.SignalOccurence, profiles []types.Profi
return err
}
if warn != nil {
logger.Infof("node warning : %s", warn)
logger.Debugf("node warning : %s", warn)
}
if ordr != nil {
bans, err := types.OrderToApplications(ordr)

View file

@ -18,7 +18,7 @@ func reverse_dns(field string, p *types.Event, ctx interface{}) (map[string]stri
}
rets, err := net.LookupAddr(field)
if err != nil {
log.Infof("failed to resolve '%s'", field)
log.Debugf("failed to resolve '%s'", field)
return nil, nil
}
//When using the host C library resolver, at most one result will be returned. To bypass the host resolver, use a custom Resolver.

View file

@ -137,14 +137,15 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
NodeState = true
clog.Debugf("eval(TRUE) '%s'", n.Filter)
} else {
clog.Tracef("Node has not filter, enter")
clog.Debugf("Node has not filter, enter")
NodeState = true
}
if n.Name != "" {
NodesHits.With(prometheus.Labels{"source": p.Line.Src, "name": n.Name}).Inc()
}
set := false
isWhitelisted := false
hasWhitelist := false
var src net.IP
/*overflow and log don't hold the source ip in the same field, should be changed */
/* perform whitelist checks for ips, cidr accordingly */
@ -160,24 +161,28 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
if v.Equal(src) {
clog.Debugf("Event from [%s] is whitelisted by Ips !", src)
p.Whitelisted = true
set = true
isWhitelisted = true
} else {
clog.Debugf("whitelist: %s is not eq [%s]", src, v)
}
hasWhitelist = true
}
for _, v := range n.Whitelist.B_Cidrs {
if v.Contains(src) {
clog.Debugf("Event from [%s] is whitelisted by Cidrs !", src)
p.Whitelisted = true
set = true
isWhitelisted = true
} else {
clog.Debugf("whitelist: %s not in [%s]", src, v)
}
hasWhitelist = true
}
} else {
clog.Debugf("no ip in event, cidr/ip whitelists not checked")
}
/* run whitelist expression tests anyway */
for _, e := range n.Whitelist.B_Exprs {
for eidx, e := range n.Whitelist.B_Exprs {
output, err := expr.Run(e, exprhelpers.GetExprEnv(map[string]interface{}{"evt": p}))
if err != nil {
clog.Warningf("failed to run whitelist expr : %v", err)
@ -190,11 +195,14 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
if out {
clog.Debugf("Event is whitelisted by Expr !")
p.Whitelisted = true
set = true
isWhitelisted = true
}
hasWhitelist = true
default:
log.Errorf("unexpected type %t (%v) while running '%s'", output, output, n.Whitelist.Exprs[eidx])
}
}
if set {
if isWhitelisted {
p.WhiteListReason = n.Whitelist.Reason
/*huglily wipe the ban order if the event is whitelisted and it's an overflow */
if p.Type == types.OVFLW { /*don't do this at home kids */
@ -202,6 +210,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
//Break this for now. Shouldn't have been done this way, but that's not that serious
/*only display logs when we discard ban to avoid spam*/
clog.Infof("Ban for %s whitelisted, reason [%s]", p.Overflow.Source.Ip.String(), n.Whitelist.Reason)
p.Overflow.Whitelisted = true
}
}
@ -295,9 +304,9 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
if n.Name != "" {
NodesHitsOk.With(prometheus.Labels{"source": p.Line.Src, "name": n.Name}).Inc()
}
if len(n.Statics) > 0 {
if hasWhitelist && isWhitelisted && len(n.Statics) > 0 || len(n.Statics) > 0 && !hasWhitelist {
clog.Debugf("+ Processing %d statics", len(n.Statics))
// if all else is good, process node's statics
// if all else is good in whitelist, process node's statics
err := ProcessStatics(n.Statics, p, clog)
if err != nil {
clog.Fatalf("Failed to process statics : %v", err)

View file

@ -10,6 +10,7 @@ import (
"strings"
"testing"
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/davecgh/go-spew/spew"
log "github.com/sirupsen/logrus"
@ -139,6 +140,10 @@ func testOneParser(pctx *UnixParserCtx, dir string, b *testing.B) error {
func prepTests() (*UnixParserCtx, error) {
var pctx *UnixParserCtx
var p UnixParser
err := exprhelpers.Init()
if err != nil {
log.Fatalf("exprhelpers init failed: %s", err)
}
//Load enrichment
datadir := "../../data/"

View file

@ -43,10 +43,6 @@ func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx) ([]Node, error) {
tmpstages := make(map[string]bool)
pctx.Stages = []string{}
err := exprhelpers.Init()
if err != nil {
return nil, err
}
for _, stageFile := range stageFiles {
if !strings.HasSuffix(stageFile.Filename, ".yaml") {
log.Warningf("skip non yaml : %s", stageFile.Filename)

View file

@ -9,3 +9,6 @@ whitelist:
- "1.2.3.0/24"
expression:
- "'supertoken1234' == evt.Enriched.test_token"
statics:
- meta: statics
value: success

View file

@ -3,41 +3,51 @@ lines:
- Meta:
test: test1
source_ip: 8.8.8.8
statics: toto
- Meta:
test: test2
source_ip: 1.2.3.4
statics: toto
- Meta:
test: test3
source_ip: 2.2.3.4
statics: toto
- Meta:
test: test4
source_ip: 8.8.8.9
statics: toto
- Enriched:
test_token: supertoken1234
Meta:
test: test5
statics: toto
#these are the results we expect from the parser
results:
- Whitelisted: true
Process: true
Meta:
test: test1
statics: success
- Whitelisted: true
Process: true
Meta:
test: test2
statics: success
- Whitelisted: false
Process: true
Meta:
test: test3
statics: toto
- Whitelisted: false
Process: true
Meta:
test: test4
statics: toto
- Whitelisted: true
Process: true
Meta:
test: test5
statics: success

190
pkg/sqlite/commit.go Normal file
View file

@ -0,0 +1,190 @@
package sqlite
import (
"fmt"
"sync/atomic"
"time"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// DeleteExpired removes ban applications whose expiration date ("until")
// is in the past. It is a no-op when flushing is disabled.
// Returns an error if the delete query fails (the original silently
// discarded retx.Error).
func (c *Context) DeleteExpired() error {
	//Delete the expired records
	if c.flush {
		retx := c.Db.Where(`strftime("%s", until) < strftime("%s", "now")`).Delete(types.BanApplication{})
		if retx.Error != nil {
			return errors.Wrap(retx.Error, "failed to delete expired bans")
		}
		if retx.RowsAffected > 0 {
			log.Infof("Flushed %d expired entries from Ban Application", retx.RowsAffected)
		}
	}
	return nil
}
// Flush commits the current rolling transaction and immediately opens a
// fresh one. On commit failure a new transaction is still started so the
// context remains usable, and the error is returned to the caller.
// Guarded by the context lock for concurrent use.
func (c *Context) Flush() error {
	c.lock.Lock()
	defer c.lock.Unlock()

	commit := c.tx.Commit()
	if commit.Error != nil {
		c.tx = c.Db.Begin()
		return fmt.Errorf("failed to commit records : %v", commit.Error)
	}

	c.tx = c.Db.Begin()
	c.lastCommit = time.Now()
	return nil
}
// CleanUpRecordsByAge purges soft-deleted ban applications (and their
// associated signal occurences and event sequences) that are older than
// the configured max_records_age retention window.
// It is a no-op when no retention duration is configured.
func (c *Context) CleanUpRecordsByAge() error {
	//let's fetch all expired records that are more than XX days old
	sos := []types.BanApplication{}
	if c.maxDurationRetention == 0 {
		return nil
	}

	//look for soft-deleted events that are OLDER than maxDurationRetention :
	//deleted_at must be EARLIER than (now - retention). The original used '>',
	//which selected records deleted *within* the retention window instead.
	ret := c.Db.Unscoped().Table("ban_applications").Where("deleted_at is not NULL").
		Where(fmt.Sprintf("deleted_at < date('now','-%d minutes')", int(c.maxDurationRetention.Minutes()))).
		Order("updated_at desc").Find(&sos)

	if ret.Error != nil {
		return errors.Wrap(ret.Error, "failed to get count of old records")
	}

	//no events eligible
	if len(sos) == 0 || ret.RowsAffected == 0 {
		log.Debugf("no event older than %s", c.maxDurationRetention.String())
		return nil
	}
	//let's do it in a single transaction
	delTx := c.Db.Unscoped().Begin()
	delRecords := 0
	for _, record := range sos {
		copy := record
		//we need to delete associations : event_sequences, signal_occurences
		delTx.Unscoped().Table("signal_occurences").Where("ID = ?", copy.SignalOccurenceID).Delete(&types.SignalOccurence{})
		delTx.Unscoped().Table("event_sequences").Where("signal_occurence_id = ?", copy.SignalOccurenceID).Delete(&types.EventSequence{})
		delTx.Unscoped().Table("ban_applications").Delete(&copy)
		delRecords++
	}
	ret = delTx.Unscoped().Commit()
	if ret.Error != nil {
		return errors.Wrap(ret.Error, "failed to delete records")
	}
	log.Printf("max_records_age: deleting %d events (max age:%s)", delRecords, c.maxDurationRetention)
	return nil
}
// CleanUpRecordsByCount enforces the max_records retention cap : once the
// total number of ban_applications rows reaches maxEventRetention, already
// soft-deleted records are hard-deleted (together with their associated
// signal occurences and event sequences) until the count drops back under
// the cap. No-op when the cap is unset or non-positive.
func (c *Context) CleanUpRecordsByCount() error {
	var count int
	// retention by count disabled
	if c.maxEventRetention <= 0 {
		return nil
	}
	ret := c.Db.Unscoped().Table("ban_applications").Order("updated_at desc").Count(&count)
	if ret.Error != nil {
		return errors.Wrap(ret.Error, "failed to get bans count")
	}
	// still under the cap : nothing to clean
	if count < c.maxEventRetention {
		log.Debugf("%d < %d, don't cleanup", count, c.maxEventRetention)
		return nil
	}
	sos := []types.BanApplication{}
	/*get soft deleted records oldest to youngest*/
	// NOTE(review): this query has no ORDER BY, so rows are NOT guaranteed to
	// arrive oldest-first as the comment above claims — TODO confirm intent.
	records := c.Db.Unscoped().Table("ban_applications").Where("deleted_at is not NULL").Where(`strftime("%s", deleted_at) < strftime("%s", "now")`).Find(&sos)
	if records.Error != nil {
		return errors.Wrap(records.Error, "failed to list expired bans for flush")
	}
	//let's do it in a single transaction
	delTx := c.Db.Unscoped().Begin()
	delRecords := 0
	for _, ld := range sos {
		copy := ld
		delTx.Unscoped().Table("signal_occurences").Where("ID = ?", copy.SignalOccurenceID).Delete(&types.SignalOccurence{})
		delTx.Unscoped().Table("event_sequences").Where("signal_occurence_id = ?", copy.SignalOccurenceID).Delete(&types.EventSequence{})
		delTx.Unscoped().Table("ban_applications").Delete(&copy)
		//we need to delete associations : event_sequences, signal_occurences
		delRecords++
		//let's delete as well the associated event_sequence
		// stop once enough rows are purged to bring the total back under the cap
		if count-delRecords <= c.maxEventRetention {
			break
		}
	}
	if len(sos) > 0 {
		//log.Printf("Deleting %d soft-deleted results out of %d total events (%d soft-deleted)", delRecords, count, len(sos))
		log.Printf("max_records: deleting %d events. (%d soft-deleted)", delRecords, len(sos))
		ret = delTx.Unscoped().Commit()
		if ret.Error != nil {
			return errors.Wrap(ret.Error, "failed to delete records")
		}
	} else {
		log.Debugf("didn't find any record to clean")
	}
	return nil
}
// StartAutoCommit launches the background autocommit loop under the pusher
// tomb, so it can be supervised and shut down cleanly via PusherTomb.Kill.
// Always returns nil.
func (c *Context) StartAutoCommit() error {
	//TBD : we shouldn't start auto-commit if we are in cli mode ?
	c.PusherTomb.Go(func() error {
		c.autoCommit()
		return nil
	})
	return nil
}
// autoCommit is the sqlite background loop. Until the tomb is killed it :
//   - flushes the rolling transaction when enough writes are pending or the
//     last commit is stale (every 200ms check),
//   - deletes expired bans every second,
//   - runs the count/age retention cleanups every minute.
//
// On shutdown it performs a final flush/commit and closes the transaction
// and database handles.
func (c *Context) autoCommit() {
	log.Debugf("starting autocommit")
	ticker := time.NewTicker(200 * time.Millisecond)
	cleanUpTicker := time.NewTicker(1 * time.Minute)
	expireTicker := time.NewTicker(1 * time.Second)
	// release ticker resources on exit (the original leaked all three)
	defer ticker.Stop()
	defer cleanUpTicker.Stop()
	defer expireTicker.Stop()
	if !c.flush {
		log.Debugf("flush is disabled")
	}
	for {
		select {
		case <-c.PusherTomb.Dying():
			//we need to shutdown
			log.Infof("sqlite routine shutdown")
			if err := c.Flush(); err != nil {
				log.Errorf("error while flushing records: %s", err)
			}
			if ret := c.tx.Commit(); ret.Error != nil {
				log.Errorf("failed to commit records : %v", ret.Error)
			}
			if err := c.tx.Close(); err != nil {
				log.Errorf("error while closing tx : %s", err)
			}
			if err := c.Db.Close(); err != nil {
				log.Errorf("error while closing db : %s", err)
			}
			return
		case <-expireTicker.C:
			if err := c.DeleteExpired(); err != nil {
				log.Errorf("Error while deleting expired records: %s", err)
			}
		case <-ticker.C:
			// flush every 100 pending writes, or when the last commit is
			// older than 500ms and writes are pending
			if atomic.LoadInt32(&c.count) != 0 &&
				(atomic.LoadInt32(&c.count)%100 == 0 || time.Since(c.lastCommit) >= 500*time.Millisecond) {
				if err := c.Flush(); err != nil {
					log.Errorf("failed to flush : %s", err)
				}
			}
		case <-cleanUpTicker.C:
			if err := c.CleanUpRecordsByCount(); err != nil {
				log.Errorf("error in max records cleanup : %s", err)
			}
			if err := c.CleanUpRecordsByAge(); err != nil {
				log.Errorf("error in old records cleanup : %s", err)
			}
		}
	}
}

88
pkg/sqlite/sqlite.go Normal file
View file

@ -0,0 +1,88 @@
package sqlite
import (
"fmt"
"strconv"
"sync"
"time"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/sqlite"
_ "github.com/mattn/go-sqlite3"
"gopkg.in/tomb.v2"
)
// Context is the sqlite backend state : it owns the gorm handle, the rolling
// transaction used to batch writes, and the retention settings.
type Context struct {
	Db         *gorm.DB //Pointer to sqlite db
	tx         *gorm.DB //Pointer to current transaction (flushed on a regular basis)
	lastCommit time.Time // time of the last successful commit of tx
	flush      bool      // when true, expired bans are periodically deleted
	count      int32     // pending-write counter, read atomically by the autocommit loop
	lock       sync.Mutex //booboo
	PusherTomb tomb.Tomb // supervises the background autocommit goroutine
	//to manage auto cleanup : max number of records *or* oldest
	maxEventRetention    int
	maxDurationRetention time.Duration
}
// NewSQLite opens (or creates) the sqlite database described by cfg, runs
// schema migration, and returns a Context with an open rolling transaction.
//
// Recognized cfg keys :
//   - db_path (required) : path to the sqlite file
//   - flush (required)   : boolean, enables periodic deletion of expired bans
//   - max_records        : int, retention cap on number of records
//   - max_records_age    : duration, retention cap on record age
//   - debug              : "true" enables gorm query logging
func NewSQLite(cfg map[string]string) (*Context, error) {
	var err error
	c := &Context{}

	// retention settings are best-effort : invalid values are logged and ignored
	if v, ok := cfg["max_records"]; ok {
		c.maxEventRetention, err = strconv.Atoi(v)
		if err != nil {
			log.Errorf("Ignoring invalid max_records '%s' : %s", v, err)
		}
	}
	if v, ok := cfg["max_records_age"]; ok {
		c.maxDurationRetention, err = time.ParseDuration(v)
		if err != nil {
			log.Errorf("Ignoring invalid duration '%s' : %s", v, err)
		}
	}

	// a missing key and an empty value are the same failure
	if cfg["db_path"] == "" {
		return nil, fmt.Errorf("please specify a 'db_path' to SQLite db in the configuration")
	}
	log.Debugf("Starting SQLite backend, path:%s", cfg["db_path"])

	c.Db, err = gorm.Open("sqlite3", cfg["db_path"]+"?_busy_timeout=1000")
	if err != nil {
		return nil, fmt.Errorf("failed to open %s : %s", cfg["db_path"], err)
	}

	if val, ok := cfg["debug"]; ok && val == "true" {
		log.Infof("Enabling debug for sqlite")
		c.Db.LogMode(true)
	}

	c.flush, err = strconv.ParseBool(cfg["flush"])
	if err != nil {
		return nil, errors.Wrap(err, "Unable to parse 'flush' flag")
	}

	// Migrate the schema (the original ignored migration errors)
	if ret := c.Db.AutoMigrate(&types.EventSequence{}, &types.SignalOccurence{}, &types.BanApplication{}); ret.Error != nil {
		return nil, errors.Wrap(ret.Error, "failed to migrate sqlite schema")
	}
	c.Db.Model(&types.SignalOccurence{}).Related(&types.EventSequence{})
	c.Db.Model(&types.SignalOccurence{}).Related(&types.BanApplication{})

	// open, commit and re-open the rolling transaction to validate the handle
	c.tx = c.Db.Begin()
	c.lastCommit = time.Now()
	ret := c.tx.Commit()
	if ret.Error != nil {
		return nil, fmt.Errorf("failed to commit records : %v", ret.Error)
	}
	c.tx = c.Db.Begin()
	if c.tx == nil {
		// the original formatted a nil err here, producing a useless message
		return nil, fmt.Errorf("failed to begin sqlite transaction")
	}
	return c, nil
}

View file

@ -35,9 +35,9 @@ type SignalOccurence struct {
Dest_ip string `json:"dst_ip,omitempty"` //for now just the destination IP
//Policy string `json:"policy,omitempty"` //for now we forward it as well :)
//bucket info
Capacity int `json:"capacity,omitempty"`
Leak_speed time.Duration `json:"leak_speed,omitempty"`
Reprocess bool //Reprocess, when true, will make the overflow being processed again as a fresh log would
Labels map[string]string `gorm:"-"`
Capacity int `json:"capacity,omitempty"`
Leak_speed time.Duration `json:"leak_speed,omitempty"`
Whitelisted bool `gorm:"-"`
Reprocess bool //Reprocess, when true, will make the overflow being processed again as a fresh log would
Labels map[string]string `gorm:"-"`
}

View file

@ -315,6 +315,8 @@ update_full() {
log_info "Backing up existing configuration"
${CSCLI_BIN_INSTALLED} backup save ${BACKUP_DIR}
log_info "Saving default database content"
cp /var/lib/crowdsec/data/crowdsec.db ${BACKUP_DIR}/crowdsec.db
log_info "Cleanup existing crowdsec configuration"
uninstall_crowdsec
log_info "Installing crowdsec"
@ -322,6 +324,8 @@ update_full() {
log_info "Restoring configuration"
${CSCLI_BIN_INSTALLED} update
${CSCLI_BIN_INSTALLED} backup restore ${BACKUP_DIR}
log_info "Restoring saved database"
cp ${BACKUP_DIR}/crowdsec.db /var/lib/crowdsec/data/crowdsec.db
log_info "Finished, restarting"
systemctl restart crowdsec || log_err "Failed to restart crowdsec"
}