Add crowdsec reload + cscli metrics minor improvements (#79)
parent bb60d29ac8
commit 5446857377
80 changed files with 559 additions and 4413 deletions
@@ -199,7 +199,7 @@ func extractMetabaseDB(buf *bytes.Reader) error {
 func resetMetabasePassword(newpassword string) error {
-	httpctx := sling.New().Base(metabaseURI).Set("User-Agent", fmt.Sprintf("CrowdWatch/%s", cwversion.VersionStr()))
+	httpctx := sling.New().Base(metabaseURI).Set("User-Agent", fmt.Sprintf("Crowdsec/%s", cwversion.VersionStr()))
 	log.Printf("Waiting for metabase API to be up (can take up to a minute)")
 	for {
@@ -5,6 +5,7 @@ import (
	"fmt"
	"net/http"
	"os"
	"sort"
	"strconv"
	"strings"
	"time"
@@ -18,6 +19,39 @@ import (
	"github.com/spf13/cobra"
 )

+func metricsToTable(table *tablewriter.Table, stats map[string]map[string]int, keys []string) error {
+
+	var sortedKeys []string
+
+	if table == nil {
+		return fmt.Errorf("nil table")
+	}
+	//sort keys to keep consistent order when printing
+	sortedKeys = []string{}
+	for akey := range stats {
+		sortedKeys = append(sortedKeys, akey)
+	}
+	sort.Strings(sortedKeys)
+	//
+	for _, alabel := range sortedKeys {
+		astats, ok := stats[alabel]
+		if !ok {
+			continue
+		}
+		row := []string{}
+		row = append(row, alabel) //name
+		for _, sl := range keys {
+			if v, ok := astats[sl]; ok && v != 0 {
+				row = append(row, fmt.Sprintf("%d", v))
+			} else {
+				row = append(row, "-")
+			}
+		}
+		table.Append(row)
+	}
+	return nil
+}

 /*This is a complete rip from prom2json*/
 func ShowPrometheus(url string) {
 	mfChan := make(chan *dto.MetricFamily, 1024)
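For readers following along, here is a minimal sketch of how the new helper is meant to be driven. It is not fully self-contained: it assumes the metricsToTable function from the hunk above is in scope, and it uses the same olekukonko/tablewriter API already imported by cscli; the stats content is made up.

package main

import (
	"os"

	"github.com/olekukonko/tablewriter"
)

func main() {
	// illustrative stats, shaped like acquis_stats in the diff above
	stats := map[string]map[string]int{
		"/var/log/auth.log":  {"reads": 120, "parsed": 100, "unparsed": 20},
		"/var/log/nginx.log": {"reads": 50, "parsed": 50},
	}
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Source", "Lines read", "Lines parsed", "Lines unparsed"})
	// metricsToTable appends one row per source in sorted order,
	// printing "-" for keys that are missing or zero
	if err := metricsToTable(table, stats, []string{"reads", "parsed", "unparsed"}); err != nil {
		panic(err)
	}
	table.Render()
}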
@@ -55,11 +89,11 @@ func ShowPrometheus(url string) {
		metric := m.(prom2json.Metric)
		name, ok := metric.Labels["name"]
		if !ok {
-			log.Debugf("no name in Metric")
+			log.Debugf("no name in Metric %v", metric.Labels)
		}
		source, ok := metric.Labels["source"]
		if !ok {
-			log.Debugf("no source in Metric")
+			log.Debugf("no source in Metric %v", metric.Labels)
		}
		value := m.(prom2json.Metric).Value
		fval, err := strconv.ParseFloat(value, 32)
@@ -74,6 +108,11 @@ func ShowPrometheus(url string) {
				buckets_stats[name] = make(map[string]int)
			}
			buckets_stats[name]["instanciation"] += ival
+		case "cs_bucket_count":
+			if _, ok := buckets_stats[name]; !ok {
+				buckets_stats[name] = make(map[string]int)
+			}
+			buckets_stats[name]["curr_count"] += ival
		case "cs_bucket_overflow":
			if _, ok := buckets_stats[name]; !ok {
				buckets_stats[name] = make(map[string]int)
@@ -126,72 +165,33 @@ func ShowPrometheus(url string) {
		}
	}
	if config.output == "human" {
-		atable := tablewriter.NewWriter(os.Stdout)
-		atable.SetHeader([]string{"Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket"})
-		for alabel, astats := range acquis_stats {
-
-			if alabel == "" {
-				continue
-			}
-			row := []string{}
-			row = append(row, alabel) //name
-			for _, sl := range []string{"reads", "parsed", "unparsed", "pour"} {
-				if v, ok := astats[sl]; ok {
-					row = append(row, fmt.Sprintf("%d", v))
-				} else {
-					row = append(row, "-")
-				}
-			}
-			atable.Append(row)
+		acquisTable := tablewriter.NewWriter(os.Stdout)
+		acquisTable.SetHeader([]string{"Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket"})
+		keys := []string{"reads", "parsed", "unparsed", "pour"}
+		if err := metricsToTable(acquisTable, acquis_stats, keys); err != nil {
+			log.Warningf("while collecting acquis stats : %s", err)
+		}
-		btable := tablewriter.NewWriter(os.Stdout)
-		btable.SetHeader([]string{"Bucket", "Overflows", "Instanciated", "Poured", "Expired"})
-		for blabel, bstats := range buckets_stats {
-			if blabel == "" {
-				continue
-			}
-			row := []string{}
-			row = append(row, blabel) //name
-			for _, sl := range []string{"overflow", "instanciation", "pour", "underflow"} {
-				if v, ok := bstats[sl]; ok {
-					row = append(row, fmt.Sprintf("%d", v))
-				} else {
-					row = append(row, "-")
-				}
-			}
-			btable.Append(row)
+		bucketsTable := tablewriter.NewWriter(os.Stdout)
+		bucketsTable.SetHeader([]string{"Bucket", "Current Count", "Overflows", "Instanciated", "Poured", "Expired"})
+		keys = []string{"curr_count", "overflow", "instanciation", "pour", "underflow"}
+		if err := metricsToTable(bucketsTable, buckets_stats, keys); err != nil {
+			log.Warningf("while collecting acquis stats : %s", err)
+		}
-		ptable := tablewriter.NewWriter(os.Stdout)
-		ptable.SetHeader([]string{"Parsers", "Hits", "Parsed", "Unparsed"})
-		for plabel, pstats := range parsers_stats {
-			if plabel == "" {
-				continue
-			}
-			row := []string{}
-			row = append(row, plabel) //name
-			hits := 0
-			parsed := 0
-			for _, sl := range []string{"hits", "parsed"} {
-				if v, ok := pstats[sl]; ok {
-					row = append(row, fmt.Sprintf("%d", v))
-					if sl == "hits" {
-						hits = v
-					} else if sl == "parsed" {
-						parsed = v
-					}
-				} else {
-					row = append(row, "-")
-				}
-			}
-			row = append(row, fmt.Sprintf("%d", hits-parsed))
-			ptable.Append(row)
-
+		parsersTable := tablewriter.NewWriter(os.Stdout)
+		parsersTable.SetHeader([]string{"Parsers", "Hits", "Parsed", "Unparsed"})
+		keys = []string{"hits", "parsed", "unparsed"}
+		if err := metricsToTable(parsersTable, parsers_stats, keys); err != nil {
+			log.Warningf("while collecting acquis stats : %s", err)
+		}

		log.Printf("Buckets Metrics:")
-		btable.Render() // Send output
+		bucketsTable.Render()
		log.Printf("Acquisition Metrics:")
-		atable.Render() // Send output
+		acquisTable.Render()
		log.Printf("Parser Metrics:")
-		ptable.Render() // Send output
+		parsersTable.Render()
	} else if config.output == "json" {
		for _, val := range []map[string]map[string]int{acquis_stats, parsers_stats, buckets_stats} {
			x, err := json.MarshalIndent(val, "", " ")
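The switch earlier in ShowPrometheus folds each Prometheus sample into a nested per-name map, parsing prom2json's string values first. A self-contained sketch of that aggregation step, with illustrative sample data:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// prom2json exposes sample values as strings, so they are parsed
	// before being accumulated, as ShowPrometheus does
	samples := []struct{ name, value string }{
		{"crowdsecurity/ssh-bf", "3"},
		{"crowdsecurity/ssh-bf", "2"},
	}
	bucketsStats := map[string]map[string]int{}
	for _, s := range samples {
		fval, err := strconv.ParseFloat(s.value, 32)
		if err != nil {
			continue
		}
		if _, ok := bucketsStats[s.name]; !ok {
			bucketsStats[s.name] = make(map[string]int)
		}
		bucketsStats[s.name]["curr_count"] += int(fval)
	}
	fmt.Println(bucketsStats) // map[crowdsecurity/ssh-bf:map[curr_count:5]]
}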
@@ -1,9 +1,8 @@
package main

import (
	"strings"

	"io/ioutil"
	"fmt"
	"syscall"

	_ "net/http/pprof"
	"time"

@@ -15,11 +14,11 @@ import (
	"github.com/crowdsecurity/crowdsec/pkg/outputs"
	"github.com/crowdsecurity/crowdsec/pkg/parser"
	"github.com/crowdsecurity/crowdsec/pkg/types"
	"github.com/sevlyar/go-daemon"

	log "github.com/sirupsen/logrus"

	"gopkg.in/tomb.v2"
	"gopkg.in/yaml.v2"
)

var (
@@ -28,70 +27,42 @@ var (
	parsersTomb tomb.Tomb
	bucketsTomb tomb.Tomb
	outputsTomb tomb.Tomb

-	holders []leaky.BucketFactory
-	buckets *leaky.Buckets
	/*global crowdsec config*/
	cConfig *csconfig.CrowdSec

+	/*the state of acquisition*/
+	acquisitionCTX *acquisition.FileAcquisCtx
+	/*the state of the buckets*/
+	holders []leaky.BucketFactory
+	buckets *leaky.Buckets
+	outputEventChan chan types.Event //the buckets init returns its own chan that is used for multiplexing
+	/*the state of outputs*/
+	OutputRunner *outputs.Output
+	outputProfiles []types.Profile
+	/*the state of the parsers*/
+	parserCTX *parser.UnixParserCtx
+	postOverflowCTX *parser.UnixParserCtx
+	parserNodes []parser.Node
+	postOverflowNodes []parser.Node
	/*settings*/
	lastProcessedItem time.Time /*keep track of last item timestamp in time-machine. it is used to GC buckets when we dump them.*/
)

-func main() {
-	var (
-		err error
-		p parser.UnixParser
-		parserNodes []parser.Node = make([]parser.Node, 0)
-		postOverflowNodes []parser.Node = make([]parser.Node, 0)
-		nbParser int = 1
-		parserCTX *parser.UnixParserCtx
-		postOverflowCTX *parser.UnixParserCtx
-		acquisitionCTX *acquisition.FileAcquisCtx
-		CustomParsers []parser.Stagefile
-		CustomPostoverflows []parser.Stagefile
-		CustomScenarios []parser.Stagefile
-		outputEventChan chan types.Event
-	)
+func LoadParsers(cConfig *csconfig.CrowdSec) error {
+	var p parser.UnixParser
+	var err error

-	inputLineChan := make(chan types.Event)
-	inputEventChan := make(chan types.Event)
-
-	cConfig = csconfig.NewCrowdSecConfig()
-
-	// Handle command line arguments
-	if err := cConfig.GetOPT(); err != nil {
-		log.Fatalf(err.Error())
-	}
-
-	if err = types.SetDefaultLoggerConfig(cConfig.LogMode, cConfig.LogFolder, cConfig.LogLevel); err != nil {
-		log.Fatal(err.Error())
-	}
-
-	log.Infof("Crowdwatch %s", cwversion.VersionStr())
-
-	if cConfig.Prometheus {
-		registerPrometheus()
-		cConfig.Profiling = true
-	}
	parserNodes = make([]parser.Node, 0)
	postOverflowNodes = make([]parser.Node, 0)

	log.Infof("Loading grok library")
	/* load base regexps for two grok parsers */
	parserCTX, err = p.Init(map[string]interface{}{"patterns": cConfig.ConfigFolder + string("/patterns/"), "data": cConfig.DataFolder})
	if err != nil {
-		log.Errorf("failed to initialize parser : %v", err)
-		return
+		return fmt.Errorf("failed to load parser patterns : %v", err)
	}
	postOverflowCTX, err = p.Init(map[string]interface{}{"patterns": cConfig.ConfigFolder + string("/patterns/"), "data": cConfig.DataFolder})
	if err != nil {
-		log.Errorf("failed to initialize postoverflow : %v", err)
-		return
-	}
-
-	/*enable profiling*/
-	if cConfig.Profiling {
-		go runTachymeter(cConfig.HTTPListen)
-		parserCTX.Profiling = true
-		postOverflowCTX.Profiling = true
+		return fmt.Errorf("failed to load postovflw parser patterns : %v", err)
	}

	/*
@@ -100,92 +71,37 @@ func main() {
	log.Infof("Loading enrich plugins")
	parserPlugins, err := parser.Loadplugin(cConfig.DataFolder)
	if err != nil {
-		log.Errorf("Failed to load plugin geoip : %v", err)
+		return fmt.Errorf("Failed to load enrich plugin : %v", err)
	}
-	parser.ECTX = append(parser.ECTX, parserPlugins)
+	parser.ECTX = []parser.EnricherCtx{parserPlugins}

-	/*parser the validatormode option if present. mostly used for testing purposes*/
-	if cConfig.ValidatorMode != "" {
-		//beurk : provided 'parser:file.yaml,postoverflow:file.yaml,scenario:file.yaml load only those
-		validators := strings.Split(cConfig.ValidatorMode, ",")
-		for _, val := range validators {
-			splittedValidator := strings.Split(val, ":")
-			if len(splittedValidator) != 2 {
-				log.Fatalf("parser:file,scenario:file,postoverflow:file")
-			}
+	/*
+		Load the actual parsers
+	*/

-			configType := splittedValidator[0]
-			configFile := splittedValidator[1]
+	log.Infof("Loading parsers")
+	parserNodes, err = parser.LoadStageDir(cConfig.ConfigFolder+"/parsers/", parserCTX)

-			var parsedFile []parser.Stagefile
-			dataFile, err := ioutil.ReadFile(configFile)
-
-			if err != nil {
-				log.Fatalf("failed opening %s : %s", configFile, err)
-			}
-			if err := yaml.UnmarshalStrict(dataFile, &parsedFile); err != nil {
-				log.Fatalf("failed unmarshalling %s : %s", configFile, err)
-			}
-			switch configType {
-			case "parser":
-				CustomParsers = parsedFile
-			case "scenario":
-				CustomScenarios = parsedFile
-			case "postoverflow":
-				CustomPostoverflows = parsedFile
-			default:
-				log.Fatalf("wrong type, format is parser:file,scenario:file,postoverflow:file")
-			}
-
-		}
-	}
-
-	/* load the parser nodes */
-	if cConfig.ValidatorMode != "" && len(CustomParsers) > 0 {
-		log.Infof("Loading (validatormode) parsers")
-		parserNodes, err = parser.LoadStages(CustomParsers, parserCTX)
-	} else {
-		log.Infof("Loading parsers")
-		parserNodes, err = parser.LoadStageDir(cConfig.ConfigFolder+"/parsers/", parserCTX)
-	}
	if err != nil {
-		log.Fatalf("failed to load parser config : %v", err)
+		return fmt.Errorf("failed to load parser config : %v", err)
	}
	/* parsers loaded */

	/* load the post-overflow stages*/
-	if cConfig.ValidatorMode != "" && len(CustomPostoverflows) > 0 {
-		log.Infof("Loading (validatormode) postoverflow parsers")
-		postOverflowNodes, err = parser.LoadStages(CustomPostoverflows, postOverflowCTX)
-	} else {
-		log.Infof("Loading postoverflow parsers")
-		postOverflowNodes, err = parser.LoadStageDir(cConfig.ConfigFolder+"/postoverflows/", postOverflowCTX)
-	}
+	log.Infof("Loading postoverflow parsers")
+	postOverflowNodes, err = parser.LoadStageDir(cConfig.ConfigFolder+"/postoverflows/", postOverflowCTX)

	if err != nil {
-		log.Fatalf("failed to load postoverflow config : %v", err)
+		return fmt.Errorf("failed to load postoverflow config : %v", err)
	}

	log.Infof("Loaded Nodes : %d parser, %d postoverflow", len(parserNodes), len(postOverflowNodes))
	/* post overflow loaded */

-	/* Loading buckets / scenarios */
-	if cConfig.ValidatorMode != "" && len(CustomScenarios) > 0 {
-		log.Infof("Loading (validatormode) scenarios")
-		bucketFiles := []string{}
-		for _, scenarios := range CustomScenarios {
-			bucketFiles = append(bucketFiles, scenarios.Filename)
-		}
-		holders, outputEventChan, err = leaky.LoadBuckets(bucketFiles, cConfig.DataFolder)
-
-	} else {
-		log.Infof("Loading scenarios")
-		holders, outputEventChan, err = leaky.Init(map[string]string{"patterns": cConfig.ConfigFolder + "/scenarios/", "data": cConfig.DataFolder})
+	if cConfig.Profiling {
+		parserCTX.Profiling = true
+		postOverflowCTX.Profiling = true
	}
-	if err != nil {
-		log.Fatalf("Scenario loading failed : %v", err)
-	}
-	/* buckets/scenarios loaded */

+	return nil
+}

+func GetEnabledScenarios() string {
	/*keep track of scenarios name for consensus profiling*/
	var scenariosEnabled string
	for _, x := range holders {
@@ -194,39 +110,50 @@ func main() {
		}
		scenariosEnabled += x.Name
	}
	return scenariosEnabled
}

+func LoadBuckets(cConfig *csconfig.CrowdSec) error {
+
+	var err error
+
+	log.Infof("Loading scenarios")
+	holders, outputEventChan, err = leaky.Init(map[string]string{"patterns": cConfig.ConfigFolder + "/scenarios/", "data": cConfig.DataFolder})
+
+	if err != nil {
+		return fmt.Errorf("Scenario loading failed : %v", err)
+	}
	buckets = leaky.NewBuckets()

	/*restore as well previous state if present*/
	if cConfig.RestoreMode != "" {
		log.Warningf("Restoring buckets state from %s", cConfig.RestoreMode)
		if err := leaky.LoadBucketsState(cConfig.RestoreMode, buckets, holders); err != nil {
-			log.Fatalf("unable to restore buckets : %s", err)
+			return fmt.Errorf("unable to restore buckets : %s", err)
		}
	}
	if cConfig.Profiling {
		//force the profiling in all buckets
		for holderIndex := range holders {
			holders[holderIndex].Profiling = true
		}
	}
+	return nil
+}

+func LoadOutputs(cConfig *csconfig.CrowdSec) error {
+	var err error
	/*
		Load output profiles
	*/
	log.Infof("Loading output profiles")
-	outputProfiles, err := outputs.LoadOutputProfiles(cConfig.ConfigFolder + "/profiles.yaml")
+	outputProfiles, err = outputs.LoadOutputProfiles(cConfig.ConfigFolder + "/profiles.yaml")
	if err != nil || len(outputProfiles) == 0 {
-		log.Fatalf("Failed to load output profiles : %v", err)
-	}
-	/* Linting is done */
-	if cConfig.Linter {
-		return
+		return fmt.Errorf("Failed to load output profiles : %v", err)
	}

-	outputRunner, err := outputs.NewOutput(cConfig.OutputConfig, cConfig.Daemonize)
+	OutputRunner, err = outputs.NewOutput(cConfig.OutputConfig, cConfig.Daemonize)
	if err != nil {
-		log.Fatalf("output plugins initialization error : %s", err.Error())
+		return fmt.Errorf("output plugins initialization error : %s", err.Error())
	}

	/* Init the API connector */
@@ -234,14 +161,143 @@
		log.Infof("Loading API client")
		var apiConfig = map[string]string{
			"path": cConfig.ConfigFolder + "/api.yaml",
-			"profile": scenariosEnabled,
+			"profile": GetEnabledScenarios(),
		}
-		if err := outputRunner.InitAPI(apiConfig); err != nil {
-			log.Fatalf(err.Error())
+		if err := OutputRunner.InitAPI(apiConfig); err != nil {
+			return fmt.Errorf("failed to load api : %s", err)
		}
	}
+	return nil
+}

+func LoadAcquisition(cConfig *csconfig.CrowdSec) error {
+	var err error
+	//Init the acqusition : from cli or from acquis.yaml file
+	acquisitionCTX, err = acquisition.LoadAcquisitionConfig(cConfig)
+	if err != nil {
+		return fmt.Errorf("Failed to start acquisition : %s", err)
+	}
+	return nil
+}

+func StartProcessingRoutines(cConfig *csconfig.CrowdSec) (chan types.Event, error) {
+
+	acquisTomb = tomb.Tomb{}
+	parsersTomb = tomb.Tomb{}
+	bucketsTomb = tomb.Tomb{}
+	outputsTomb = tomb.Tomb{}
+
+	inputLineChan := make(chan types.Event)
+	inputEventChan := make(chan types.Event)
+
+	//start go-routines for parsing, buckets pour and ouputs.
+	for i := 0; i < cConfig.NbParsers; i++ {
+		parsersTomb.Go(func() error {
+			err := runParse(inputLineChan, inputEventChan, *parserCTX, parserNodes)
+			if err != nil {
+				log.Errorf("runParse error : %s", err)
+				return err
+			}
+			return nil
+		})
+	}
+
+	for i := 0; i < cConfig.NbParsers; i++ {
+		bucketsTomb.Go(func() error {
+			err := runPour(inputEventChan, holders, buckets)
+			if err != nil {
+				log.Errorf("runPour error : %s", err)
+				return err
+			}
+			return nil
+		})
+	}
+
+	for i := 0; i < cConfig.NbParsers; i++ {
+		outputsTomb.Go(func() error {
+			err := runOutput(inputEventChan, outputEventChan, holders, buckets, *postOverflowCTX, postOverflowNodes, outputProfiles, OutputRunner)
+			if err != nil {
+				log.Errorf("runPour error : %s", err)
+				return err
+			}
+			return nil
+		})
+	}
+	return inputLineChan, nil
+}

+func main() {
+	var (
+		err error
+	)
+
+	cConfig = csconfig.NewCrowdSecConfig()
+
+	// Handle command line arguments
+	if err := cConfig.GetOPT(); err != nil {
+		log.Fatalf(err.Error())
+	}
+	// Configure logging
+	if err = types.SetDefaultLoggerConfig(cConfig.LogMode, cConfig.LogFolder, cConfig.LogLevel); err != nil {
+		log.Fatal(err.Error())
+	}
+
+	daemonCTX := &daemon.Context{
+		PidFileName: cConfig.PIDFolder + "/crowdsec.pid",
+		PidFilePerm: 0644,
+		WorkDir:     "./",
+		Umask:       027,
+	}
+	if cConfig.Daemonize {
+		daemon.SetSigHandler(termHandler, syscall.SIGTERM)
+		daemon.SetSigHandler(reloadHandler, syscall.SIGHUP)
+		daemon.SetSigHandler(debugHandler, syscall.SIGUSR1)
+
+		d, err := daemonCTX.Reborn()
+		if err != nil {
+			log.Fatalf("unable to run daemon: %s ", err.Error())
+		}
+		if d != nil {
+			return
+		}
+	}

-	/*if the user is in "single file mode" (might be writting scenario or parsers), allow loading **without** parsers or scenarios */
+	log.Infof("Crowdsec %s", cwversion.VersionStr())

+	// Enable profiling early
+	if cConfig.Prometheus {
+		registerPrometheus()
+		cConfig.Profiling = true
+	}
+	if cConfig.Profiling {
+		go runTachymeter(cConfig.HTTPListen)
+	}

+	// Start loading configs
+	if err := LoadParsers(cConfig); err != nil {
+		log.Fatalf("Failed to load parsers: %s", err)
+	}

+	if err := LoadBuckets(cConfig); err != nil {
+		log.Fatalf("Failed to load scenarios: %s", err)

+	}

+	if err := LoadOutputs(cConfig); err != nil {
+		log.Fatalf("failed to initialize outputs : %s", err)
+	}

+	if err := LoadAcquisition(cConfig); err != nil {
+		log.Fatalf("Error while loading acquisition config : %s", err)
+	}

+	/* if it's just linting, we're done */
+	if cConfig.Linter {
+		return
+	}

+	/*if the user is in "single file mode" (might be writting scenario or parsers),
+	allow loading **without** parsers or scenarios */
	if cConfig.SingleFile == "" {
		if len(parserNodes) == 0 {
			log.Fatalf("no parser(s) loaded, abort.")
@@ -256,53 +312,29 @@
		}
	}

	//Start the background routines that comunicate via chan
	log.Infof("Starting processing routines")
-	//start go-routines for parsing, buckets pour and ouputs.
-	for i := 0; i < nbParser; i++ {
-		parsersTomb.Go(func() error {
-			err := runParse(inputLineChan, inputEventChan, *parserCTX, parserNodes)
-			if err != nil {
-				log.Errorf("runParse error : %s", err)
-				return err
-			}
-			return nil
-		})
-	}
-
-	for i := 0; i < nbParser; i++ {
-		bucketsTomb.Go(func() error {
-			err := runPour(inputEventChan, holders, buckets)
-			if err != nil {
-				log.Errorf("runPour error : %s", err)
-				return err
-			}
-			return nil
-		})
-	}
-
-	for i := 0; i < nbParser; i++ {
-		outputsTomb.Go(func() error {
-			err := runOutput(inputEventChan, outputEventChan, holders, buckets, *postOverflowCTX, postOverflowNodes, outputProfiles, outputRunner)
-			if err != nil {
-				log.Errorf("runPour error : %s", err)
-				return err
-			}
-			return nil
-		})
+	inputLineChan, err := StartProcessingRoutines(cConfig)
+	if err != nil {
+		log.Fatalf("failed to start processing routines : %s", err)
	}

	//Fire!
	log.Warningf("Starting processing data")

-	//Init the acqusition : from cli or from acquis.yaml file
-	acquisitionCTX, err = acquisition.LoadAcquisitionConfig(cConfig)
-	if err != nil {
-		log.Fatalf("Failed to start acquisition : %s", err)
-	}
	//start reading in the background
	acquisition.AcquisStartReading(acquisitionCTX, inputLineChan, &acquisTomb)

-	if err = serve(*outputRunner); err != nil {
-		log.Fatalf(err.Error())
+	if !cConfig.Daemonize {
+		if err = serveOneTimeRun(*OutputRunner); err != nil {
+			log.Errorf(err.Error())
+		} else {
+			return
+		}
+	} else {
+		defer daemonCTX.Release() //nolint:errcheck // won't bother checking this error in defer statement
+		err = daemon.ServeSignals()
+		if err != nil {
+			log.Fatalf("serveDaemon error : %s", err.Error())
+		}
	}

}
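StartProcessingRoutines replaces the inline loops of the old main: each stage gets its own tomb so the reload handler can later kill and re-create the whole pipeline. A self-contained sketch of the tomb-per-stage pattern, with a stand-in worker body:

package main

import (
	"fmt"

	"gopkg.in/tomb.v2"
)

func main() {
	var workers tomb.Tomb
	jobs := make(chan int)
	for i := 0; i < 4; i++ {
		workers.Go(func() error {
			for {
				select {
				case j := <-jobs:
					fmt.Println("processed", j) // stand-in for runParse/runPour/runOutput
				case <-workers.Dying():
					return nil
				}
			}
		})
	}
	jobs <- 1
	workers.Kill(nil)           // ask every worker to stop...
	fmt.Println(workers.Wait()) // ...and collect the first error (nil here)
}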
@@ -67,12 +67,15 @@ var globalBucketPourOk = prometheus.NewCounter(
)

func dumpMetrics() {
+	var tmpFile string
+	var err error

	if cConfig.DumpBuckets {
		log.Infof("!! Dumping buckets state")
-		if err := leaky.DumpBucketsStateAt("buckets_state.json", time.Now(), buckets); err != nil {
+		if tmpFile, err = leaky.DumpBucketsStateAt(time.Now(), buckets); err != nil {
			log.Fatalf("Failed dumping bucket state : %s", err)
		}
+		log.Infof("Buckets state dumped to %s", tmpFile)
	}

	if cConfig.Profiling {
@@ -117,8 +120,9 @@ func runTachymeter(HTTPListen string) {
func registerPrometheus() {
	/*Registering prometheus*/
	log.Warningf("Loading prometheus collectors")
-	prometheus.MustRegister(globalParserHits, globalParserHitsOk, globalParserHitsKo, parser.NodesHits, parser.NodesHitsOk,
-		parser.NodesHitsKo, acquisition.ReaderHits, leaky.BucketsPour, leaky.BucketsUnderflow, leaky.BucketsInstanciation,
-		leaky.BucketsOverflow)
+	prometheus.MustRegister(globalParserHits, globalParserHitsOk, globalParserHitsKo,
+		parser.NodesHits, parser.NodesHitsOk, parser.NodesHitsKo,
+		acquisition.ReaderHits,
+		leaky.BucketsPour, leaky.BucketsUnderflow, leaky.BucketsInstanciation, leaky.BucketsOverflow, leaky.BucketsCurrentCount)
	http.Handle("/metrics", promhttp.Handler())
}
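registerPrometheus now also registers the new cs_bucket_count gauge. A self-contained sketch of registering a gauge vector and exposing it over /metrics with the same client_golang calls; the listen address and label value are illustrative:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var bucketCount = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Name: "cs_bucket_count",
		Help: "How many instances of this bucket exist.",
	},
	[]string{"name"},
)

func main() {
	prometheus.MustRegister(bucketCount)
	bucketCount.With(prometheus.Labels{"name": "demo"}).Inc()
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe("127.0.0.1:6060", nil)
}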
@@ -21,9 +21,14 @@ func runOutput(input chan types.Event, overflow chan types.Event, holders []leak
LOOP:
	for {
		select {
-		case <-bucketsTomb.Dying():
-			log.Infof("Exiting output processing")
+		case <-outputsTomb.Dying():
+			log.Infof("Flushing outputs")
+			output.FlushAll()
+			log.Debugf("Shuting down output routines")
+			if err := output.Shutdown(); err != nil {
+				log.Errorf("error while in output shutdown: %s", err)
+			}
+			log.Infof("Done shutdown down output")
			break LOOP
		case event := <-overflow:
			if cConfig.Profiling {
@@ -34,9 +34,9 @@ LOOP:
		}
		if cConfig.Profiling {
			atomic.AddUint64(&linesReadOK, 1)
-			globalParserHits.With(prometheus.Labels{"source": event.Line.Src}).Inc()
		}
+		globalParserHits.With(prometheus.Labels{"source": event.Line.Src}).Inc()

		/* parse the log using magic */
		parsed, error := parser.Parse(parserCTX, event, nodes)
		if error != nil {

@@ -45,17 +45,17 @@ LOOP:
		}
		if !parsed.Process {
			if cConfig.Profiling {
-				globalParserHitsKo.With(prometheus.Labels{"source": event.Line.Src}).Inc()
				atomic.AddUint64(&linesParsedKO, 1)
			}
+			globalParserHitsKo.With(prometheus.Labels{"source": event.Line.Src}).Inc()
			log.Debugf("Discarding line %+v", parsed)
			discardCPT++
			continue
		}
		if cConfig.Profiling {
-			globalParserHitsOk.With(prometheus.Labels{"source": event.Line.Src}).Inc()
			atomic.AddUint64(&linesParsedOK, 1)
		}
+		globalParserHitsOk.With(prometheus.Labels{"source": event.Line.Src}).Inc()
		processCPT++
		if parsed.Whitelisted {
			log.Debugf("event whitelisted, discard")
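Across runParse the Prometheus counters are no longer gated on cConfig.Profiling; only the tachymeter's atomic counters stay behind the flag. The calls follow the standard CounterVec pattern, shown here as a self-contained sketch (the metric name is illustrative, not crowdsec's):

package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

var parserHits = prometheus.NewCounterVec(
	prometheus.CounterOpts{Name: "cs_parser_hits_total", Help: "parser hits per source"},
	[]string{"source"},
)

func main() {
	prometheus.MustRegister(parserHits)
	// one counter child per label value; With() creates it on first use
	parserHits.With(prometheus.Labels{"source": "/var/log/auth.log"}).Inc()
}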
@@ -20,7 +20,8 @@ LOOP:
		//bucket is now ready
		select {
		case <-bucketsTomb.Dying():
-			log.Infof("Exiting Bucketify")
+			log.Infof("Exiting pour routine")
+
			break LOOP
		case parsed := <-input:
			count++
@@ -1,78 +1,131 @@
package main

import (
	"fmt"
	"os"
	"syscall"
	"time"

	"github.com/crowdsecurity/crowdsec/pkg/acquisition"
	leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
	"github.com/crowdsecurity/crowdsec/pkg/outputs"
	log "github.com/sirupsen/logrus"

	"github.com/sevlyar/go-daemon"
)

-func reloadHandler(sig os.Signal) error {
-	dumpMetrics()
+//debugHandler is kept as a dev convenience : it shuts down and serialize internal state
+func debugHandler(sig os.Signal) error {
+	var tmpFile string
+	var err error
+	//stop go routines
+	if err := ShutdownRoutines(); err != nil {
+		log.Warningf("Failed to shut down routines: %s", err)
+	}
+	//todo : properly stop acquis with the tail readers
+	if tmpFile, err = leaky.DumpBucketsStateAt(time.Now(), buckets); err != nil {
+		log.Warningf("Failed dumping bucket state : %s", err)
+	}
+	if err := leaky.ShutdownAllBuckets(buckets); err != nil {
+		log.Warningf("while shutting down routines : %s", err)
+	}
+	log.Printf("shutdown is finished buckets are in %s", tmpFile)
	return nil
}

-func termHandler(sig os.Signal) error {
-	log.Warningf("Shutting down routines")
+func reloadHandler(sig os.Signal) error {
+	var tmpFile string
+	var err error
+	//stop go routines
+	if err := ShutdownRoutines(); err != nil {
+		log.Fatalf("Failed to shut down routines: %s", err)
+	}
+	if tmpFile, err = leaky.DumpBucketsStateAt(time.Now(), buckets); err != nil {
+		log.Fatalf("Failed dumping bucket state : %s", err)
+	}
+
+	if err := leaky.ShutdownAllBuckets(buckets); err != nil {
+		log.Fatalf("while shutting down routines : %s", err)
+	}
+	//reload all and start processing again :)
+	if err := LoadParsers(cConfig); err != nil {
+		log.Fatalf("Failed to load parsers: %s", err)
+	}
+
+	if err := LoadBuckets(cConfig); err != nil {
+		log.Fatalf("Failed to load scenarios: %s", err)
+
+	}
+	//restore bucket state
+	log.Warningf("Restoring buckets state from %s", tmpFile)
+	if err := leaky.LoadBucketsState(tmpFile, buckets, holders); err != nil {
+		log.Fatalf("unable to restore buckets : %s", err)
+	}
+
+	if err := LoadOutputs(cConfig); err != nil {
+		log.Fatalf("failed to initialize outputs : %s", err)
+	}
+
+	if err := LoadAcquisition(cConfig); err != nil {
+		log.Fatalf("Error while loading acquisition config : %s", err)
+	}
+	//Start the background routines that comunicate via chan
+	log.Infof("Starting processing routines")
+	inputLineChan, err := StartProcessingRoutines(cConfig)
+	if err != nil {
+		log.Fatalf("failed to start processing routines : %s", err)
+	}
+
+	//Fire!
+	log.Warningf("Starting processing data")
+
+	acquisition.AcquisStartReading(acquisitionCTX, inputLineChan, &acquisTomb)
+
+	log.Printf("Reload is finished")
+	//delete the tmp file, it's safe now :)
+	if err := os.Remove(tmpFile); err != nil {
+		log.Warningf("Failed to delete temp file (%s) : %s", tmpFile, err)
+	}
+	return nil
+}

+func ShutdownRoutines() error {
+	var reterr error
+
	acquisTomb.Kill(nil)
	log.Infof("waiting for acquisition to finish")
	if err := acquisTomb.Wait(); err != nil {
		log.Warningf("Acquisition returned error : %s", err)
		reterr = err
	}
	log.Infof("acquisition is finished, wait for parser/bucket/ouputs.")
	parsersTomb.Kill(nil)
	if err := parsersTomb.Wait(); err != nil {
		log.Warningf("Parsers returned error : %s", err)
		reterr = err
	}
	log.Infof("parsers is done")
	bucketsTomb.Kill(nil)
	if err := bucketsTomb.Wait(); err != nil {
		log.Warningf("Buckets returned error : %s", err)
		reterr = err
	}
	log.Infof("buckets is done")
	outputsTomb.Kill(nil)
	if err := outputsTomb.Wait(); err != nil {
		log.Warningf("Ouputs returned error : %s", err)
		reterr = err

	}
-	log.Infof("ouputs is done")
-	dumpMetrics()
-	log.Warningf("all routines are done, bye.")
-	return daemon.ErrStop
+	log.Infof("outputs are done")
+	return reterr
}

-func serveDaemon() error {
-	var daemonCTX *daemon.Context
-
-	daemon.SetSigHandler(termHandler, syscall.SIGTERM)
-	daemon.SetSigHandler(reloadHandler, syscall.SIGHUP)
-
-	daemonCTX = &daemon.Context{
-		PidFileName: cConfig.PIDFolder + "/crowdsec.pid",
-		PidFilePerm: 0644,
-		WorkDir: "./",
-		Umask: 027,
+func termHandler(sig os.Signal) error {
+	log.Infof("Shutting down routines")
+	if err := ShutdownRoutines(); err != nil {
+		log.Errorf("Error encountered while shutting down routines : %s", err)
	}

-	d, err := daemonCTX.Reborn()
-	if err != nil {
-		return fmt.Errorf("unable to run daemon: %s ", err.Error())
-	}
-	if d != nil {
-		return nil
-	}
-	defer daemonCTX.Release() //nolint:errcheck // won't bother checking this error in defer statement
-	err = daemon.ServeSignals()
-	if err != nil {
-		return fmt.Errorf("serveDaemon error : %s", err.Error())
-	}
-	return nil
+	log.Warningf("all routines are done, bye.")
+	return daemon.ErrStop
}

func serveOneTimeRun(outputRunner outputs.Output) error {
@@ -87,42 +140,11 @@ func serveOneTimeRun(outputRunner outputs.Output) error {
	time.Sleep(5 * time.Second)

-	// wait for the parser to parse all events
-	parsersTomb.Kill(nil)
-	if err := parsersTomb.Wait(); err != nil {
-		log.Warningf("parsers returned error : %s", err)
+	if err := ShutdownRoutines(); err != nil {
+		log.Errorf("failed shutting down routines : %s", err)
	}
-	log.Infof("parsers is done")
-
-	// wait for the bucket to pour all events
-	bucketsTomb.Kill(nil)
-	if err := bucketsTomb.Wait(); err != nil {
-		log.Warningf("buckets returned error : %s", err)
-	}
-	log.Infof("buckets is done")
-
-	// wait for output to output all event
-	outputsTomb.Kill(nil)
-	if err := outputsTomb.Wait(); err != nil {
-		log.Warningf("ouputs returned error : %s", err)
-
-	}
-	log.Infof("ouputs is done")
	dumpMetrics()
	outputRunner.Flush()
	log.Warningf("all routines are done, bye.")
	return nil
}

-func serve(outputRunner outputs.Output) error {
-	var err error
-	if cConfig.Daemonize {
-		if err = serveDaemon(); err != nil {
-			return fmt.Errorf(err.Error())
-		}
-	} else {
-		if err = serveOneTimeRun(outputRunner); err != nil {
-			return fmt.Errorf(err.Error())
-		}
-	}
-	return nil
-}
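The daemonized path now wires one handler per POSIX signal and blocks in daemon.ServeSignals; returning daemon.ErrStop from a handler is what ends that loop. A reduced, self-contained sketch of the wiring, with stand-in handler bodies:

package main

import (
	"os"
	"syscall"

	"github.com/sevlyar/go-daemon"
	log "github.com/sirupsen/logrus"
)

func term(sig os.Signal) error {
	log.Info("terminating")
	return daemon.ErrStop // makes ServeSignals return
}

func hup(sig os.Signal) error {
	log.Info("reloading") // stand-in for the reloadHandler above
	return nil            // nil keeps ServeSignals blocking
}

func main() {
	daemon.SetSigHandler(term, syscall.SIGTERM)
	daemon.SetSigHandler(hup, syscall.SIGHUP)
	if err := daemon.ServeSignals(); err != nil {
		log.Fatalf("serve signals : %s", err)
	}
}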
@@ -4,10 +4,11 @@ After=syslog.target network.target remote-fs.target nss-lookup.target

[Service]
Type=forking
-#PIDFile=${PID}/crowdsec.pid
-ExecStartPre=${BIN} -c ${CFG}/default.yaml -t
+PIDFile=${PID}/crowdsec.pid
+#ExecStartPre=${BIN} -c ${CFG}/default.yaml -t
ExecStart=${BIN} -c ${CFG}/default.yaml
ExecStartPost=/bin/sleep 0.1
+ExecReload=/bin/kill -HUP $MAINPID

[Install]
WantedBy=multi-user.target
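With PIDFile uncommented and the new ExecReload line, a live configuration reload goes through systemd, which sends SIGHUP to the main PID; crowdsec's reloadHandler then serializes bucket state, reloads parsers, scenarios and outputs, and restores the state. Assuming the ${PID} template expands to /var/run as in the shipped defaults, either of these triggers it:

    systemctl reload crowdsec
    kill -HUP "$(cat /var/run/crowdsec.pid)"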
@@ -238,6 +238,9 @@ LOOP:
		select {
		case <-AcquisTomb.Dying(): //we are being killed by main
			clog.Infof("Killing acquistion routine")
+			if err := ctx.tail.Stop(); err != nil {
+				clog.Errorf("error in stop : %s", err)
+			}
			break LOOP
		case <-ctx.tail.Tomb.Dying(): //our tailer is dying
			clog.Warningf("Reader is dying/dead")

@@ -254,9 +257,8 @@ LOOP:
			if line.Text == "" { //skip empty lines
				continue
			}
-			if ctx.Profiling {
-				ReaderHits.With(prometheus.Labels{"source": ctx.Filename}).Inc()
-			}
+			ReaderHits.With(prometheus.Labels{"source": ctx.Filename}).Inc()

			l.Raw = line.Text
			l.Labels = ctx.Labels
			l.Time = line.Time
@@ -32,10 +32,10 @@ type CrowdSec struct {
	SQLiteFile string `yaml:"sqlite_path,omitempty"` //path to sqlite output
	APIMode bool `yaml:"apimode,omitempty"` //true -> enable api push
	CsCliFolder string `yaml:"cscli_dir"` //cscli folder
+	NbParsers int `yaml:"parser_routines"` //the number of go routines to start for parsing
	Linter bool
	Prometheus bool
	HTTPListen string `yaml:"http_listen,omitempty"`
	ValidatorMode string /*if present points to a specific config (for tests)*/
	RestoreMode string
	DumpBuckets bool
	OutputConfig *outputs.OutputFactory `yaml:"plugin"`

@@ -47,14 +47,15 @@ func NewCrowdSecConfig() *CrowdSec {
		LogLevel: log.InfoLevel,
		Daemonize: false,
		Profiling: false,
-		WorkingFolder: "./",
-		DataFolder: "./data/",
-		ConfigFolder: "./config/",
-		PIDFolder: "./",
-		LogFolder: "./",
+		WorkingFolder: "/tmp/",
+		DataFolder: "/var/lib/crowdsec/data/",
+		ConfigFolder: "/etc/crowdsec/config/",
+		PIDFolder: "/var/run/",
+		LogFolder: "/var/log/",
		LogMode: "stdout",
-		SQLiteFile: "./test.db",
+		SQLiteFile: "/var/lib/crowdsec/data/crowdsec.db",
		APIMode: false,
+		NbParsers: 1,
		Prometheus: false,
		HTTPListen: "127.0.0.1:6060",
	}

@@ -95,7 +96,6 @@ func (c *CrowdSec) GetOPT() error {
	daemonMode := flag.Bool("daemon", false, "Daemonize, go background, drop PID file, log to file")
	testMode := flag.Bool("t", false, "only test configs")
	prometheus := flag.Bool("prometheus-metrics", false, "expose http prometheus collector (see http_listen)")
	validatorMode := flag.String("custom-config", "", "[dev] run a specific subset of configs parser:file.yaml,scenarios:file.yaml")
	restoreMode := flag.String("restore-state", "", "[dev] restore buckets state from json file")
	dumpMode := flag.Bool("dump-state", false, "[dev] Dump bucket state at the end of run.")

@@ -140,9 +140,6 @@ func (c *CrowdSec) GetOPT() error {
	if *testMode {
		c.Linter = true
	}
	if *validatorMode != "" {
		c.ValidatorMode = *validatorMode
	}
	/*overriden by cmdline*/
	if *daemonMode {
		c.Daemonize = true
@@ -15,6 +15,7 @@ import (
	"gopkg.in/yaml.v2"

	"github.com/dghubble/sling"
+	"gopkg.in/tomb.v2"
)

type ApiCtx struct {

@@ -37,6 +38,7 @@ type ApiCtx struct {
	tokenExpired bool `yaml:"-"`
	toPush []types.Event `yaml:"-"`
	Http *sling.Sling `yaml:"-"`
+	PusherTomb tomb.Tomb
}

type ApiCreds struct {

@@ -94,7 +96,7 @@ func (ctx *ApiCtx) LoadConfig(cfg string) error {
		log.Warningf("!API paths must not be prefixed by /")
	}

-	ctx.Http = sling.New().Base(ctx.BaseURL+"/"+ctx.ApiVersion+"/").Set("User-Agent", fmt.Sprintf("CrowdWatch/%s", cwversion.VersionStr()))
+	ctx.Http = sling.New().Base(ctx.BaseURL+"/"+ctx.ApiVersion+"/").Set("User-Agent", fmt.Sprintf("Crowdsec/%s", cwversion.VersionStr()))
	log.Printf("api load configuration: configuration loaded successfully (base:%s)", ctx.BaseURL+"/"+ctx.ApiVersion+"/")
	return nil
}

@@ -113,7 +115,23 @@ func (ctx *ApiCtx) Init(cfg string, profile string) error {
		return err
	}
	//start the background go-routine
-	go ctx.pushLoop() //nolint:errcheck // runs into the background, we can't check error with chan or such
+	ctx.PusherTomb.Go(func() error {
+		err := ctx.pushLoop()
+		if err != nil {
+			log.Errorf("api push error : %s", err)
+			return err
+		}
+		return nil
+	})
	return nil
}

+func (ctx *ApiCtx) Shutdown() error {
+	ctx.PusherTomb.Kill(nil)
+	log.Infof("Waiting for API routine to finish")
+	if err := ctx.PusherTomb.Wait(); err != nil {
+		return fmt.Errorf("API routine returned error : %s", err)
+	}
+	return nil
+}
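Swapping the bare go ctx.pushLoop() for a tomb-managed goroutine is what makes the new Shutdown method work: Kill marks the tomb dying (which pushLoop observes in its select, next hunk) and Wait blocks until the goroutine has actually returned. A self-contained sketch of the pattern, with an illustrative loop body:

package main

import (
	"fmt"
	"time"

	"gopkg.in/tomb.v2"
)

type pusher struct{ t tomb.Tomb }

func (p *pusher) loop() error {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			fmt.Println("push batch") // stand-in for the real API push
		case <-p.t.Dying(): // Kill() below unblocks this case
			return nil
		}
	}
}

func main() {
	p := &pusher{}
	p.t.Go(p.loop)
	time.Sleep(250 * time.Millisecond)
	p.t.Kill(nil)
	if err := p.t.Wait(); err != nil {
		fmt.Println("push loop failed:", err)
	}
}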
@@ -105,6 +105,9 @@ func (ctx *ApiCtx) pushLoop() error {
			if err != nil {
				log.Errorf("api push loop: %s", err.Error())
			}
+		case <-ctx.PusherTomb.Dying(): //we are being killed by main
+			log.Infof("Killing api routine")
+			return nil
		}
	}
@@ -20,6 +20,7 @@ type Backend interface {
	Delete(string) (int, error)
	Init(map[string]string) error
	Flush() error
+	Shutdown() error
	DeleteAll() error
}

@@ -82,7 +83,7 @@ func NewBackendPlugin(path string, isDaemon bool) (*BackendManager, error) {
	plugNew := symNew()
	bInterface, ok := plugNew.(Backend)
	if !ok {
-		return nil, fmt.Errorf("unexpected '%s' type, skipping", newPlugin.Name)
+		return nil, fmt.Errorf("unexpected '%s' type (%T), skipping", newPlugin.Name, plugNew)
	}

	// Add the interface and Init()

@@ -120,6 +121,17 @@ func (b *BackendManager) Delete(target string) (int, error) {
	return nbDel, nil
}

+func (b *BackendManager) Shutdown() error {
+	var err error
+	for _, plugin := range b.backendPlugins {
+		err = plugin.funcs.Shutdown()
+		if err != nil {
+			return fmt.Errorf("failed to shutdown : %s", err)
+		}
+	}
+	return nil
+}

func (b *BackendManager) DeleteAll() error {
	var err error
	for _, plugin := range b.backendPlugins {
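The richer error now includes the concrete type via %T, the usual way to debug a failed interface assertion on a plugin symbol. A reduced sketch of the lookup-and-assert sequence; the plugin path and the exported New symbol are assumptions for illustration, not the project's exact contract:

package main

import (
	"fmt"
	"plugin"
)

// Backend is a subset of the interface from the diff above
type Backend interface {
	Init(map[string]string) error
	Shutdown() error
}

func load(path string) (Backend, error) {
	p, err := plugin.Open(path) // e.g. "./sqlite.so"
	if err != nil {
		return nil, err
	}
	sym, err := p.Lookup("New") // plugin is assumed to export: func New() interface{}
	if err != nil {
		return nil, err
	}
	symNew, ok := sym.(func() interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected 'New' symbol type (%T)", sym)
	}
	v := symNew()
	b, ok := v.(Backend)
	if !ok {
		// %T shows what the plugin actually returned, which is the debugging aid
		return nil, fmt.Errorf("unexpected plugin type (%T), skipping", v)
	}
	return b, nil
}

func main() {
	if _, err := load("./sqlite.so"); err != nil {
		fmt.Println(err)
	}
}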
@@ -26,12 +26,8 @@ const (
	TIMEMACHINE
)

-//the bucket itself
+//Leaky represents one instance of a bucket
type Leaky struct {
-	//action_overflow
-	//OverflowAction string
-	//bucket actions
-	//Actions []string
	Name string
	Mode int //LIVE or TIMEMACHINE
	//the limiter is what holds the proper "leaky aspect", it determines when/if we can pour objects

@@ -68,10 +64,6 @@ type Leaky struct {
	Profiling bool
	timedOverflow bool
	logger *log.Entry
-	//as the rate-limiter is intended for http or such, we need to have a separate mechanism to track 'empty' bucket.
-	//we use a go-routine that use waitN to know when the bucket is empty (N would be equal to bucket capacity)
-	//as it try to reserves the capacity, we need to cancel it before we can pour in the bucket
-	//reservation *rate.Reservation
}

var BucketsPour = prometheus.NewCounterVec(

@@ -106,15 +98,23 @@ var BucketsInstanciation = prometheus.NewCounterVec(
	[]string{"name"},
)

-func NewLeaky(g BucketFactory) *Leaky {
-	g.logger.Tracef("Instantiating live bucket %s", g.Name)
-	return FromFactory(g)
-}
+var BucketsCurrentCount = prometheus.NewGaugeVec(
+	prometheus.GaugeOpts{
+		Name: "cs_bucket_count",
+		Help: "How many instances of this bucket exist.",
+	},
+	[]string{"name"},
+)

+// Newleaky creates a new leaky bucket from a BucketFactory
+// Events created by the bucket (overflow, bucket empty) are sent to a chan defined by BucketFactory
+// The leaky bucket implementation is based on rate limiter (see https://godoc.org/golang.org/x/time/rate)
+// There's a trick to have an event said when the bucket gets empty to allow its destruction
+func NewLeaky(g BucketFactory) *Leaky {
+	g.logger.Tracef("Instantiating live bucket %s", g.Name)
+	return FromFactory(g)
+}

func FromFactory(g BucketFactory) *Leaky {
	var limiter rate.RateLimiter
	//golang rate limiter. It's mainly intended for http rate limiter

@@ -135,9 +135,8 @@ func FromFactory(g BucketFactory) *Leaky {
	} else {
		limiter = rate.NewLimiter(rate.Every(g.leakspeed), g.Capacity)
	}
-	if g.Profiling {
-		BucketsInstanciation.With(prometheus.Labels{"name": g.Name}).Inc()
-	}
+	BucketsInstanciation.With(prometheus.Labels{"name": g.Name}).Inc()

	//create the leaky bucket per se
	l := &Leaky{
		Name: g.Name,

@@ -169,12 +168,16 @@
var LeakyRoutineCount int64

-/* for now mimic a leak routine */
+//LeakRoutine us the life of a bucket. It dies when the bucket underflows or overflows
func LeakRoutine(l *Leaky) {

	var (
		durationTicker <-chan time.Time = make(<-chan time.Time)
	)

+	BucketsCurrentCount.With(prometheus.Labels{"name": l.Name}).Inc()
+	defer BucketsCurrentCount.With(prometheus.Labels{"name": l.Name}).Dec()

	/*todo : we create a logger at runtime while we want leakroutine to be up asap, might not be a good idea*/
	l.logger = l.BucketConfig.logger.WithFields(log.Fields{"capacity": l.Capacity, "partition": l.Mapkey, "bucket_id": l.Uuid})
@@ -192,7 +195,6 @@ func LeakRoutine(l *Leaky) {
	}

	l.logger.Debugf("Leaky routine starting, lifetime : %s", l.Duration)
-	defer l.logger.Debugf("Leaky routine exiting")
	for {
		select {
		/*receiving an event*/

@@ -208,9 +210,8 @@ func LeakRoutine(l *Leaky) {
			l.logger.Tracef("Pour event: %s", spew.Sdump(msg))
			l.logger.Debugf("Pouring event.")

-			if l.Profiling {
-				BucketsPour.With(prometheus.Labels{"name": l.Name, "source": msg.Line.Src}).Inc()
-			}
+			BucketsPour.With(prometheus.Labels{"name": l.Name, "source": msg.Line.Src}).Inc()

			l.Pour(l, msg) // glue for now
			//Clear cache on behalf of pour
			tmp := time.NewTicker(l.Duration)

@@ -236,9 +237,9 @@ func LeakRoutine(l *Leaky) {
			l.logger.Tracef("Overflow event: %s", spew.Sdump(types.Event{Overflow: sig}))
			mt, _ := l.Ovflw_ts.MarshalText()
			l.logger.Tracef("overflow time : %s", mt)
-			if l.Profiling {
-				BucketsOverflow.With(prometheus.Labels{"name": l.Name}).Inc()
-			}
+			BucketsOverflow.With(prometheus.Labels{"name": l.Name}).Inc()

			l.AllOut <- types.Event{Overflow: sig, Type: types.OVFLW, MarshaledTime: string(mt)}
			return
		/*we underflow or reach bucket deadline (timers)*/

@@ -249,9 +250,8 @@ func LeakRoutine(l *Leaky) {
			sig := types.SignalOccurence{MapKey: l.Mapkey}

			if l.timedOverflow {
-				if l.Profiling {
-					BucketsOverflow.With(prometheus.Labels{"name": l.Name}).Inc()
-				}
+				BucketsOverflow.With(prometheus.Labels{"name": l.Name}).Inc()

				sig = FormatOverflow(l, ofw)
				for _, f := range l.BucketConfig.processors {
					sig, ofw = f.OnBucketOverflow(l.BucketConfig)(l, sig, ofw)
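The Inc at LeakRoutine start paired with the deferred Dec is what feeds the new cs_bucket_count gauge: it counts live bucket routines per bucket name, rising on creation and falling when a routine overflows or underflows away. A self-contained sketch of that lifecycle, with illustrative names:

package main

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
)

var liveBuckets = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{Name: "cs_bucket_count", Help: "live bucket instances"},
	[]string{"name"},
)

func leakRoutine(name string, wg *sync.WaitGroup) {
	defer wg.Done()
	liveBuckets.With(prometheus.Labels{"name": name}).Inc()
	defer liveBuckets.With(prometheus.Labels{"name": name}).Dec() // runs when the routine dies
	// ... the pour / overflow / underflow select loop would live here ...
}

func main() {
	prometheus.MustRegister(liveBuckets)
	var wg sync.WaitGroup
	wg.Add(1)
	go leakRoutine("crowdsecurity/ssh-bf", &wg)
	wg.Wait()
}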
@@ -178,23 +178,27 @@ POLL_AGAIN:
	check the results we got against the expected ones
	only the keys of the expected part are checked against result
	*/
+	var tmpFile string

	for {
		if len(tf.Results) == 0 && len(results) == 0 {
			log.Warningf("Test is successfull")
			if dump {
-				if err := DumpBucketsStateAt(bs+".new", latest_ts, buckets); err != nil {
+				if tmpFile, err = DumpBucketsStateAt(latest_ts, buckets); err != nil {
					t.Fatalf("Failed dumping bucket state : %s", err)
				}
+				log.Infof("dumped bucket to %s", tmpFile)
			}
			return true
		} else {
			log.Warningf("%d results to check against %d expected results", len(results), len(tf.Results))
			if len(tf.Results) != len(results) {
				if dump {
-					if err := DumpBucketsStateAt(bs+".new", latest_ts, buckets); err != nil {
+					if tmpFile, err = DumpBucketsStateAt(latest_ts, buckets); err != nil {
						t.Fatalf("Failed dumping bucket state : %s", err)
					}
+					log.Infof("dumped bucket to %s", tmpFile)

				}
				log.Errorf("results / expected count doesn't match results = %d / expected = %d", len(results), len(tf.Results))
				return false
@@ -375,6 +375,7 @@ func GarbageCollectBuckets(deadline time.Time, buckets *Buckets) error {
		key := rkey.(string)
		val := rvalue.(*Leaky)
+		total += 1
		//bucket already overflowed, we can kill it
		if !val.Ovflw_ts.IsZero() {
			discard += 1
			val.logger.Debugf("overflowed at %s.", val.Ovflw_ts)

@@ -388,6 +389,7 @@ func GarbageCollectBuckets(deadline time.Time, buckets *Buckets) error {
		tokcapa := float64(val.Capacity)
		tokat = math.Round(tokat*100) / 100
		tokcapa = math.Round(tokcapa*100) / 100
		//bucket actually underflowed based on log time, but no in real time
		if tokat >= tokcapa {
+			BucketsUnderflow.With(prometheus.Labels{"name": val.Name}).Inc()
			val.logger.Debugf("UNDERFLOW : first_ts:%s tokens_at:%f capcity:%f", val.First_ts, tokat, tokcapa)

@@ -412,7 +414,14 @@ func GarbageCollectBuckets(deadline time.Time, buckets *Buckets) error {
	return nil
}

-func DumpBucketsStateAt(file string, deadline time.Time, buckets *Buckets) error {
+func DumpBucketsStateAt(deadline time.Time, buckets *Buckets) (string, error) {
+	//var file string
+	tmpFd, err := ioutil.TempFile(os.TempDir(), "crowdsec-buckets-dump-")
+	if err != nil {
+		return "", fmt.Errorf("failed to create temp file : %s", err)
+	}
+	defer tmpFd.Close()
+	tmpFileName := tmpFd.Name()
	serialized = make(map[string]Leaky)
	log.Printf("Dumping buckets state at %s", deadline)
	total := 0

@@ -455,11 +464,23 @@ func DumpBucketsStateAt(file string, deadline time.Time, buckets *Buckets) error
	if err != nil {
		log.Fatalf("Failed to unmarshal buckets : %s", err)
	}
-	err = ioutil.WriteFile(file, bbuckets, 0644)
+	size, err := tmpFd.Write(bbuckets)
	if err != nil {
-		log.Fatalf("Failed to write buckets state %s", err)
+		return "", fmt.Errorf("failed to write temp file : %s", err)
	}
-	log.Warningf("Serialized %d live buckets state, %d total with %d expired to %s", len(serialized), total, discard, file)
+	log.Infof("Serialized %d live buckets (+%d expired) in %d bytes to %s", len(serialized), discard, size, tmpFd.Name())
	serialized = nil
+	return tmpFileName, nil
}

+func ShutdownAllBuckets(buckets *Buckets) error {
+	buckets.Bucket_map.Range(func(rkey, rvalue interface{}) bool {
+		key := rkey.(string)
+		val := rvalue.(*Leaky)
+		val.KillSwitch <- true
+		log.Infof("killed %s", key)
+		return true
+	})
+	return nil
+}
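DumpBucketsStateAt now chooses its own destination through ioutil.TempFile instead of taking a path, and hands the generated name back to the caller, who may later os.Remove it once a restore has succeeded, as reloadHandler does. A self-contained sketch of the idiom:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

func dump(data []byte) (string, error) {
	// the "crowdsec-buckets-dump-" prefix gets a random suffix appended
	tmpFd, err := ioutil.TempFile(os.TempDir(), "crowdsec-buckets-dump-")
	if err != nil {
		return "", fmt.Errorf("failed to create temp file : %s", err)
	}
	defer tmpFd.Close()
	if _, err := tmpFd.Write(data); err != nil {
		return "", fmt.Errorf("failed to write temp file : %s", err)
	}
	return tmpFd.Name(), nil
}

func main() {
	name, err := dump([]byte(`{"buckets":{}}`))
	if err != nil {
		panic(err)
	}
	fmt.Println("state dumped to", name) // caller is responsible for removing it
}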
@@ -81,6 +81,25 @@ func OvflwToOrder(sig types.SignalOccurence, prof types.Profile) (*types.BanOrde
	return &ordr, nil, warn
}

+func (o *Output) Shutdown() error {
+	var reterr error
+	if o.API != nil {
+		if err := o.API.Shutdown(); err != nil {
+			log.Errorf("error while shutting down API : %s", err)
+			reterr = err
+		}
+	}
+	if o.bManager != nil {
+		if err := o.bManager.Shutdown(); err != nil {
+			log.Errorf("error while shutting down backend : %s", err)
+			reterr = err
+		}
+	}
+	//bManager
+	//TBD : the backend(s) should be stopped in the same way
+	return reterr
+}

func (o *Output) FlushAll() {
	if o.API != nil {
		if err := o.API.Flush(); err != nil {
@@ -141,7 +141,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
		NodeState = true
	}

-	if n.Profiling && n.Name != "" {
+	if n.Name != "" {
		NodesHits.With(prometheus.Labels{"source": p.Line.Src, "name": n.Name}).Inc()
	}
	set := false

@@ -285,14 +285,14 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {

	//grok or leafs failed, don't process statics
	if !NodeState {
-		if n.Profiling && n.Name != "" {
+		if n.Name != "" {
			NodesHitsKo.With(prometheus.Labels{"source": p.Line.Src, "name": n.Name}).Inc()
		}
		clog.Debugf("Event leaving node : ko")
		return NodeState, nil
	}

-	if n.Profiling && n.Name != "" {
+	if n.Name != "" {
		NodesHitsOk.With(prometheus.Labels{"source": p.Line.Src, "name": n.Name}).Inc()
	}
	if len(n.Statics) > 0 {
@@ -35,6 +35,22 @@ func (c *Context) AutoCommit() {
	ticker := time.NewTicker(200 * time.Millisecond)
	for {
		select {
+		case <-c.PusherTomb.Dying():
+			//we need to shutdown
+			log.Infof("sqlite routine shutdown")
+			if err := c.Flush(); err != nil {
+				log.Errorf("error while flushing records: %s", err)
+			}
+			if ret := c.tx.Commit(); ret.Error != nil {
+				log.Errorf("failed to commit records : %v", ret.Error)
+			}
+			if err := c.tx.Close(); err != nil {
+				log.Errorf("error while closing tx : %s", err)
+			}
+			if err := c.Db.Close(); err != nil {
+				log.Errorf("error while closing db : %s", err)
+			}
+			return
		case <-ticker.C:
			if atomic.LoadInt32(&c.count) != 0 &&
				(atomic.LoadInt32(&c.count)%100 == 0 || time.Since(c.lastCommit) >= 500*time.Millisecond) {
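AutoCommit's select now has two exits: the periodic ticker that commits in batches, and the tomb's Dying channel that triggers the final flush, commit and close. A reduced, self-contained sketch of that shape, with stand-in commit bodies:

package main

import (
	"fmt"
	"time"

	"gopkg.in/tomb.v2"
)

func autoCommit(t *tomb.Tomb) error {
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-t.Dying():
			fmt.Println("final flush + commit + close") // stand-in for Flush/Commit/Close
			return nil
		case <-ticker.C:
			fmt.Println("periodic commit") // stand-in for the batched tx.Commit
		}
	}
}

func main() {
	var t tomb.Tomb
	t.Go(func() error { return autoCommit(&t) })
	time.Sleep(500 * time.Millisecond)
	t.Kill(nil)
	_ = t.Wait()
}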
@@ -12,6 +12,7 @@ import (
	"github.com/jinzhu/gorm"
	_ "github.com/jinzhu/gorm/dialects/sqlite"
	_ "github.com/mattn/go-sqlite3"
+	"gopkg.in/tomb.v2"
)

type Context struct {

@@ -21,6 +22,7 @@ type Context struct {
	flush bool
	count int32
	lock sync.Mutex //booboo
+	PusherTomb tomb.Tomb
}

func NewSQLite(cfg map[string]string) (*Context, error) {

@@ -62,6 +64,9 @@ func NewSQLite(cfg map[string]string) (*Context, error) {
	if c.tx == nil {
		return nil, fmt.Errorf("failed to begin sqlite transac : %s", err)
	}
-	go c.AutoCommit()
+	c.PusherTomb.Go(func() error {
+		c.AutoCommit()
+		return nil
+	})
	return c, nil
}
@@ -5,7 +5,6 @@ import (
	"encoding/binary"
	"encoding/gob"
	"fmt"
-	"io"
	"net"

	log "github.com/sirupsen/logrus"

@@ -49,7 +48,7 @@ func LastAddress(n *net.IPNet) net.IP {
}

var logFormatter log.Formatter
-var logOutput io.Writer
+var LogOutput *lumberjack.Logger //io.Writer
var logLevel log.Level
var logReportCaller bool

@@ -57,14 +56,14 @@ func SetDefaultLoggerConfig(cfgMode string, cfgFolder string, cfgLevel log.Level

	/*Configure logs*/
	if cfgMode == "file" {
-		logOutput = &lumberjack.Logger{
+		LogOutput = &lumberjack.Logger{
			Filename: cfgFolder + "/crowdsec.log",
			MaxSize: 500, //megabytes
			MaxBackups: 3,
			MaxAge: 28, //days
			Compress: true, //disabled by default
		}
-		log.SetOutput(logOutput)
+		log.SetOutput(LogOutput)
	} else if cfgMode != "stdout" {
		return fmt.Errorf("log mode '%s' unknown", cfgMode)
	}

@@ -83,8 +82,8 @@
func ConfigureLogger(clog *log.Logger) error {
	/*Configure logs*/
-	if logOutput != nil {
-		clog.SetOutput(logOutput)
+	if LogOutput != nil {
+		clog.SetOutput(LogOutput)
	}
	if logReportCaller {
		clog.SetReportCaller(true)
@@ -1,6 +1,7 @@
package main

import (
+	"fmt"
	"time"

	"github.com/crowdsecurity/crowdsec/pkg/sqlite"

@@ -13,6 +14,15 @@ type pluginDB struct {
	CTX *sqlite.Context
}

+func (p *pluginDB) Shutdown() error {
+	p.CTX.PusherTomb.Kill(nil)
+	if err := p.CTX.PusherTomb.Wait(); err != nil {
+		return fmt.Errorf("DB shutdown error : %s", err)
+	}
+
+	return nil
+}

func (p *pluginDB) Init(config map[string]string) error {
	var err error
	log.Debugf("sqlite config : %+v \n", config)
@@ -1,32 +0,0 @@
-2018-02-07T18:00:06+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2
-2018-02-07T18:00:09+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2
-2018-02-07T18:00:12+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2
-2018-02-07T18:00:12+01:00 eqx10863 sshd[13934]: Disconnecting: Too many authentication failures for root from 192.168.13.38 port 39596 ssh2 [preauth]
-2018-02-07T18:00:21+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
-2018-02-07T18:00:23+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
-2018-02-07T18:00:26+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
-2018-02-07T18:00:29+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
-2018-02-07T18:00:31+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
-2018-02-07T18:00:31+01:00 eqx10863 sshd[13952]: Disconnecting: Too many authentication failures for root from 192.168.13.38 port 2377 ssh2 [preauth]
-2018-02-07T18:00:06+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2
-2018-02-07T18:00:09+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2
-2018-02-07T18:00:12+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2
-2018-02-07T18:00:12+01:00 eqx10863 sshd[13934]: Disconnecting: Too many authentication failures for root from 192.168.13.38 port 39596 ssh2 [preauth]
-2018-02-07T18:00:21+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
-2018-02-07T18:00:23+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
-2018-02-07T18:00:26+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
-2018-02-07T18:00:29+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
-2018-02-07T18:00:31+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
-2018-02-07T18:00:31+01:00 eqx10863 sshd[13952]: Disconnecting: Too many authentication failures for root from 192.168.13.38 port 2377 ssh2 [preauth]
-2018-02-07T18:00:31+01:00 eqx10863 sshd[13952]: PAM 5 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=192.168.13.38 user=root
-2018-02-07T18:00:31+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2
-2018-02-07T18:00:31+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2
-2018-02-07T18:00:32+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2
-2018-02-07T18:00:32+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2
-2018-02-07T18:00:33+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2
-2018-02-07T18:00:34+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2
-2018-02-07T18:00:34+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2
-2018-02-07T18:00:34+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2
-2018-02-07T18:00:34+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2
-2018-02-07T18:00:34+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2
-2018-02-07T18:00:34+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.37 port 2377 ssh2
@ -1,2 +0,0 @@
|
|||
type: syslog
|
||||
|
|
@ -1,6 +0,0 @@
|
|||
- filename: ./hub/parsers/s00-raw/crowdsecurity/syslog-logs.yaml
|
||||
stage: s00-raw
|
||||
- filename: ./hub/parsers/s01-parse/crowdsecurity/sshd-logs.yaml
|
||||
stage: s01-parse
|
||||
- filename: ./hub/parsers/s02-enrich/crowdsecurity/dateparse-enrich.yaml
|
||||
stage: s02-enrich
|
|
@ -1 +0,0 @@
|
|||
- filename: ./hub/scenarios/crowdsecurity/ssh-bf.yaml
|
|
@ -1,3 +0,0 @@
|
|||
select count(*) == 1 from signal_occurences where source_ip = "192.168.13.38" and scenario = "crowdsecurity/ssh-bf"
|
||||
select count(*) == 1 from signal_occurences where source_ip = "192.168.13.37" and scenario = "crowdsecurity/ssh-bf"
|
||||
|
|
@ -1 +0,0 @@
|
|||
2018-04-27T15:46:50+02:00 rp-ch-01 nginx: 2018/04/27 15:46:50 [error] 20329#0: *81170632 NAXSI_EXLOG: ip=191.154.37.115&server=cogedis.trustelem.com&uri=/app/55773/sso&id=10091&zone=ARGS&var_name=signature&content=gTyxddzKMBjOQ6iiNXsauWKyznrWzgzobNS5L226v23%2BSvh0z8uKrZbErckzPs7sF1Yif/T9P1O2Fmm05mSu1%2BL/TBAt1G2JsDv2%2B0zp2blECZFMMTfpgcyIeITDgh8HGM5GR9K2diB6/d1g5yShZs6Vm9%2BMCtXVO4gfpFwH4sSM7jbjU5xbShmiKkYNn3O8f3ZAdnZpk3%2BELVcODIGWwhRuN9Hy6agMirzx4PMTUWcDmdnB9W4iDcV/k28xnxuBE0vNw1JAL9sOSqrBnzqKk%2BUx9kt9hfEofvDYPvLfWiU56oEd8yzT1fEn21dzA6BcOCetzYoNjSdYDreKQm4O%2BVAgn90WKjvcORK%2BO3CkPR5%2B9N4d1hMLc10ZrKps4iHiJMG%2BRHvzBxL3yeYGdmdjX%2Bf6ZKjPkI3dTwP9379Wong0/DZ4BQ8ZC6SozID68PXybKynOGauaUxKCt3y3fAXSLH1Qtcl70kVQ9eQa1q%2B%2BZxujCGJ33sVl6ps10iLn2lYoJ85CAXCk%2B7p%2BMKOQzwGaFUBuVMgVbxATRQPnCN%2BHPymQ23LwWtKQbvRtJpahyPR9Yb6mUbf7JO1H2XF6%2BsPp4pcIZqv/SwJlgxSkPT5ehnJjLUhVIFu6SGlau1C0B/LUgHoZ8c%2Bkoy%2BfzzPqQPO2I1Y5SXFWwFPU6dbBgz1p%2BQ=, client: 77.136.47.223, server: www.trustelem.com, request: "GET /app/55773/sso?SAMLRequest=fZJbc6owFIX%2FCpN3NCJUZIqdtHihglfU2hcmjRGwQDAJaPvrD%2Bpxpuc8dM%2FkIbP3WiuX7%2FHpnKVKRblIWG6DVgMCheaE7ZI8ssEqGKgmeOo9CpylhYVKGecLeiypkEqty4V1bdig5LnFsEiEleOMCksSa4l8z9Ia0Co4k4ywFChICMplHfTCclFmlC8prxJCVwvPBrGUhbCazWRHsSopiXOWsiihopF9NQROqdgzTmiDsOxJMBtCxzDhtWbaNgKKUx8qybG83uNuRlhEd4loSF4KSVOaXeRNXBRNw%2Bh02k0hGFBcxwah9oLq2kzf1PMG%2BX3zNAmik%2B%2Bgy4Lz7094abe8aDMIk%2B3gIYz7zmrGzYU26n8Rrnn7c3beIndjurm63Q2HqTg%2Ff3M1LeHSgL67LraTKD6ij5ggPVjrHwjiKqlN8cP3J0F9nfnF4ICNlbtIzdepF3jxpDIO%2BxF3dv336t1cqN0Xz5fz1f4Ai7QfszOVejUMsoOero9V130bw8ioxsjcxQe9%2B6qy6tBpif0Yh1lZlGietsnpzRkQj0WOxK%2BeHh4jDTPzxMQUr8LhKFTna6KNfX5oLRblftyuw4elQMOQH1MXn7OsTVD9WkKU1M2FxLm0gQZbpgp1VesELcPSHyy929DbnXegzP5%2B%2B3OS32D6jZGP25CwRkEwU2fTZQCU9R3KegDcELSu4fwHe7%2Fb4jtwoHcn4iL6D6fH5g%2Fv3m33L%2By9Pw%3D%3D&RelayState=%2Fa085800002amsSg&SigAlg=http%3A%2F%2Fwww.w3.org%2F2001%2F04%2Fxmldsig-more%23rsa-sha256&Signature=gTyxddzKMBjOQ6iiNXsauWKyznrWzgzobNS5L226v23%2BSvh0z8uKrZbErckzPs7sF1Yif%2FT9P1O2Fmm05mSu1%2BL%2FTBAt1G2JsDv2%2B0zp2blECZFMMTfpgcyIeITDgh8HGM5GR9K2diB6%2Fd1g5yShZs6Vm9%2BMCt
|
|
@ -1 +0,0 @@
|
|||
type: syslog
|
|
@ -1,9 +0,0 @@
|
|||
- filename: ./hub/parsers/s00-raw/crowdsecurity/syslog-logs.yaml
|
||||
stage: s00-raw
|
||||
- filename: ./hub/parsers/s01-parse/crowdsecurity/nginx-logs.yaml
|
||||
stage: s01-parse
|
||||
#it's a bit nasty : naxsi is in enrich phase because it parses nginx error log parser output
|
||||
- filename: ./hub/parsers/s02-enrich/crowdsecurity/naxsi-logs.yaml
|
||||
stage: s02-enrich
|
||||
- filename: ./hub/parsers/s02-enrich/crowdsecurity/dateparse-enrich.yaml
|
||||
stage: s02-enrich
|
|
@ -1,2 +0,0 @@
|
|||
- filename: ./hub/scenarios/crowdsecurity/naxsi-exploit-vpatch.yaml
|
||||
|
|
@ -1 +0,0 @@
|
|||
select count(*) == 1 from signal_occurences where source_ip = "191.154.37.115" and scenario = "crowdsecurity/naxsi-exploit-vpatch"
|
|
@ -1,6 +0,0 @@
|
|||
2017-12-01T14:47:42+01:00 rp-ch-01 nginx: 192.168.13.38 - - [01/Dec/2017:14:47:42 +0000] "POST /lh-magazine/wp-login.php HTTP/1.1" 200 4249 "http://www.lahalle.com/lh-magazine/wp-login.php" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
|
||||
2017-12-01T14:47:43+01:00 rp-ch-01 nginx: 192.168.13.38 - - [01/Dec/2017:14:47:43 +0000] "POST /lh-magazine/wp-login.php HTTP/1.1" 200 4249 "http://www.lahalle.com/lh-magazine/wp-login.php" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
|
||||
2017-12-01T14:47:44+01:00 rp-ch-01 nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "POST /lh-magazine/wp-login.php HTTP/1.1" 200 4249 "http://www.lahalle.com/lh-magazine/wp-login.php" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
|
||||
2017-12-01T14:47:45+01:00 rp-ch-01 nginx: 192.168.13.38 - - [01/Dec/2017:14:47:45 +0000] "POST /lh-magazine/wp-login.php HTTP/1.1" 200 4249 "http://www.lahalle.com/lh-magazine/wp-login.php" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
|
||||
2017-12-01T14:47:46+01:00 rp-ch-01 nginx: 192.168.13.38 - - [01/Dec/2017:14:47:46 +0000] "POST /lh-magazine/wp-login.php HTTP/1.1" 200 4249 "http://www.lahalle.com/lh-magazine/wp-login.php" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
|
||||
2017-12-01T14:47:48+01:00 rp-ch-01 nginx: 192.168.13.38 - - [01/Dec/2017:14:47:48 +0000] "POST /lh-magazine/wp-login.php HTTP/1.1" 200 4249 "http://www.lahalle.com/lh-magazine/wp-login.php" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
|
|
@ -1 +0,0 @@
|
|||
type: nginx
|
|
@ -1,9 +0,0 @@
|
|||
- filename: ./hub/parsers/s00-raw/crowdsecurity/syslog-logs.yaml
|
||||
stage: s00-raw
|
||||
- filename: ./hub/parsers/s01-parse/crowdsecurity/nginx-logs.yaml
|
||||
stage: s01-parse
|
||||
- filename: ./hub/parsers/s02-enrich/crowdsecurity/dateparse-enrich.yaml
|
||||
stage: s02-enrich
|
||||
- filename: ./hub/parsers/s02-enrich/crowdsecurity/http-logs.yaml
|
||||
stage: s02-enrich
|
||||
|
|
@ -1,3 +0,0 @@
|
|||
- filename: ./hub/scenarios/crowdsecurity/http-bf-wordpress_bf.yaml
|
||||
|
||||
|
|
@ -1 +0,0 @@
|
|||
select count(*) == 1 from signal_occurences where source_ip = "192.168.13.38" and scenario = "crowdsecurity/http-bf-wordpress_bf"
|
|
@ -1,7 +0,0 @@
Dec 13 00:31:12 ip-172-31-11-1.us-west-1.compute.internal smb[2762]: Auth: [SMB2,(null)] user [domainname]\[Administrator] at [Fri, 13 Dec 2019 00:31:12.487033 UTC] with [NTLMv2] status [NT_STATUS_NO_SUCH_USER] workstation [LOCALPCNAME] remote host [ipv4:61.6.206.22:65132] mapped to [domainname]\[Administrator]. local host [ipv4:172.18.0.3:445] #015
Dec 13 00:31:13 ip-172-31-11-1.us-west-1.compute.internal smb[2762]: Auth: [SMB2,(null)] user [domainname]\[Administrator] at [Fri, 13 Dec 2019 00:31:13.294397 UTC] with [NTLMv2] status [NT_STATUS_NO_SUCH_USER] workstation [LOCALPCNAME] remote host [ipv4:61.6.206.22:1391] mapped to [domainname]\[Administrator]. local host [ipv4:172.18.0.3:445] #015
Dec 13 00:31:14 ip-172-31-11-1.us-west-1.compute.internal smb[2762]: Auth: [SMB2,(null)] user [domainname]\[Administrator] at [Fri, 13 Dec 2019 00:31:14.108036 UTC] with [NTLMv2] status [NT_STATUS_NO_SUCH_USER] workstation [LOCALPCNAME] remote host [ipv4:61.6.206.22:2154] mapped to [domainname]\[Administrator]. local host [ipv4:172.18.0.3:445] #015
Dec 13 00:31:14 ip-172-31-11-1.us-west-1.compute.internal smb[2762]: Auth: [SMB2,(null)] user [domainname]\[Administrator] at [Fri, 13 Dec 2019 00:31:14.883233 UTC] with [NTLMv2] status [NT_STATUS_NO_SUCH_USER] workstation [LOCALPCNAME] remote host [ipv4:61.6.206.22:2893] mapped to [domainname]\[Administrator]. local host [ipv4:172.18.0.3:445] #015
Dec 13 00:31:15 ip-172-31-11-1.us-west-1.compute.internal smb[2762]: Auth: [SMB2,(null)] user [domainname]\[Administrator] at [Fri, 13 Dec 2019 00:31:13.294397 UTC] with [NTLMv2] status [NT_STATUS_NO_SUCH_USER] workstation [LOCALPCNAME] remote host [ipv4:61.6.206.22:1391] mapped to [domainname]\[Administrator]. local host [ipv4:172.18.0.3:445] #015
Dec 13 00:31:16 ip-172-31-11-1.us-west-1.compute.internal smb[2762]: Auth: [SMB2,(null)] user [domainname]\[Administrator] at [Fri, 13 Dec 2019 00:31:14.108036 UTC] with [NTLMv2] status [NT_STATUS_NO_SUCH_USER] workstation [LOCALPCNAME] remote host [ipv4:61.6.206.22:2154] mapped to [domainname]\[Administrator]. local host [ipv4:172.18.0.3:445] #015
Dec 13 00:31:17 ip-172-31-11-1.us-west-1.compute.internal smb[2762]: Auth: [SMB2,(null)] user [domainname]\[Administrator] at [Fri, 13 Dec 2019 00:31:14.883233 UTC] with [NTLMv2] status [NT_STATUS_NO_SUCH_USER] workstation [LOCALPCNAME] remote host [ipv4:61.6.206.22:2893] mapped to [domainname]\[Administrator]. local host [ipv4:172.18.0.3:445] #015

@ -1 +0,0 @@
type: syslog

@ -1,6 +0,0 @@
- filename: ./hub/parsers/s00-raw/crowdsecurity/syslog-logs.yaml
  stage: s00-raw
- filename: ./hub/parsers/s01-parse/crowdsecurity/smb-logs.yaml
  stage: s01-parse
- filename: ./hub/parsers/s02-enrich/crowdsecurity/dateparse-enrich.yaml
  stage: s02-enrich

@ -1,4 +0,0 @@
- filename: ./hub/scenarios/crowdsecurity/smb-bf.yaml

@ -1 +0,0 @@
select count(*) == 1 from signal_occurences where source_ip = "61.6.206.22" and scenario = "crowdsecurity/smb-bf"

@ -1,5 +0,0 @@
Dec 12 22:43:09 ip-172-31-11-1.us-west-1.compute.internal mysql[2762]: 2019-12-12T22:43:09.600659Z 120 [Note] Access denied for user 'root'@'106.3.44.207' (using password: YES)
Dec 12 22:43:10 ip-172-31-11-1.us-west-1.compute.internal mysql[2762]: 2019-12-12T22:43:10.408842Z 121 [Note] Access denied for user 'root'@'106.3.44.207' (using password: YES)
Dec 12 22:43:11 ip-172-31-11-1.us-west-1.compute.internal mysql[2762]: 2019-12-12T22:43:11.218794Z 122 [Note] Access denied for user 'root'@'106.3.44.207' (using password: YES)
Dec 12 22:43:12 ip-172-31-11-1.us-west-1.compute.internal mysql[2762]: 2019-12-12T22:43:12.027695Z 123 [Note] Access denied for user 'root'@'106.3.44.207' (using password: YES)
Dec 12 22:43:12 ip-172-31-11-1.us-west-1.compute.internal mysql[2762]: 2019-12-12T22:43:12.841040Z 124 [Note] Access denied for user 'root'@'106.3.44.207' (using password: YES)

@ -1 +0,0 @@
type: syslog

@ -1,6 +0,0 @@
- filename: ./hub/parsers/s00-raw/crowdsecurity/syslog-logs.yaml
  stage: s00-raw
- filename: ./hub/parsers/s01-parse/crowdsecurity/mysql-logs.yaml
  stage: s01-parse
- filename: ./hub/parsers/s02-enrich/crowdsecurity/dateparse-enrich.yaml
  stage: s02-enrich

@ -1,5 +0,0 @@
- filename: ./hub/scenarios/crowdsecurity/mysql-bf.yaml

@ -1 +0,0 @@
select count(*) == 1 from signal_occurences where source_ip = "106.3.44.207" and scenario = "crowdsecurity/mysql-bf"

@ -1,23 +0,0 @@
2018-02-07T18:00:00+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2
2018-02-07T18:00:00+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2
2018-02-07T18:00:00+01:00 eqx10863 sshd[13934]: Failed password for root from 192.168.13.38 port 39596 ssh2
2018-02-07T18:00:00+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
2018-02-07T18:00:00+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
#this one will overflow
2018-02-07T18:00:01+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
#these ones will be blackholed
2018-02-07T18:00:02+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
2018-02-07T18:00:02+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
2018-02-07T18:00:02+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
2018-02-07T18:00:02+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
2018-02-07T18:00:02+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
2018-02-07T18:00:02+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
2018-02-07T18:00:02+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
#these ones won't
2018-02-07T18:02:01+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
2018-02-07T18:02:01+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
2018-02-07T18:02:01+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
2018-02-07T18:02:01+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
2018-02-07T18:02:01+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2
2018-02-07T18:02:01+01:00 eqx10863 sshd[13952]: Failed password for root from 192.168.13.38 port 2377 ssh2

@ -1 +0,0 @@
type: syslog

@ -1,6 +0,0 @@
- filename: ./hub/parsers/s00-raw/crowdsecurity/syslog-logs.yaml
  stage: s00-raw
- filename: ./hub/parsers/s01-parse/crowdsecurity/sshd-logs.yaml
  stage: s01-parse
- filename: ./hub/parsers/s02-enrich/crowdsecurity/dateparse-enrich.yaml
  stage: s02-enrich

@ -1,6 +0,0 @@
- filename: ./hub/scenarios/crowdsecurity/ssh-bf.yaml

@ -1 +0,0 @@
select count(*) == 2 from signal_occurences where source_ip = "192.168.13.38" and scenario = "crowdsecurity/ssh-bf"
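The comments in the fixture above describe the expected leaky-bucket behaviour: the sixth failure overflows the bucket, a blackhole window then suppresses further overflows from the same source, and events arriving after the window expires count toward a fresh overflow. A generic sketch of that logic, not crowdsec's actual implementation (capacity and window are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// bucket is a minimal leaky bucket with a blackhole window,
// mirroring what the fixture above asserts.
type bucket struct {
	capacity       int
	events         int
	blackhole      time.Duration
	blackholeUntil time.Time
}

func (b *bucket) pour(t time.Time) (overflow bool) {
	if t.Before(b.blackholeUntil) {
		return false // suppressed: inside the blackhole window
	}
	b.events++
	if b.events > b.capacity {
		b.events = 0
		b.blackholeUntil = t.Add(b.blackhole)
		return true
	}
	return false
}

func main() {
	b := &bucket{capacity: 5, blackhole: time.Minute}
	base := time.Date(2018, 2, 7, 18, 0, 0, 0, time.UTC)
	offsets := []time.Duration{0, 0, 0, 0, 0, time.Second, // 6th overflows
		2 * time.Second, 2 * time.Second, // blackholed
		2 * time.Minute, 2 * time.Minute} // counted again
	for i, off := range offsets {
		if b.pour(base.Add(off)) {
			fmt.Printf("event %d: overflow\n", i)
		}
	}
}
```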
@ -1,84 +0,0 @@
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page1 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page1" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page2 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page2" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page3 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page3" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page4 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page4" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page5 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page5" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page6 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page6" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page7 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page7" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page8 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page8" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page9 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page9" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page10 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page10" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page11 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page11" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page12 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page12" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page13 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page13" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page14 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page14" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page15 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page15" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page16 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page16" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page17 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page17" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page18 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page18" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page19 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page19" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page20 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page20" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page21 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page1" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page22 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page2" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page23 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page3" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page24 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page4" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page25 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page5" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page26 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page6" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page27 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page7" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page28 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page8" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page29 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page9" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page30 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page10" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page31 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page11" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page32 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page12" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page33 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page13" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page34 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page14" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page35 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page15" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page36 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page16" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page37 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page17" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page38 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page18" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page39 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page19" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page40 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page20" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.38 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page41 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page20" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"

## Those logs should not make an overflow
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page1 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page1" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page2 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page2" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page3 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page3" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page4 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page4" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page5 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page5" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page6 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page6" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page7 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page7" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page8 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page8" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:47:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:47:44 +0000] "GET /crawl_page9 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page9" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:49:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:49:44 +0000] "GET /crawl_page10 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page10" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:49:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:49:44 +0000] "GET /crawl_page11 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page11" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:49:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:49:44 +0000] "GET /crawl_page12 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page12" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:49:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:49:44 +0000] "GET /crawl_page13 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page13" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:49:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:49:44 +0000] "GET /crawl_page14 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page14" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:49:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:49:44 +0000] "GET /crawl_page15 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page15" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:49:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:49:44 +0000] "GET /crawl_page16 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page16" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:50:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:50:44 +0000] "GET /crawl_page17 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page17" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:50:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:50:44 +0000] "GET /crawl_page18 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page18" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:50:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:50:44 +0000] "GET /crawl_page19 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page19" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:50:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:50:44 +0000] "GET /crawl_page20 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page20" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:50:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:50:44 +0000] "GET /crawl_page21 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page1" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:50:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:50:44 +0000] "GET /crawl_page22 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page2" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:50:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:50:44 +0000] "GET /crawl_page23 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page3" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:51:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:51:44 +0000] "GET /crawl_page24 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page4" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:51:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:51:44 +0000] "GET /crawl_page25 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page5" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:51:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:51:44 +0000] "GET /crawl_page26 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page6" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:51:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:51:44 +0000] "GET /crawl_page27 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page7" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:51:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:51:44 +0000] "GET /crawl_page28 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page8" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:51:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:51:44 +0000] "GET /crawl_page29 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page9" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:51:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:51:44 +0000] "GET /crawl_page30 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page10" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:51:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:51:44 +0000] "GET /crawl_page31 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page11" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:51:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:51:44 +0000] "GET /crawl_page32 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page12" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:52:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:52:44 +0000] "GET /crawl_page33 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page13" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:52:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:52:44 +0000] "GET /crawl_page34 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page14" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:52:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:52:44 +0000] "GET /crawl_page35 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page15" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:52:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:52:44 +0000] "GET /crawl_page36 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page16" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:52:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:52:44 +0000] "GET /crawl_page37 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page17" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:53:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:53:44 +0000] "GET /crawl_page38 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page18" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:53:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:53:44 +0000] "GET /crawl_page39 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page19" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:53:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:53:44 +0000] "GET /crawl_page40 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page20" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
2017-12-01T14:53:44+01:00 mywebserver nginx: 192.168.13.40 - - [01/Dec/2017:14:53:44 +0000] "GET /crawl_page41 HTTP/1.1" 200 4249 "http://www.cs.com/crawl_page20" "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"

@ -1 +0,0 @@
type: nginx

@ -1,9 +0,0 @@
- filename: ./hub/parsers/s00-raw/crowdsecurity/syslog-logs.yaml
  stage: s00-raw
- filename: ./hub/parsers/s01-parse/crowdsecurity/nginx-logs.yaml
  stage: s01-parse
- filename: ./hub/parsers/s02-enrich/crowdsecurity/dateparse-enrich.yaml
  stage: s02-enrich
- filename: ./hub/parsers/s02-enrich/crowdsecurity/http-logs.yaml
  stage: s02-enrich

@ -1,7 +0,0 @@
- filename: ./hub/scenarios/crowdsecurity/http-crawl-non_statics.yaml

@ -1 +0,0 @@
select count(*) == 1 from signal_occurences where source_ip = "192.168.13.38" and scenario = "crowdsecurity/http-crawl-non_statics"
File diff suppressed because it is too large
@ -1,2 +0,0 @@
- filename: ./hub/parsers/s00-raw/crowdsecurity/enrich.yaml
  stage: s00-raw

@ -1,6 +0,0 @@
- filename: ./hub/scenarios/crowdsecurity/basic-consensus.yaml

@ -1,12 +0,0 @@
select count(*) == 1 from signal_occurences where source_ip = "139.199.192.143" and scenario = "specialized_consensus"
select count(*) == 1 from signal_occurences where source_ip = "139.199.192.143" and scenario = "base_consensus"
select count(*) == 1 from signal_occurences where source_ip = "207.38.89.99" and scenario = "base_consensus"
select count(*) == 1 from signal_occurences where source_ip = "207.38.89.99" and scenario = "specialized_consensus"
select count(*) == 1 from signal_occurences where source_ip = "51.159.56.89" and scenario = "base_consensus"
select count(*) == 1 from signal_occurences where source_ip = "103.212.97.45" and scenario = "base_consensus"
select count(*) == 1 from signal_occurences where source_ip = "103.212.97.45" and scenario = "specialized_consensus"
select count(*) == 1 from signal_occurences where source_ip = "35.180.132.238" and scenario = "specialized_consensus"
select count(*) == 1 from signal_occurences where source_ip = "35.180.132.238" and scenario = "base_consensus"
@ -1,70 +0,0 @@

{
  "Type": 0,
  "ExpectMode": 0,
  "Whitelisted": false,
  "Stage": "",
  "Enriched": {
    "machine_uuid": "user1_machine1",
    "trust_factor": "4",
    "user_uuid": "1",
    "watcher_ip": "1.2.3.4"
  },
  "Overflow": {
    "MapKey": "7e159c83f45e4cabfe4c2d8653a24ac79506a703",
    "scenario": "http_404-scan",
    "bucket_id": "morning-sea",
    "alert_message": "31.222.187.197 performed 'http_404-scan' (6 events over 2s) at 2020-01-02 15:31:32 +0000 UTC",
    "events_count": 6,
    "start_at": "2020-01-02T15:31:30Z",
    "ban_applications": [
      {
        "MeasureType": "ban",
        "MeasureExtra": "",
        "Until": "2020-01-02T19:31:32Z",
        "StartIp": 1781924660,
        "EndIp": 1781924660,
        "IpText": "31.222.187.197",
        "Reason": "ban on ip 31.222.187.197",
        "Scenario": "",
        "SignalOccurenceID": 985
      }
    ],
    "stop_at": "2020-01-14T06:44:14Z",
    "Source_ip": "31.222.187.197",
    "Source_range": "\u003cnil\u003e",
    "Source_AutonomousSystemNumber": "0",
    "Source_AutonomousSystemOrganization": "",
    "Source_Country": "CN",
    "Source_Latitude": 39.92890167236328,
    "Source_Longitude": 116.38829803466797,
    "sources": {
      "31.222.187.197": {
        "Ip": "31.222.187.197",
        "Range": {
          "IP": "",
          "Mask": null
        },
        "AutonomousSystemNumber": "0",
        "AutonomousSystemOrganization": "",
        "Country": "CN",
        "Latitude": 39.92890167236328,
        "Longitude": 116.38829803466797,
        "Flags": null
      }
    },
    "capacity": 5,
    "leak_speed": 10000000000,
    "Reprocess": true,
    "Labels": {
      "remediation": "true",
      "service": "http",
      "type": "scan"
    }
  },
  "Time": "0001-01-01T00:00:00Z",
  "StrTime": "",
  "MarshaledTime": "",
  "Process": true
}
@ -1,2 +0,0 @@
- filename: ./hub/parsers/s00-raw/crowdsecurity/enrich.yaml
  stage: s00-raw

@ -1,6 +0,0 @@
- filename: ./hub/scenarios/crowdsecurity/basic-consensus.yaml

@ -1,7 +0,0 @@
select count(*) == 1 from signal_occurences where source_ip = "31.222.187.197" and scenario = "base_consensus"
select count(*) == 1 from signal_occurences where source_ip = "31.222.187.197" and scenario = "specialized_consensus"
File diff suppressed because it is too large
@ -1,2 +0,0 @@
- filename: ./hub/parsers/s00-raw/crowdsecurity/enrich.yaml
  stage: s00-raw

@ -1,6 +0,0 @@
- filename: ./hub/scenarios/crowdsecurity/consensus-trust-factor.yaml

@ -1,11 +0,0 @@
select count(*) == 1 from signal_occurences where source_ip = "139.199.192.143" and scenario = "consensus/strong_trust+diff_scenario"
select count(*) == 1 from signal_occurences where source_ip = "139.199.192.143" and scenario = "consensus/strong_trust+same_scenario"
select count(*) == 1 from signal_occurences where source_ip = "207.38.89.99" and scenario = "consensus/strong_trust+diff_scenario"
select count(*) == 1 from signal_occurences where source_ip = "207.38.89.99" and scenario = "consensus/strong_trust+same_scenario"
select count(*) == 1 from signal_occurences where source_ip = "51.159.56.89" and scenario = "consensus/strong_trust+diff_scenario"
select count(*) == 1 from signal_occurences where source_ip = "103.212.97.45" and scenario = "consensus/strong_trust+diff_scenario"
select count(*) == 1 from signal_occurences where source_ip = "103.212.97.45" and scenario = "consensus/strong_trust+same_scenario"
select count(*) == 1 from signal_occurences where source_ip = "35.180.132.238" and scenario = "consensus/strong_trust+diff_scenario"
select count(*) == 1 from signal_occurences where source_ip = "35.180.132.238" and scenario = "consensus/strong_trust+same_scenario"
@ -1,70 +0,0 @@

{
  "Type": 0,
  "ExpectMode": 0,
  "Whitelisted": false,
  "Stage": "",
  "Enriched": {
    "machine_uuid": "user1_machine1",
    "trust_factor": "1",
    "user_uuid": "1",
    "watcher_ip": "1.2.3.4"
  },
  "Overflow": {
    "MapKey": "7e159c83f45e4cabfe4c2d8653a24ac79506a703",
    "scenario": "http_404-scan",
    "bucket_id": "morning-sea",
    "alert_message": "31.222.187.197 performed 'http_404-scan' (6 events over 2s) at 2020-01-02 15:31:32 +0000 UTC",
    "events_count": 6,
    "start_at": "2020-01-02T15:31:30Z",
    "ban_applications": [
      {
        "MeasureType": "ban",
        "MeasureExtra": "",
        "Until": "2020-01-02T19:31:32Z",
        "StartIp": 1781924660,
        "EndIp": 1781924660,
        "IpText": "31.222.187.197",
        "Reason": "ban on ip 31.222.187.197",
        "Scenario": "",
        "SignalOccurenceID": 985
      }
    ],
    "stop_at": "2020-01-14T06:44:14Z",
    "Source_ip": "31.222.187.197",
    "Source_range": "\u003cnil\u003e",
    "Source_AutonomousSystemNumber": "0",
    "Source_AutonomousSystemOrganization": "",
    "Source_Country": "CN",
    "Source_Latitude": 39.92890167236328,
    "Source_Longitude": 116.38829803466797,
    "sources": {
      "31.222.187.197": {
        "Ip": "31.222.187.197",
        "Range": {
          "IP": "",
          "Mask": null
        },
        "AutonomousSystemNumber": "0",
        "AutonomousSystemOrganization": "",
        "Country": "CN",
        "Latitude": 39.92890167236328,
        "Longitude": 116.38829803466797,
        "Flags": null
      }
    },
    "capacity": 5,
    "leak_speed": 10000000000,
    "Reprocess": true,
    "Labels": {
      "remediation": "true",
      "service": "http",
      "type": "scan"
    }
  },
  "Time": "0001-01-01T00:00:00Z",
  "StrTime": "",
  "MarshaledTime": "",
  "Process": true
}
@ -1,2 +0,0 @@
- filename: ./hub/parsers/s00-raw/crowdsecurity/enrich.yaml
  stage: s00-raw

@ -1,6 +0,0 @@
- filename: ./hub/scenarios/crowdsecurity/consensus-trust-factor.yaml

@ -1,7 +0,0 @@
select count(*) == 1 from signal_occurences where source_ip = "31.222.187.197" and scenario = "base_consensus"
select count(*) == 1 from signal_occurences where source_ip = "31.222.187.197" and scenario = "specialized_consensus"
@ -1,37 +0,0 @@
# scenario tests

```
$ make build
$ cd tests/.../
$ git clone git@github.com:JohnDoeCrowdSec/hub.git hub
$ ./cracra.sh -all
```

For the tests to run:
- crowdsec must be built
- ./hub/ must be a valid hub directory (ie `git clone git@github.com:JohnDoeCrowdSec/hub.git hub`)

Each test is a directory starting with `0` containing:
- a logfile `file.log`
- a list of enabled parsers `parsers.yaml`
- a list of enabled scenarios `scenarios.yaml`
- a `success.sqlite` file that is a list of sqlite commands that must run successfully
- a `label` file containing the label of the input file (ie. `type:syslog` or `prog_name:nginx`)

A test is successful when the agent, started with the given parsers.yaml, scenarios.yaml and postoverflows.yaml, produces a sqlite database conforming to success.sqlite after being injected with the `file.log` in time-machine mode.

## parsers.yaml

As tests are run in time-machine mode, the `timemachine.yaml` parser is mandatory or you will get errors.

```
$ cat 01ssh/parsers.yaml
- filename: ./hub/parsers/s00-raw/crowdsec/syslog-parse.yaml
  stage: s00-raw
- filename: ./hub/parsers/s01-parse/crowdsec/sshd-logs.yaml
  stage: s01-parse
- filename: ./hub/parsers/s02-enrich/crowdsec/timemachine.yaml
  stage: s02-enrich
```

postoverflows and scenarios follow the same logic.
@ -1,5 +0,0 @@
name: sqlite
path: ./plugins/backend/sqlite.so
config:
  db_path: ./test.db
  flush: true
@ -1,106 +0,0 @@
#!/bin/bash

CWCMD="../../cmd/crowdsec/crowdsec"
PLUGINS_FOLDER="../../plugins"
PLUGINS_FOLDER_BACKEND="./plugins/backend/"

dostuff() {

	STEP=${1}

	if [[ "${STEP}" == *consensus_* ]] ; then
		cat > ./acquis.yaml << EOF
mode: cat
type: bin
filename: ${STEP}/file.log
labels:
  type: consensus
EOF

		EXTRA=""
		if [ -f "./buckets_state.json" ] ; then
			echo "Reusing existing bucket state"
			EXTRA="-restore-state ./buckets_state.json"
		else
			echo "Creating new bucket state"
		fi;

		${CWCMD} -c ./dev.yaml -acquis ./acquis.yaml ${EXTRA} -custom-config "parser:${STEP}/parsers.yaml,scenario:${STEP}/scenarios.yaml" -dump-state
	else
		SCENAR=${1}
		FILE_LABELS=$(cat ${SCENAR}"/labels" 2>/dev/null)

		rm "./test.db"
		cat > ./acquis.yaml << EOF
mode: cat
filename: ${SCENAR}/file.log
labels:
  ${FILE_LABELS}
EOF

		${CWCMD} -c ./dev.yaml -acquis ./acquis.yaml -custom-config "parser:${SCENAR}/parsers.yaml,scenario:${SCENAR}/scenarios.yaml"
	fi;

	success=0
	echo "Checking results"
	# check results
	while read sqq ; do
		if [ -z "${sqq}" ] ; then
			continue
		fi;
		success=$((${success}+1))

		if [ `echo ${sqq} | sqlite3 ./test.db` -eq "1" ] ; then
			echo "OK : ${sqq}" ;
		else
			echo "FAILED : ${1} ${sqq}";
			echo "IN logs : ${1}/file.log"
			echo "Expected : ${1}/success.sqlite"
			echo "Failed sql query : ${sqq}"
			echo "Full log : out.log"
			exit
		fi
	done < ${1}/success.sqlite

	echo "Done testing ${success} tests runned"

}

# Still cracra, but build the plugins and move them in ./plugins
CWD=$(pwd)
cd ../..
bash ./scripts/build_plugins.sh
cd $CWD
mkdir -p "$PLUGINS_FOLDER_BACKEND"
cp -r ../../plugins/backend/*.so "$PLUGINS_FOLDER_BACKEND"
# Cracra finished

###

if [ -z ${1} ] ; then
	echo "${0} [-all|/path/to/test]"
	echo " /path/to/test : path to test directory (ie. ./01ssh/)"
	echo " -all : run all tests"
	echo " **./hub/** must be up-to-date hub directory/symlink (ie. hub clone)"
	exit;
fi;

case ${1} in
	"-all")
		for i in `find . -mindepth 1 -type d -iname "0*"` ;
		do
			echo "Testing ${i}";
			dostuff $i ;
		done
		;;
	*)
		echo "Testing ${1}";
		dostuff $1 ;
		;;
esac
@ -1,12 +0,0 @@
working_dir: "."
data_dir: "../../data/"
config_dir: "../../config/"
pid_dir: "./"
log_dir: "./"
log_mode: "stdout"
log_level: info
profiling: false
sqlite_path: "./test.db"
apimode: false
plugin:
  backend: "./backend/"
Binary file not shown.
@ -289,9 +289,9 @@ install_crowdsec() {
	install -v -m 755 -D ./config/profiles.yaml "${CROWDSEC_CONFIG_PATH}" || exit
	install -v -m 600 -D ./config/api.yaml "${CROWDSEC_CONFIG_PATH}" || exit
	mkdir -p ${PID_DIR} || exit
	PID=${PID_DIR} DATA=${CROWDSEC_DATA_DIR} CFG=${CROWDSEC_CONFIG_PATH} envsubst < ./config/prod.yaml > ${CROWDSEC_CONFIG_PATH}"/default.yaml"
	PID=${PID_DIR} DATA=${CROWDSEC_DATA_DIR} CFG=${CROWDSEC_CONFIG_PATH} envsubst < ./config/user.yaml > ${CROWDSEC_CONFIG_PATH}"/user.yaml"
	CFG=${CROWDSEC_CONFIG_PATH} PID=${PID_DIR} BIN=${CROWDSEC_BIN_INSTALLED} envsubst < ./config/crowdsec.service > "${SYSTEMD_PATH_FILE}"
	PID=${PID_DIR} DATA=${CROWDSEC_DATA_DIR} CFG=${CROWDSEC_CONFIG_PATH} envsubst '$CFG $PID $DATA' < ./config/prod.yaml > ${CROWDSEC_CONFIG_PATH}"/default.yaml"
	PID=${PID_DIR} DATA=${CROWDSEC_DATA_DIR} CFG=${CROWDSEC_CONFIG_PATH} envsubst '$CFG $PID $DATA' < ./config/user.yaml > ${CROWDSEC_CONFIG_PATH}"/user.yaml"
	CFG=${CROWDSEC_CONFIG_PATH} PID=${PID_DIR} BIN=${CROWDSEC_BIN_INSTALLED} envsubst '$CFG $PID $BIN' < ./config/crowdsec.service > "${SYSTEMD_PATH_FILE}"
	install_bins
	systemctl daemon-reload
}
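The wizard fix passes an explicit variable list to `envsubst`; without it, every `$TOKEN` in the templates is substituted (usually to an empty string), not just `$CFG`, `$PID` and `$DATA`/`$BIN`. The same whitelist idea sketched in Go with `os.Expand` (template contents and values are illustrative):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	vars := map[string]string{
		"CFG":  "/etc/crowdsec/config",
		"PID":  "/var/run",
		"DATA": "/var/lib/crowdsec/data",
	}
	tmpl := "config_dir: $CFG\npid_dir: $PID\nother: $UNRELATED\n"

	// whitelist expansion: variables outside the map are kept
	// verbatim, mirroring `envsubst '$CFG $PID $DATA'`
	out := os.Expand(tmpl, func(k string) string {
		if v, ok := vars[k]; ok {
			return v
		}
		return "$" + k
	})
	fmt.Print(out)
}
```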