@@ -1,6 +1,7 @@
 package main

 import (
+	"context"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -13,8 +14,8 @@ import (
 	"github.com/crowdsecurity/go-cs-lib/trace"

 	"github.com/crowdsecurity/crowdsec/pkg/acquisition"
-	"github.com/crowdsecurity/crowdsec/pkg/appsec"
 	"github.com/crowdsecurity/crowdsec/pkg/alertcontext"
+	"github.com/crowdsecurity/crowdsec/pkg/appsec"
 	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
 	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
 	leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
@@ -56,63 +57,86 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H

 	//start go-routines for parsing, buckets pour and outputs.
 	parserWg := &sync.WaitGroup{}
+
 	parsersTomb.Go(func() error {
 		parserWg.Add(1)
+
 		for i := 0; i < cConfig.Crowdsec.ParserRoutinesCount; i++ {
 			parsersTomb.Go(func() error {
 				defer trace.CatchPanic("crowdsec/runParse")
+
 				if err := runParse(inputLineChan, inputEventChan, *parsers.Ctx, parsers.Nodes); err != nil { //this error will never happen as parser.Parse is not able to return errors
 					log.Fatalf("starting parse error : %s", err)
 					return err
 				}
+
 				return nil
 			})
 		}
 		parserWg.Done()
+
 		return nil
 	})
 	parserWg.Wait()

 	bucketWg := &sync.WaitGroup{}
+
 	bucketsTomb.Go(func() error {
 		bucketWg.Add(1)
 		/*restore previous state as well if present*/
 		if cConfig.Crowdsec.BucketStateFile != "" {
 			log.Warningf("Restoring buckets state from %s", cConfig.Crowdsec.BucketStateFile)
+
 			if err := leaky.LoadBucketsState(cConfig.Crowdsec.BucketStateFile, buckets, holders); err != nil {
-				return fmt.Errorf("unable to restore buckets : %s", err)
+				return fmt.Errorf("unable to restore buckets: %w", err)
 			}
 		}

 		for i := 0; i < cConfig.Crowdsec.BucketsRoutinesCount; i++ {
 			bucketsTomb.Go(func() error {
 				defer trace.CatchPanic("crowdsec/runPour")
+
 				if err := runPour(inputEventChan, holders, buckets, cConfig); err != nil {
 					log.Fatalf("starting pour error : %s", err)
 					return err
 				}
+
 				return nil
 			})
 		}
 		bucketWg.Done()
+
 		return nil
 	})
 	bucketWg.Wait()

+	apiClient, err := AuthenticatedLAPIClient(*cConfig.API.Client.Credentials, hub)
+	if err != nil {
+		return err
+	}
+
+	log.Debugf("Starting HeartBeat service")
+	apiClient.HeartBeat.StartHeartBeat(context.Background(), &outputsTomb)
+
 	outputWg := &sync.WaitGroup{}
+
 	outputsTomb.Go(func() error {
 		outputWg.Add(1)
+
 		for i := 0; i < cConfig.Crowdsec.OutputRoutinesCount; i++ {
 			outputsTomb.Go(func() error {
 				defer trace.CatchPanic("crowdsec/runOutput")
-				if err := runOutput(inputEventChan, outputEventChan, buckets, *parsers.Povfwctx, parsers.Povfwnodes, *cConfig.API.Client.Credentials, hub); err != nil {
+
+				if err := runOutput(inputEventChan, outputEventChan, buckets, *parsers.Povfwctx, parsers.Povfwnodes, apiClient); err != nil {
 					log.Fatalf("starting outputs error : %s", err)
 					return err
 				}
+
 				return nil
 			})
 		}
 		outputWg.Done()
+
 		return nil
 	})
 	outputWg.Wait()
@@ -122,16 +146,16 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H
 		if cConfig.Prometheus.Level == "aggregated" {
 			aggregated = true
 		}
+
 		if err := acquisition.GetMetrics(dataSources, aggregated); err != nil {
 			return fmt.Errorf("while fetching prometheus metrics for datasources: %w", err)
 		}
-
 	}
+
 	log.Info("Starting processing data")

 	if err := acquisition.StartAcquisition(dataSources, inputLineChan, &acquisTomb); err != nil {
-		log.Fatalf("starting acquisition error : %s", err)
-		return err
+		return fmt.Errorf("starting acquisition error: %w", err)
 	}

 	return nil
@@ -140,11 +164,13 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H
 func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub.Hub, agentReady chan bool) {
 	crowdsecTomb.Go(func() error {
 		defer trace.CatchPanic("crowdsec/serveCrowdsec")
+
 		go func() {
 			defer trace.CatchPanic("crowdsec/runCrowdsec")
 			// this logs every time, even at config reload
 			log.Debugf("running agent after %s ms", time.Since(crowdsecT0))
 			agentReady <- true
+
 			if err := runCrowdsec(cConfig, parsers, hub); err != nil {
 				log.Fatalf("unable to start crowdsec routines: %s", err)
 			}
@@ -156,16 +182,20 @@ func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub
 		*/
 		waitOnTomb()
 		log.Debugf("Shutting down crowdsec routines")
+
 		if err := ShutdownCrowdsecRoutines(); err != nil {
 			log.Fatalf("unable to shutdown crowdsec routines: %s", err)
 		}
+
 		log.Debugf("everything is dead, return crowdsecTomb")
+
 		if dumpStates {
 			dumpParserState()
 			dumpOverflowState()
 			dumpBucketsPour()
 			os.Exit(0)
 		}
+
 		return nil
 	})
 }
@@ -175,55 +205,65 @@ func dumpBucketsPour() {
 	if err != nil {
 		log.Fatalf("open: %s", err)
 	}
+
 	out, err := yaml.Marshal(leaky.BucketPourCache)
 	if err != nil {
 		log.Fatalf("marshal: %s", err)
 	}
+
 	b, err := fd.Write(out)
 	if err != nil {
 		log.Fatalf("write: %s", err)
 	}
+
 	log.Tracef("wrote %d bytes", b)
+
 	if err := fd.Close(); err != nil {
 		log.Fatalf(" close: %s", err)
 	}
 }

 func dumpParserState() {
-
 	fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "parser-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
 	if err != nil {
 		log.Fatalf("open: %s", err)
 	}
+
 	out, err := yaml.Marshal(parser.StageParseCache)
 	if err != nil {
 		log.Fatalf("marshal: %s", err)
 	}
+
 	b, err := fd.Write(out)
 	if err != nil {
 		log.Fatalf("write: %s", err)
 	}
+
 	log.Tracef("wrote %d bytes", b)
+
 	if err := fd.Close(); err != nil {
 		log.Fatalf(" close: %s", err)
 	}
 }

 func dumpOverflowState() {
-
 	fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucket-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
 	if err != nil {
 		log.Fatalf("open: %s", err)
 	}
+
 	out, err := yaml.Marshal(bucketOverflows)
 	if err != nil {
 		log.Fatalf("marshal: %s", err)
 	}
+
 	b, err := fd.Write(out)
 	if err != nil {
 		log.Fatalf("write: %s", err)
 	}
+
 	log.Tracef("wrote %d bytes", b)
+
 	if err := fd.Close(); err != nil {
 		log.Fatalf(" close: %s", err)
 	}
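
The parser/bucket/output hunks above all follow the same launcher pattern: a tomb supervises the worker goroutines, while the WaitGroup only marks that the launcher goroutine itself has run before the caller moves on. The sketch below is not part of this change; it is a minimal, self-contained illustration of that pattern using gopkg.in/tomb.v2 (the library behind parsersTomb, bucketsTomb and outputsTomb). Names such as startWorkers and the int payload are invented for the example, and wg.Add(1) is moved before tomb.Go so the sketch itself has no startup race.

```go
// Sketch only: a tomb-supervised worker pool with a WaitGroup that gates on
// the launcher goroutine, mirroring the structure used in runCrowdsec.
package main

import (
	"fmt"
	"sync"

	"gopkg.in/tomb.v2"
)

func startWorkers(t *tomb.Tomb, in chan int, n int) {
	wg := &sync.WaitGroup{}
	wg.Add(1) // mark the launcher as pending before spawning it

	t.Go(func() error {
		// spawn n tracked workers, then signal that launching is done
		for i := 0; i < n; i++ {
			t.Go(func() error {
				for {
					select {
					case v, ok := <-in:
						if !ok {
							return nil // input closed, worker exits cleanly
						}
						fmt.Println("processed", v)
					case <-t.Dying():
						return nil // tomb was killed, stop working
					}
				}
			})
		}
		wg.Done()

		return nil
	})
	wg.Wait() // only waits for the launcher, not for the workers
}

func main() {
	t := &tomb.Tomb{}
	in := make(chan int)

	startWorkers(t, in, 4)

	for i := 0; i < 10; i++ {
		in <- i
	}
	close(in)

	// all workers return once the channel is closed; Wait reports the death reason
	if err := t.Wait(); err != nil {
		fmt.Println("workers failed:", err)
	}
}
```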