
(finally) cleanup a bit the main and split code for reuse

Thibault bui Koechlin, 5 years ago
Parent
Current commit 9ed5743603
3 changed files with 200 additions and 174 deletions
  1. cmd/crowdsec/main.go (+163 -165)
  2. cmd/crowdsec/serve.go (+35 -9)
  3. pkg/csconfig/config.go (+2 -0)

cmd/crowdsec/main.go (+163 -165)

@@ -1,9 +1,7 @@
 package main
 
 import (
-	"strings"
-
-	"io/ioutil"
+	"fmt"
 
 	_ "net/http/pprof"
 	"time"
@@ -19,7 +17,6 @@ import (
 	log "github.com/sirupsen/logrus"
 
 	"gopkg.in/tomb.v2"
-	"gopkg.in/yaml.v2"
 )
 
 var (
@@ -28,72 +25,42 @@ var (
 	parsersTomb tomb.Tomb
 	bucketsTomb tomb.Tomb
 	outputsTomb tomb.Tomb
-
-	holders []leaky.BucketFactory
-	buckets *leaky.Buckets
+	/*global crowdsec config*/
 	cConfig *csconfig.CrowdSec
-
-	OutputRunner *outputs.Output
-
+	/*the state of acquisition*/
+	acquisitionCTX *acquisition.FileAcquisCtx
+	/*the state of the buckets*/
+	holders         []leaky.BucketFactory
+	buckets         *leaky.Buckets
+	outputEventChan chan types.Event //the buckets init returns its own chan that is used for multiplexing
+	/*the state of outputs*/
+	OutputRunner   *outputs.Output
+	outputProfiles []types.Profile
+	/*the state of the parsers*/
+	parserCTX         *parser.UnixParserCtx
+	postOverflowCTX   *parser.UnixParserCtx
+	parserNodes       []parser.Node
+	postOverflowNodes []parser.Node
 	/*settings*/
 	lastProcessedItem time.Time /*keep track of last item timestamp in time-machine. it is used to GC buckets when we dump them.*/
 )
 
-func main() {
-	var (
-		err                 error
-		p                   parser.UnixParser
-		parserNodes         []parser.Node = make([]parser.Node, 0)
-		postOverflowNodes   []parser.Node = make([]parser.Node, 0)
-		nbParser            int           = 1
-		parserCTX           *parser.UnixParserCtx
-		postOverflowCTX     *parser.UnixParserCtx
-		acquisitionCTX      *acquisition.FileAcquisCtx
-		CustomParsers       []parser.Stagefile
-		CustomPostoverflows []parser.Stagefile
-		CustomScenarios     []parser.Stagefile
-		outputEventChan     chan types.Event
-	)
-
-	inputLineChan := make(chan types.Event)
-	inputEventChan := make(chan types.Event)
-
-	cConfig = csconfig.NewCrowdSecConfig()
-
-	// Handle command line arguments
-	if err := cConfig.GetOPT(); err != nil {
-		log.Fatalf(err.Error())
-	}
-
-	if err = types.SetDefaultLoggerConfig(cConfig.LogMode, cConfig.LogFolder, cConfig.LogLevel); err != nil {
-		log.Fatal(err.Error())
-	}
-
-	log.Infof("Crowdwatch %s", cwversion.VersionStr())
+func LoadParsers(cConfig *csconfig.CrowdSec) error {
+	var p parser.UnixParser
+	var err error
 
-	if cConfig.Prometheus {
-		registerPrometheus()
-		cConfig.Profiling = true
-	}
+	parserNodes = make([]parser.Node, 0)
+	postOverflowNodes = make([]parser.Node, 0)
 
 	log.Infof("Loading grok library")
 	/* load base regexps for two grok parsers */
 	parserCTX, err = p.Init(map[string]interface{}{"patterns": cConfig.ConfigFolder + string("/patterns/"), "data": cConfig.DataFolder})
 	if err != nil {
-		log.Errorf("failed to initialize parser : %v", err)
-		return
+		return fmt.Errorf("failed to load parser patterns : %v", err)
 	}
 	postOverflowCTX, err = p.Init(map[string]interface{}{"patterns": cConfig.ConfigFolder + string("/patterns/"), "data": cConfig.DataFolder})
 	if err != nil {
-		log.Errorf("failed to initialize postoverflow : %v", err)
-		return
-	}
-
-	/*enable profiling*/
-	if cConfig.Profiling {
-		go runTachymeter(cConfig.HTTPListen)
-		parserCTX.Profiling = true
-		postOverflowCTX.Profiling = true
+		return fmt.Errorf("failed to load postovflw parser patterns : %v", err)
 	}
 
 	/*
@@ -102,92 +69,37 @@ func main() {
 	log.Infof("Loading enrich plugins")
 	parserPlugins, err := parser.Loadplugin(cConfig.DataFolder)
 	if err != nil {
-		log.Errorf("Failed to load plugin geoip : %v", err)
+		return fmt.Errorf("Failed to load enrich plugin : %v", err)
 	}
-	parser.ECTX = append(parser.ECTX, parserPlugins)
-
-	/*parser the validatormode option if present. mostly used for testing purposes*/
-	if cConfig.ValidatorMode != "" {
-		//beurk : provided 'parser:file.yaml,postoverflow:file.yaml,scenario:file.yaml load only those
-		validators := strings.Split(cConfig.ValidatorMode, ",")
-		for _, val := range validators {
-			splittedValidator := strings.Split(val, ":")
-			if len(splittedValidator) != 2 {
-				log.Fatalf("parser:file,scenario:file,postoverflow:file")
-			}
-
-			configType := splittedValidator[0]
-			configFile := splittedValidator[1]
-
-			var parsedFile []parser.Stagefile
-			dataFile, err := ioutil.ReadFile(configFile)
+	parser.ECTX = []parser.EnricherCtx{parserPlugins}
 
-			if err != nil {
-				log.Fatalf("failed opening %s : %s", configFile, err)
-			}
-			if err := yaml.UnmarshalStrict(dataFile, &parsedFile); err != nil {
-				log.Fatalf("failed unmarshalling %s : %s", configFile, err)
-			}
-			switch configType {
-			case "parser":
-				CustomParsers = parsedFile
-			case "scenario":
-				CustomScenarios = parsedFile
-			case "postoverflow":
-				CustomPostoverflows = parsedFile
-			default:
-				log.Fatalf("wrong type, format is parser:file,scenario:file,postoverflow:file")
-			}
+	/*
+	 Load the actual parsers
+	*/
 
-		}
-	}
+	log.Infof("Loading parsers")
+	parserNodes, err = parser.LoadStageDir(cConfig.ConfigFolder+"/parsers/", parserCTX)
 
-	/* load the parser nodes */
-	if cConfig.ValidatorMode != "" && len(CustomParsers) > 0 {
-		log.Infof("Loading (validatormode) parsers")
-		parserNodes, err = parser.LoadStages(CustomParsers, parserCTX)
-	} else {
-		log.Infof("Loading parsers")
-		parserNodes, err = parser.LoadStageDir(cConfig.ConfigFolder+"/parsers/", parserCTX)
-	}
 	if err != nil {
-		log.Fatalf("failed to load parser config : %v", err)
-	}
-	/* parsers loaded */
-
-	/* load the post-overflow stages*/
-	if cConfig.ValidatorMode != "" && len(CustomPostoverflows) > 0 {
-		log.Infof("Loading (validatormode) postoverflow parsers")
-		postOverflowNodes, err = parser.LoadStages(CustomPostoverflows, postOverflowCTX)
-	} else {
-		log.Infof("Loading postoverflow parsers")
-		postOverflowNodes, err = parser.LoadStageDir(cConfig.ConfigFolder+"/postoverflows/", postOverflowCTX)
-	}
-	if err != nil {
-		log.Fatalf("failed to load postoverflow config : %v", err)
+		return fmt.Errorf("failed to load parser config : %v", err)
 	}
 
-	log.Infof("Loaded Nodes : %d parser, %d postoverflow", len(parserNodes), len(postOverflowNodes))
-	/* post overflow loaded */
+	log.Infof("Loading postoverflow parsers")
+	postOverflowNodes, err = parser.LoadStageDir(cConfig.ConfigFolder+"/postoverflows/", postOverflowCTX)
 
-	/* Loading buckets / scenarios */
-	if cConfig.ValidatorMode != "" && len(CustomScenarios) > 0 {
-		log.Infof("Loading (validatormode) scenarios")
-		bucketFiles := []string{}
-		for _, scenarios := range CustomScenarios {
-			bucketFiles = append(bucketFiles, scenarios.Filename)
-		}
-		holders, outputEventChan, err = leaky.LoadBuckets(bucketFiles, cConfig.DataFolder)
-
-	} else {
-		log.Infof("Loading scenarios")
-		holders, outputEventChan, err = leaky.Init(map[string]string{"patterns": cConfig.ConfigFolder + "/scenarios/", "data": cConfig.DataFolder})
-	}
 	if err != nil {
-		log.Fatalf("Scenario loading failed : %v", err)
+		return fmt.Errorf("failed to load postoverflow config : %v", err)
 	}
-	/* buckets/scenarios loaded */
 
+	if cConfig.Profiling {
+		parserCTX.Profiling = true
+		postOverflowCTX.Profiling = true
+	}
+
+	return nil
+}
+
+func GetEnabledScenarios() string {
 	/*keep track of scenarios name for consensus profiling*/
 	var scenariosEnabled string
 	for _, x := range holders {
@@ -196,39 +108,50 @@ func main() {
 		}
 		scenariosEnabled += x.Name
 	}
+	return scenariosEnabled
+}
+
+func LoadBuckets(cConfig *csconfig.CrowdSec) error {
 
+	var err error
+
+	log.Infof("Loading scenarios")
+	holders, outputEventChan, err = leaky.Init(map[string]string{"patterns": cConfig.ConfigFolder + "/scenarios/", "data": cConfig.DataFolder})
+
+	if err != nil {
+		return fmt.Errorf("Scenario loading failed : %v", err)
+	}
 	buckets = leaky.NewBuckets()
 
 	/*restore as well previous state if present*/
 	if cConfig.RestoreMode != "" {
 		log.Warningf("Restoring buckets state from %s", cConfig.RestoreMode)
 		if err := leaky.LoadBucketsState(cConfig.RestoreMode, buckets, holders); err != nil {
-			log.Fatalf("unable to restore buckets : %s", err)
+			return fmt.Errorf("unable to restore buckets : %s", err)
 		}
 	}
 	if cConfig.Profiling {
-		//force the profiling in all buckets
 		for holderIndex := range holders {
 			holders[holderIndex].Profiling = true
 		}
 	}
+	return nil
+}
 
+func LoadOutputs(cConfig *csconfig.CrowdSec) error {
+	var err error
 	/*
 		Load output profiles
 	*/
 	log.Infof("Loading output profiles")
-	outputProfiles, err := outputs.LoadOutputProfiles(cConfig.ConfigFolder + "/profiles.yaml")
+	outputProfiles, err = outputs.LoadOutputProfiles(cConfig.ConfigFolder + "/profiles.yaml")
 	if err != nil || len(outputProfiles) == 0 {
-		log.Fatalf("Failed to load output profiles : %v", err)
-	}
-	/* Linting is done */
-	if cConfig.Linter {
-		return
+		return fmt.Errorf("Failed to load output profiles : %v", err)
 	}
 
-	OutputRunner, err := outputs.NewOutput(cConfig.OutputConfig, cConfig.Daemonize)
+	OutputRunner, err = outputs.NewOutput(cConfig.OutputConfig, cConfig.Daemonize)
 	if err != nil {
-		log.Fatalf("output plugins initialization error : %s", err.Error())
+		return fmt.Errorf("output plugins initialization error : %s", err.Error())
 	}
 
 	/* Init the API connector */
@@ -236,31 +159,37 @@ func main() {
 		log.Infof("Loading API client")
 		var apiConfig = map[string]string{
 			"path":    cConfig.ConfigFolder + "/api.yaml",
-			"profile": scenariosEnabled,
+			"profile": GetEnabledScenarios(),
 		}
 		if err := OutputRunner.InitAPI(apiConfig); err != nil {
-			log.Fatalf(err.Error())
+			return fmt.Errorf("failed to load api : %s", err)
 		}
 	}
+	return nil
+}
 
-	/*if the user is in "single file mode" (might be writting scenario or parsers), allow loading **without** parsers or scenarios */
-	if cConfig.SingleFile == "" {
-		if len(parserNodes) == 0 {
-			log.Fatalf("no parser(s) loaded, abort.")
-		}
+func LoadAcquisition(cConfig *csconfig.CrowdSec) error {
+	var err error
+	//Init the acqusition : from cli or from acquis.yaml file
+	acquisitionCTX, err = acquisition.LoadAcquisitionConfig(cConfig)
+	if err != nil {
+		return fmt.Errorf("Failed to start acquisition : %s", err)
+	}
+	return nil
+}
 
-		if len(holders) == 0 {
-			log.Fatalf("no bucket(s) loaded, abort.")
-		}
+func StartProcessingRoutines(cConfig *csconfig.CrowdSec) (chan types.Event, error) {
 
-		if len(outputProfiles) == 0 {
-			log.Fatalf("no output profile(s) loaded, abort.")
-		}
-	}
+	acquisTomb = tomb.Tomb{}
+	parsersTomb = tomb.Tomb{}
+	bucketsTomb = tomb.Tomb{}
+	outputsTomb = tomb.Tomb{}
+
+	inputLineChan := make(chan types.Event)
+	inputEventChan := make(chan types.Event)
 
-	log.Infof("Starting processing routines")
 	//start go-routines for parsing, buckets pour and ouputs.
-	for i := 0; i < nbParser; i++ {
+	for i := 0; i < cConfig.NbParsers; i++ {
 		parsersTomb.Go(func() error {
 			err := runParse(inputLineChan, inputEventChan, *parserCTX, parserNodes)
 			if err != nil {
@@ -271,7 +200,7 @@ func main() {
 		})
 	}
 
-	for i := 0; i < nbParser; i++ {
+	for i := 0; i < cConfig.NbParsers; i++ {
 		bucketsTomb.Go(func() error {
 			err := runPour(inputEventChan, holders, buckets)
 			if err != nil {
@@ -282,7 +211,7 @@ func main() {
 		})
 	}
 
-	for i := 0; i < nbParser; i++ {
+	for i := 0; i < cConfig.NbParsers; i++ {
 		outputsTomb.Go(func() error {
 			err := runOutput(inputEventChan, outputEventChan, holders, buckets, *postOverflowCTX, postOverflowNodes, outputProfiles, OutputRunner)
 			if err != nil {
@@ -292,19 +221,88 @@ func main() {
 			return nil
 		})
 	}
+	return inputLineChan, nil
+}
 
-	log.Warningf("Starting processing data")
+func main() {
+	var (
+		err error
+	)
 
-	//Init the acqusition : from cli or from acquis.yaml file
-	acquisitionCTX, err = acquisition.LoadAcquisitionConfig(cConfig)
+	cConfig = csconfig.NewCrowdSecConfig()
+
+	// Handle command line arguments
+	if err := cConfig.GetOPT(); err != nil {
+		log.Fatalf(err.Error())
+	}
+	// Configure logging
+	if err = types.SetDefaultLoggerConfig(cConfig.LogMode, cConfig.LogFolder, cConfig.LogLevel); err != nil {
+		log.Fatal(err.Error())
+	}
+
+	log.Infof("Crowdwatch %s", cwversion.VersionStr())
+
+	// Enable profiling early
+	if cConfig.Prometheus {
+		registerPrometheus()
+		cConfig.Profiling = true
+	}
+	if cConfig.Profiling {
+		go runTachymeter(cConfig.HTTPListen)
+	}
+
+	// Start loading configs
+	if err := LoadParsers(cConfig); err != nil {
+		log.Fatalf("Failed to load parsers: %s", err)
+	}
+
+	if err := LoadBuckets(cConfig); err != nil {
+		log.Fatalf("Failed to load scenarios: %s", err)
+
+	}
+
+	if err := LoadOutputs(cConfig); err != nil {
+		log.Fatalf("failed to initialize outputs : %s", err)
+	}
+
+	if err := LoadAcquisition(cConfig); err != nil {
+		log.Fatalf("Error while loading acquisition config : %s", err)
+	}
+
+	/* if it's just linting, we're done */
+	if cConfig.Linter {
+		return
+	}
+
+	/*if the user is in "single file mode" (might be writting scenario or parsers),
+	allow loading **without** parsers or scenarios */
+	if cConfig.SingleFile == "" {
+		if len(parserNodes) == 0 {
+			log.Fatalf("no parser(s) loaded, abort.")
+		}
+
+		if len(holders) == 0 {
+			log.Fatalf("no bucket(s) loaded, abort.")
+		}
+
+		if len(outputProfiles) == 0 {
+			log.Fatalf("no output profile(s) loaded, abort.")
+		}
+	}
+
+	//Start the background routines that comunicate via chan
+	log.Infof("Starting processing routines")
+	inputLineChan, err := StartProcessingRoutines(cConfig)
 	if err != nil {
-		log.Fatalf("Failed to start acquisition : %s", err)
+		log.Fatalf("failed to start processing routines : %s", err)
 	}
-	//start reading in the background
+
+	//Fire!
+	log.Warningf("Starting processing data")
+
 	acquisition.AcquisStartReading(acquisitionCTX, inputLineChan, &acquisTomb)
 
 	if err = serve(*OutputRunner); err != nil {
 		log.Fatalf(err.Error())
 	}
-
 }
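The stages started by StartProcessingRoutines above only talk to each other through channels: acquisition writes raw lines to inputLineChan, runParse turns them into events on inputEventChan, runPour feeds those events to the buckets, and runOutput consumes what the buckets emit on outputEventChan (per the comment on outputEventChan in the var block). Below is a minimal, hypothetical sketch of that pipeline pattern using the same tomb.v2 package; the toy stage functions and string channels are stand-ins, not CrowdSec's real types or stages.

package main

import (
	"fmt"

	"gopkg.in/tomb.v2"
)

func main() {
	var workers tomb.Tomb
	lines := make(chan string)  // stand-in for inputLineChan (raw log lines)
	events := make(chan string) // stand-in for inputEventChan (parsed events)

	// "parser" stage: read lines, emit events
	workers.Go(func() error {
		for l := range lines {
			events <- "parsed:" + l
		}
		close(events)
		return nil
	})

	// "output" stage: consume events
	workers.Go(func() error {
		for e := range events {
			fmt.Println(e)
		}
		return nil
	})

	lines <- "a log line"
	close(lines)
	_ = workers.Wait()
}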

cmd/crowdsec/serve.go (+35 -9)

@@ -6,6 +6,7 @@ import (
 	"syscall"
 	"time"
 
+	"github.com/crowdsecurity/crowdsec/pkg/acquisition"
 	leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
 	"github.com/crowdsecurity/crowdsec/pkg/outputs"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
@@ -15,28 +16,53 @@ import (
 )
 
 func reloadHandler(sig os.Signal) error {
-
+	var bucketsState string = "buckets_state.json"
 	//stop go routines
 	if err := ShutdownRoutines(); err != nil {
 		log.Errorf("Failed to shut down routines: %s", err)
 	}
 	//todo : properly stop acquis with the tail readers
-	if err := leaky.DumpBucketsStateAt("buckets_state.json", time.Now(), buckets); err != nil {
+	if err := leaky.DumpBucketsStateAt(bucketsState, time.Now(), buckets); err != nil {
 		log.Fatalf("Failed dumping bucket state : %s", err)
 	}
 	//close logs
 	types.LogOutput.Close()
-	//todo : lumber jack
-	//todo : close sql tx
 
-	//reload configurations
+	//reload all and start processing again :)
+	if err := LoadParsers(cConfig); err != nil {
+		log.Fatalf("Failed to load parsers: %s", err)
+	}
+
+	if err := LoadBuckets(cConfig); err != nil {
+		log.Fatalf("Failed to load scenarios: %s", err)
 
+	}
 	//restore bucket state
-	//start processing routines
-	//start acquis routine
+	log.Warningf("Restoring buckets state from %s", bucketsState)
+	if err := leaky.LoadBucketsState(bucketsState, buckets, holders); err != nil {
+		return fmt.Errorf("unable to restore buckets : %s", err)
+	}
 
-	log.Printf("reloading")
-	dumpMetrics()
+	if err := LoadOutputs(cConfig); err != nil {
+		log.Fatalf("failed to initialize outputs : %s", err)
+	}
+
+	if err := LoadAcquisition(cConfig); err != nil {
+		log.Fatalf("Error while loading acquisition config : %s", err)
+	}
+	//Start the background routines that comunicate via chan
+	log.Infof("Starting processing routines")
+	inputLineChan, err := StartProcessingRoutines(cConfig)
+	if err != nil {
+		log.Fatalf("failed to start processing routines : %s", err)
+	}
+
+	//Fire!
+	log.Warningf("Starting processing data")
+
+	acquisition.AcquisStartReading(acquisitionCTX, inputLineChan, &acquisTomb)
+
+	log.Printf("Reload is finished")
 	return nil
 }
 

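reloadHandler takes an os.Signal, so it is presumably registered against a signal such as SIGHUP somewhere else in serve.go; that wiring is not part of this diff. A minimal sketch, assuming such a setup, with a stub body standing in for the real reload logic shown above:

package main

import (
	"os"
	"os/signal"
	"syscall"

	log "github.com/sirupsen/logrus"
)

// stub with the same signature; the real reloadHandler reloads configs,
// restores bucket state and restarts the processing routines
func reloadHandler(sig os.Signal) error {
	log.Infof("reload requested by signal %s", sig)
	return nil
}

func main() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP)
	for sig := range c {
		if err := reloadHandler(sig); err != nil {
			log.Errorf("reload failed: %s", err)
		}
	}
}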
pkg/csconfig/config.go (+2 -0)

@@ -32,6 +32,7 @@ type CrowdSec struct {
 	SQLiteFile      string    `yaml:"sqlite_path,omitempty"` //path to sqlite output
 	APIMode         bool      `yaml:"apimode,omitempty"`     //true -> enable api push
 	CsCliFolder     string    `yaml:"cscli_dir"`             //cscli folder
+	NbParsers       int       `yaml:"parser_routines"`       //the number of go routines to start for parsing
 	Linter          bool
 	Prometheus      bool
 	HTTPListen      string `yaml:"http_listen,omitempty"`
@@ -55,6 +56,7 @@ func NewCrowdSecConfig() *CrowdSec {
 		LogMode:       "stdout",
 		SQLiteFile:    "./test.db",
 		APIMode:       false,
+		NbParsers:     1,
 		Prometheus:    false,
 		HTTPListen:    "127.0.0.1:6060",
 	}
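The new parser_routines key maps to NbParsers through the yaml tag above and defaults to 1 via NewCrowdSecConfig(). A small self-contained sketch of how a value from the configuration file would override that default; the conf struct here is a local stand-in mirroring the tag, not the csconfig package itself:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// local stand-in carrying the same yaml tag as csconfig.CrowdSec.NbParsers
type conf struct {
	NbParsers int `yaml:"parser_routines"`
}

func main() {
	c := conf{NbParsers: 1} // default, as set by NewCrowdSecConfig()
	if err := yaml.Unmarshal([]byte("parser_routines: 4\n"), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.NbParsers) // prints 4
}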