
Polling API Integration (#1715)

Co-authored-by: alteredCoder <kevin@crowdsec.net>
Co-authored-by: he2ss <hamza.essahely@gmail.com>
Co-authored-by: Sebastien Blot <sebastien@crowdsec.net>
Thibault "bui" Koechlin, 2 years ago
commit e927717fa0
71 changed files with 5890 additions and 347 deletions
  1. cmd/crowdsec-cli/alerts.go (+65 -14)
  2. cmd/crowdsec-cli/alerts_table.go (+3 -1)
  3. cmd/crowdsec-cli/capi.go (+11 -10)
  4. cmd/crowdsec-cli/config.go (+0 -1)
  5. cmd/crowdsec-cli/console.go (+22 -4)
  6. cmd/crowdsec-cli/console_table.go (+6 -0)
  7. cmd/crowdsec-cli/decisions.go (+12 -16)
  8. cmd/crowdsec/output.go (+5 -1)
  9. go.mod (+6 -5)
  10. go.sum (+7 -4)
  11. pkg/apiclient/client.go (+18 -8)
  12. pkg/apiclient/config.go (+1 -0)
  13. pkg/apiclient/decisions_sync_service.go (+34 -0)
  14. pkg/apiserver/apic.go (+109 -50)
  15. pkg/apiserver/apic_test.go (+43 -27)
  16. pkg/apiserver/apiserver.go (+69 -3)
  17. pkg/apiserver/controllers/controller.go (+19 -17)
  18. pkg/apiserver/controllers/v1/alerts.go (+16 -5)
  19. pkg/apiserver/controllers/v1/controller.go (+25 -18)
  20. pkg/apiserver/controllers/v1/decisions.go (+22 -4)
  21. pkg/apiserver/papi.go (+263 -0)
  22. pkg/apiserver/papi_cmd.go (+132 -0)
  23. pkg/csconfig/api.go (+32 -6)
  24. pkg/csconfig/api_test.go (+24 -15)
  25. pkg/csconfig/console.go (+12 -1)
  26. pkg/csprofiles/csprofiles.go (+1 -1)
  27. pkg/database/alerts.go (+187 -18)
  28. pkg/database/config.go (+33 -0)
  29. pkg/database/decisions.go (+101 -31)
  30. pkg/database/ent/alert.go (+11 -1)
  31. pkg/database/ent/alert/alert.go (+3 -0)
  32. pkg/database/ent/alert/where.go (+132 -0)
  33. pkg/database/ent/alert_create.go (+22 -0)
  34. pkg/database/ent/alert_update.go (+66 -0)
  35. pkg/database/ent/client.go (+113 -16)
  36. pkg/database/ent/config.go (+7 -6)
  37. pkg/database/ent/configitem.go (+138 -0)
  38. pkg/database/ent/configitem/configitem.go (+54 -0)
  39. pkg/database/ent/configitem/where.go (+555 -0)
  40. pkg/database/ent/configitem_create.go (+296 -0)
  41. pkg/database/ent/configitem_delete.go (+111 -0)
  42. pkg/database/ent/configitem_query.go (+921 -0)
  43. pkg/database/ent/configitem_update.go (+418 -0)
  44. pkg/database/ent/decision.go (+11 -1)
  45. pkg/database/ent/decision/decision.go (+3 -0)
  46. pkg/database/ent/decision/where.go (+132 -0)
  47. pkg/database/ent/decision_create.go (+22 -0)
  48. pkg/database/ent/decision_update.go (+66 -0)
  49. pkg/database/ent/ent.go (+8 -6)
  50. pkg/database/ent/hook/hook.go (+13 -0)
  51. pkg/database/ent/migrate/schema.go (+19 -2)
  52. pkg/database/ent/mutation.go (+670 -8)
  53. pkg/database/ent/predicate/predicate.go (+3 -0)
  54. pkg/database/ent/runtime.go (+15 -0)
  55. pkg/database/ent/schema/alert.go (+1 -0)
  56. pkg/database/ent/schema/config.go (+31 -0)
  57. pkg/database/ent/schema/decision.go (+1 -0)
  58. pkg/database/ent/tx.go (+3 -0)
  59. pkg/database/machines.go (+3 -2)
  60. pkg/fflag/crowdsec.go (+5 -1)
  61. pkg/longpollclient/client.go (+191 -0)
  62. pkg/models/add_signals_request_item.go (+26 -2)
  63. pkg/models/add_signals_request_item_decisions.go (+73 -0)
  64. pkg/models/add_signals_request_item_decisions_item.go (+201 -0)
  65. pkg/models/add_signals_request_item_source.go (+109 -0)
  66. pkg/models/alert.go (+18 -1)
  67. pkg/models/decision.go (+17 -0)
  68. pkg/models/decisions_delete_request.go (+67 -0)
  69. pkg/models/decisions_delete_request_item.go (+27 -0)
  70. pkg/models/localapi_swagger.yaml (+9 -41)
  71. pkg/types/constants.go (+21 -0)

+ 65 - 14
cmd/crowdsec-cli/alerts.go

@@ -10,6 +10,7 @@ import (
 	"sort"
 	"strconv"
 	"strings"
+	"time"
 
 	"github.com/fatih/color"
 	"github.com/go-openapi/strfmt"
@@ -22,9 +23,9 @@ import (
 	"github.com/crowdsecurity/crowdsec/pkg/cwversion"
 	"github.com/crowdsecurity/crowdsec/pkg/database"
 	"github.com/crowdsecurity/crowdsec/pkg/models"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
 )
 
-
 func DecisionsFromAlert(alert *models.Alert) string {
 	ret := ""
 	var decMap = make(map[string]int)
@@ -45,6 +46,50 @@ func DecisionsFromAlert(alert *models.Alert) string {
 	return ret
 }
 
+func DateFromAlert(alert *models.Alert) string {
+	ts, err := time.Parse(time.RFC3339, alert.CreatedAt)
+	if err != nil {
+		log.Infof("while parsing %s with %s : %s", alert.CreatedAt, time.RFC3339, err)
+		return alert.CreatedAt
+	}
+	return ts.Format(time.RFC822)
+}
+
+func SourceFromAlert(alert *models.Alert) string {
+
+	//more than one item, just number and scope
+	if len(alert.Decisions) > 1 {
+		return fmt.Sprintf("%d %ss (%s)", len(alert.Decisions), *alert.Decisions[0].Scope, *alert.Decisions[0].Origin)
+	}
+
+	//fallback on single decision information
+	if len(alert.Decisions) == 1 {
+		return fmt.Sprintf("%s:%s", *alert.Decisions[0].Scope, *alert.Decisions[0].Value)
+	}
+
+	//try to compose a human friendly version
+	if *alert.Source.Value != "" && *alert.Source.Scope != "" {
+		scope := ""
+		scope = fmt.Sprintf("%s:%s", *alert.Source.Scope, *alert.Source.Value)
+		extra := ""
+		if alert.Source.Cn != "" {
+			extra = alert.Source.Cn
+		}
+		if alert.Source.AsNumber != "" {
+			extra += fmt.Sprintf("/%s", alert.Source.AsNumber)
+		}
+		if alert.Source.AsName != "" {
+			extra += fmt.Sprintf("/%s", alert.Source.AsName)
+		}
+
+		if extra != "" {
+			scope += " (" + extra + ")"
+		}
+		return scope
+	}
+	return ""
+}
+
 func AlertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error {
 
 	if csConfig.Cscli.Output == "raw" {
@@ -97,17 +142,18 @@ func DisplayOneAlert(alert *models.Alert, withDetail bool) error {
 		if *alert.Source.Value != "" {
 			scopeAndValue += ":" + *alert.Source.Value
 		}
-		fmt.Printf(" - ID         : %d\n", alert.ID)
-		fmt.Printf(" - Date       : %s\n", alert.CreatedAt)
-		fmt.Printf(" - Machine    : %s\n", alert.MachineID)
-		fmt.Printf(" - Simulation : %v\n", *alert.Simulated)
-		fmt.Printf(" - Reason     : %s\n", *alert.Scenario)
+		fmt.Printf(" - ID           : %d\n", alert.ID)
+		fmt.Printf(" - Date         : %s\n", alert.CreatedAt)
+		fmt.Printf(" - Machine      : %s\n", alert.MachineID)
+		fmt.Printf(" - Simulation   : %v\n", *alert.Simulated)
+		fmt.Printf(" - Reason       : %s\n", *alert.Scenario)
 		fmt.Printf(" - Events Count : %d\n", *alert.EventsCount)
-		fmt.Printf(" - Scope:Value: %s\n", scopeAndValue)
-		fmt.Printf(" - Country    : %s\n", alert.Source.Cn)
-		fmt.Printf(" - AS         : %s\n", alert.Source.AsName)
-		fmt.Printf(" - Begin      : %s\n", *alert.StartAt)
-		fmt.Printf(" - End        : %s\n\n", *alert.StopAt)
+		fmt.Printf(" - Scope:Value  : %s\n", scopeAndValue)
+		fmt.Printf(" - Country      : %s\n", alert.Source.Cn)
+		fmt.Printf(" - AS           : %s\n", alert.Source.AsName)
+		fmt.Printf(" - Begin        : %s\n", *alert.StartAt)
+		fmt.Printf(" - End          : %s\n", *alert.StopAt)
+		fmt.Printf(" - UUID         : %s\n\n", alert.UUID)
 
 		alertDecisionsTable(color.Output, alert)
 
@@ -144,7 +190,6 @@ func DisplayOneAlert(alert *models.Alert, withDetail bool) error {
 	return nil
 }
 
-
 func NewAlertsCmd() *cobra.Command {
 	var cmdAlerts = &cobra.Command{
 		Use:               "alerts [action]",
@@ -183,7 +228,6 @@ func NewAlertsCmd() *cobra.Command {
 	return cmdAlerts
 }
 
-
 func NewAlertsListCmd() *cobra.Command {
 	var alertListFilter = apiclient.AlertsListOpts{
 		ScopeEquals:    new(string),
@@ -195,6 +239,7 @@ func NewAlertsListCmd() *cobra.Command {
 		Until:          new(string),
 		TypeEquals:     new(string),
 		IncludeCAPI:    new(bool),
+		OriginEquals:   new(string),
 	}
 	var limit = new(int)
 	contained := new(bool)
@@ -267,9 +312,15 @@ cscli alerts list --type ban`,
 			if *alertListFilter.RangeEquals == "" {
 				alertListFilter.RangeEquals = nil
 			}
+
+			if *alertListFilter.OriginEquals == "" {
+				alertListFilter.OriginEquals = nil
+			}
+
 			if contained != nil && *contained {
 				alertListFilter.Contains = new(bool)
 			}
+
 			alerts, _, err := Client.Alerts.List(context.Background(), alertListFilter)
 			if err != nil {
 				log.Fatalf("Unable to list alerts : %v", err)
@@ -291,6 +342,7 @@ cscli alerts list --type ban`,
 	cmdAlertsList.Flags().StringVar(alertListFilter.TypeEquals, "type", "", "restrict to alerts with given decision type (ie. ban, captcha)")
 	cmdAlertsList.Flags().StringVar(alertListFilter.ScopeEquals, "scope", "", "restrict to alerts of this scope (ie. ip,range)")
 	cmdAlertsList.Flags().StringVarP(alertListFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope")
+	cmdAlertsList.Flags().StringVar(alertListFilter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ",")))
 	cmdAlertsList.Flags().BoolVar(contained, "contained", false, "query decisions contained by range")
 	cmdAlertsList.Flags().BoolVarP(&printMachine, "machine", "m", false, "print machines that sent alerts")
 	cmdAlertsList.Flags().IntVarP(limit, "limit", "l", 50, "limit size of alerts list table (0 to view all alerts)")
@@ -396,7 +448,6 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`,
 	return cmdAlertsDelete
 }
 
-
 func NewAlertsInspectCmd() *cobra.Command {
 	var details bool
 	var cmdAlertsInspect = &cobra.Command{

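Editor's note: the two new helpers above (DateFromAlert, SourceFromAlert) only format data already carried by a *models.Alert. A minimal usage sketch, assuming it sits in the same cmd/crowdsec-cli package as the helpers; the alert values are invented for illustration:

package main

import (
	"fmt"

	"github.com/crowdsecurity/crowdsec/pkg/models"
	"github.com/crowdsecurity/crowdsec/pkg/types"
)

func exampleAlertSummary() {
	alert := &models.Alert{
		CreatedAt: "2023-01-12T22:51:43Z",
		Source: &models.Source{
			Scope:    types.StrPtr("ip"),
			Value:    types.StrPtr("1.2.3.4"),
			Cn:       "FR",
			AsNumber: "1234",
			AsName:   "Example AS",
		},
	}
	// No decisions attached, so SourceFromAlert falls back to scope:value plus country/AS info:
	// "ip:1.2.3.4 (FR/1234/Example AS)"
	fmt.Println(SourceFromAlert(alert))
	// RFC822 rendering of CreatedAt ("12 Jan 23 22:51 UTC"); the raw string is returned if parsing fails.
	fmt.Println(DateFromAlert(alert))
}
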
+ 3 - 1
cmd/crowdsec-cli/alerts_table.go

@@ -23,7 +23,9 @@ func alertsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachine b
 
 	for _, alertItem := range *alerts {
 		displayVal := *alertItem.Source.Scope
-		if *alertItem.Source.Value != "" {
+		if len(alertItem.Decisions) > 1 {
+			displayVal = fmt.Sprintf("%d %ss", len(alertItem.Decisions), *alertItem.Decisions[0].Scope)
+		} else if *alertItem.Source.Value != "" {
 			displayVal += ":" + *alertItem.Source.Value
 		}
 

+ 11 - 10
cmd/crowdsec-cli/capi.go

@@ -6,17 +6,17 @@ import (
 	"net/url"
 	"os"
 
-	"github.com/go-openapi/strfmt"
-	"github.com/pkg/errors"
-	log "github.com/sirupsen/logrus"
-	"github.com/spf13/cobra"
-	"gopkg.in/yaml.v2"
-
 	"github.com/crowdsecurity/crowdsec/pkg/apiclient"
 	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
 	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
 	"github.com/crowdsecurity/crowdsec/pkg/cwversion"
 	"github.com/crowdsecurity/crowdsec/pkg/models"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+	"github.com/go-openapi/strfmt"
+	"github.com/pkg/errors"
+	log "github.com/sirupsen/logrus"
+	"github.com/spf13/cobra"
+	"gopkg.in/yaml.v2"
 )
 
 const CAPIBaseURL string = "https://api.crowdsec.net/"
@@ -64,9 +64,9 @@ func NewCapiRegisterCmd() *cobra.Command {
 				log.Fatalf("unable to generate machine id: %s", err)
 			}
 			password := strfmt.Password(generatePassword(passwordLength))
-			apiurl, err := url.Parse(CAPIBaseURL)
+			apiurl, err := url.Parse(types.CAPIBaseURL)
 			if err != nil {
-				log.Fatalf("unable to parse api url %s : %s", CAPIBaseURL, err)
+				log.Fatalf("unable to parse api url %s : %s", types.CAPIBaseURL, err)
 			}
 			_, err = apiclient.RegisterClient(&apiclient.Config{
 				MachineID:     capiUser,
@@ -77,7 +77,7 @@ func NewCapiRegisterCmd() *cobra.Command {
 			}, nil)
 
 			if err != nil {
-				log.Fatalf("api client register ('%s'): %s", CAPIBaseURL, err)
+				log.Fatalf("api client register ('%s'): %s", types.CAPIBaseURL, err)
 			}
 			log.Printf("Successfully registered to Central API (CAPI)")
 
@@ -93,7 +93,8 @@ func NewCapiRegisterCmd() *cobra.Command {
 			apiCfg := csconfig.ApiCredentialsCfg{
 				Login:    capiUser,
 				Password: password.String(),
-				URL:      CAPIBaseURL,
+				URL:      types.CAPIBaseURL,
+				PapiURL:  types.PAPIBaseURL,
 			}
 			apiConfigDump, err := yaml.Marshal(apiCfg)
 			if err != nil {

+ 0 - 1
cmd/crowdsec-cli/config.go

@@ -4,7 +4,6 @@ import (
 	"github.com/spf13/cobra"
 )
 
-
 func NewConfigCmd() *cobra.Command {
 	cmdConfig := &cobra.Command{
 		Use:               "config [command]",

+ 22 - 4
cmd/crowdsec-cli/console.go

@@ -19,6 +19,7 @@ import (
 	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
 	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
 	"github.com/crowdsecurity/crowdsec/pkg/cwversion"
+	"github.com/crowdsecurity/crowdsec/pkg/fflag"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 )
 
@@ -209,10 +210,11 @@ Disable given information push to the central API.`,
 				}
 
 				rows := [][]string{
-					{"share_manual_decisions", fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareManualDecisions)},
-					{"share_custom", fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios)},
-					{"share_tainted", fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios)},
-					{"share_context", fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareContext)},
+					{csconfig.SEND_MANUAL_SCENARIOS, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareManualDecisions)},
+					{csconfig.SEND_CUSTOM_SCENARIOS, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios)},
+					{csconfig.SEND_TAINTED_SCENARIOS, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios)},
+					{csconfig.SEND_CONTEXT, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareContext)},
+					{csconfig.CONSOLE_MANAGEMENT, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ReceiveDecisions)},
 				}
 				for _, row := range rows {
 					err = csvwriter.Write(row)
@@ -232,6 +234,22 @@ Disable given information push to the central API.`,
 func SetConsoleOpts(args []string, wanted bool) {
 	for _, arg := range args {
 		switch arg {
+		case csconfig.CONSOLE_MANAGEMENT:
+			if !fflag.PapiClient.IsEnabled() {
+				log.Fatalf("Feature flag %s is disabled, cannot set %s", fflag.PapiClient.Name, csconfig.CONSOLE_MANAGEMENT)
+			}
+			/*for each flag check if it's already set before setting it*/
+			if csConfig.API.Server.ConsoleConfig.ReceiveDecisions != nil {
+				if *csConfig.API.Server.ConsoleConfig.ReceiveDecisions == wanted {
+					log.Infof("%s already set to %t", csconfig.CONSOLE_MANAGEMENT, wanted)
+				} else {
+					log.Infof("%s set to %t", csconfig.CONSOLE_MANAGEMENT, wanted)
+					*csConfig.API.Server.ConsoleConfig.ReceiveDecisions = wanted
+				}
+			} else {
+				log.Infof("%s set to %t", csconfig.CONSOLE_MANAGEMENT, wanted)
+				csConfig.API.Server.ConsoleConfig.ReceiveDecisions = types.BoolPtr(wanted)
+			}
 		case csconfig.SEND_CUSTOM_SCENARIOS:
			/*for each flag check if it's already set before setting it*/
 			if csConfig.API.Server.ConsoleConfig.ShareCustomScenarios != nil {

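Editor's note: every console option in SetConsoleOpts repeats the same nil-check-then-set dance on a *bool. A hypothetical refactoring sketch of that pattern, not part of this commit, using only helpers visible above (types.BoolPtr, logrus):

package main

import (
	log "github.com/sirupsen/logrus"

	"github.com/crowdsecurity/crowdsec/pkg/types"
)

// setBoolOption applies "wanted" to a console option and logs whether it changed.
// Example call (same package): setBoolOption(&csConfig.API.Server.ConsoleConfig.ReceiveDecisions, wanted, csconfig.CONSOLE_MANAGEMENT)
func setBoolOption(current **bool, wanted bool, name string) {
	if *current == nil {
		log.Infof("%s set to %t", name, wanted)
		*current = types.BoolPtr(wanted)
		return
	}
	if **current == wanted {
		log.Infof("%s already set to %t", name, wanted)
		return
	}
	log.Infof("%s set to %t", name, wanted)
	**current = wanted
}
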
+ 6 - 0
cmd/crowdsec-cli/console_table.go

@@ -47,6 +47,12 @@ func cmdConsoleStatusTable(out io.Writer, csConfig csconfig.Config) {
 				activated = string(emoji.CheckMarkButton)
 			}
 			t.AddRow(option, activated, "Send context with alerts to the console")
+		case csconfig.CONSOLE_MANAGEMENT:
+			activated := string(emoji.CrossMark)
+			if *csConfig.API.Server.ConsoleConfig.ReceiveDecisions {
+				activated = string(emoji.CheckMarkButton)
+			}
+			t.AddRow(option, activated, "Receive decisions from console")
 		}
 	}
 

+ 12 - 16
cmd/crowdsec-cli/decisions.go

@@ -139,7 +139,6 @@ func NewDecisionsCmd() *cobra.Command {
 	return cmdDecisions
 }
 
-
 func NewDecisionsListCmd() *cobra.Command {
 	var filter = apiclient.AlertsListOpts{
 		ValueEquals:    new(string),
@@ -252,7 +251,7 @@ cscli decisions list -t ban
 	cmdDecisionsList.Flags().StringVar(filter.Until, "until", "", "restrict to alerts older than until (ie. 4h, 30d)")
 	cmdDecisionsList.Flags().StringVarP(filter.TypeEquals, "type", "t", "", "restrict to this decision type (ie. ban,captcha)")
 	cmdDecisionsList.Flags().StringVar(filter.ScopeEquals, "scope", "", "restrict to this scope (ie. ip,range,session)")
-	cmdDecisionsList.Flags().StringVar(filter.OriginEquals, "origin", "", "restrict to this origin (ie. lists,CAPI,cscli,cscli-import,crowdsec)")
+	cmdDecisionsList.Flags().StringVar(filter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ",")))
 	cmdDecisionsList.Flags().StringVarP(filter.ValueEquals, "value", "v", "", "restrict to this value (ie. 1.2.3.4,userName)")
 	cmdDecisionsList.Flags().StringVarP(filter.ScenarioEquals, "scenario", "s", "", "restrict to this scenario (ie. crowdsecurity/ssh-bf)")
 	cmdDecisionsList.Flags().StringVarP(filter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value <IP>)")
@@ -265,7 +264,6 @@ cscli decisions list -t ban
 	return cmdDecisionsList
 }
 
-
 func NewDecisionsAddCmd() *cobra.Command {
 	var (
 		addIP       string
@@ -290,9 +288,8 @@ cscli decisions add --scope username --value foobar
 		DisableAutoGenTag: true,
 		Run: func(cmd *cobra.Command, args []string) {
 			var err error
-			var ipRange string
 			alerts := models.AddAlertsRequest{}
-			origin := "cscli"
+			origin := types.CscliOrigin
 			capacity := int32(0)
 			leakSpeed := "0"
 			eventsCount := int32(1)
@@ -340,12 +337,13 @@ cscli decisions add --scope username --value foobar
 				Scenario:        &addReason,
 				ScenarioVersion: &empty,
 				Simulated:       &simulated,
+				//setting empty scope/value broke plugins, and it didn't seem to be needed anymore w/ latest papi changes
 				Source: &models.Source{
 					AsName:   empty,
 					AsNumber: empty,
 					Cn:       empty,
 					IP:       addValue,
-					Range:    ipRange,
+					Range:    "",
 					Scope:    &addScope,
 					Value:    &addValue,
 				},
@@ -376,7 +374,6 @@ cscli decisions add --scope username --value foobar
 	return cmdDecisionsAdd
 }
 
-
 func NewDecisionsDeleteCmd() *cobra.Command {
 	var delFilter = apiclient.DecisionsDeleteOpts{
 		ScopeEquals:    new(string),
@@ -476,18 +473,17 @@ cscli decisions delete --type captcha
 	return cmdDecisionsDelete
 }
 
-
 func NewDecisionsImportCmd() *cobra.Command {
 	var (
 		defaultDuration = "4h"
 		defaultScope    = "ip"
 		defaultType     = "ban"
 		defaultReason   = "manual"
-		importDuration string
-		importScope    string
-		importReason   string
-		importType     string
-		importFile     string
+		importDuration  string
+		importScope     string
+		importReason    string
+		importType      string
+		importFile      string
 	)
 
 	var cmdDecisionImport = &cobra.Command{
@@ -551,7 +547,6 @@ decisions.json :
 					decisionLine.Duration = importDuration
 					log.Debugf("'duration' line %d, using supplied value: '%s'", line, importDuration)
 				}
-				decisionLine.Origin = "cscli-import"
+				decisionLine.Origin = types.CscliImportOrigin
 
 				if decisionLine.Scenario == "" {
 					decisionLine.Scenario = defaultReason
@@ -591,11 +587,11 @@ decisions.json :
 			alerts := models.AddAlertsRequest{}
 			importAlert := models.Alert{
 				CreatedAt: time.Now().UTC().Format(time.RFC3339),
-				Scenario:  types.StrPtr(fmt.Sprintf("add: %d IPs", len(decisionsList))),
+				Scenario:  types.StrPtr(fmt.Sprintf("import %s : %d IPs", importFile, len(decisionsList))),
 				Message:   types.StrPtr(""),
 				Events:    []*models.Event{},
 				Source: &models.Source{
-					Scope: types.StrPtr("cscli/manual-import"),
+					Scope: types.StrPtr(""),
 					Value: types.StrPtr(""),
 				},
 				StartAt:         types.StrPtr(time.Now().UTC().Format(time.RFC3339)),

+ 5 - 1
cmd/crowdsec/output.go

@@ -78,7 +78,10 @@ func runOutput(input chan types.Event, overflow chan types.Event, buckets *leaky
 	if err != nil {
 		return errors.Wrapf(err, "parsing api url ('%s'): %s", apiConfig.URL, err)
 	}
-
+	papiURL, err := url.Parse(apiConfig.PapiURL)
+	if err != nil {
+		return errors.Wrapf(err, "parsing polling api url ('%s'): %s", apiConfig.PapiURL, err)
+	}
 	password := strfmt.Password(apiConfig.Password)
 
 	Client, err := apiclient.NewClient(&apiclient.Config{
@@ -87,6 +90,7 @@ func runOutput(input chan types.Event, overflow chan types.Event, buckets *leaky
 		Scenarios:      scenarios,
 		UserAgent:      fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()),
 		URL:            apiURL,
+		PapiURL:        papiURL,
 		VersionPrefix:  "v1",
 		UpdateScenario: cwhub.GetInstalledScenariosAsString,
 	})

+ 6 - 5
go.mod

@@ -47,11 +47,11 @@ require (
 	github.com/oschwald/geoip2-golang v1.4.0
 	github.com/oschwald/maxminddb-golang v1.8.0
 	github.com/pkg/errors v0.9.1
-	github.com/prometheus/client_golang v1.13.0
-	github.com/prometheus/client_model v0.2.0
+	github.com/prometheus/client_golang v1.14.0
+	github.com/prometheus/client_model v0.3.0
 	github.com/prometheus/prom2json v1.3.0
 	github.com/r3labs/diff/v2 v2.14.1
-	github.com/sirupsen/logrus v1.8.1
+	github.com/sirupsen/logrus v1.9.0
 	github.com/spf13/cobra v1.5.0
 	github.com/stretchr/testify v1.8.0
 	golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d
@@ -69,7 +69,10 @@ require (
 	github.com/aquasecurity/table v1.8.0
 	github.com/beevik/etree v1.1.0
 	github.com/blackfireio/osinfo v1.0.3
+	github.com/bluele/gcache v0.0.2
 	github.com/goccy/go-yaml v1.9.7
+	github.com/gofrs/uuid v4.0.0+incompatible
+	github.com/golang-jwt/jwt/v4 v4.2.0
 	github.com/google/winops v0.0.0-20211216095627-f0e86eb1453b
 	github.com/ivanpirog/coloredcobra v1.0.1
 	github.com/mattn/go-isatty v0.0.14
@@ -91,7 +94,6 @@ require (
 	github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/bluele/gcache v0.0.2 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/containerd/containerd v1.6.12 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
@@ -111,7 +113,6 @@ require (
 	github.com/go-playground/validator/v10 v10.10.0 // indirect
 	github.com/go-stack/stack v1.8.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
 	github.com/golang/glog v0.0.0-20210429001901-424d2337a529 // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/go-cmp v0.5.8 // indirect

+ 7 - 4
go.sum

@@ -777,14 +777,15 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
-github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
+github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
@@ -845,8 +846,9 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
 github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -1183,6 +1185,7 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=

+ 18 - 8
pkg/apiclient/client.go

@@ -27,15 +27,21 @@ type ApiClient struct {
 	common service
 	/*config stuff*/
 	BaseURL   *url.URL
+	PapiURL   *url.URL
 	URLPrefix string
 	UserAgent string
 	/*exposed Services*/
-	Decisions *DecisionsService
-	Alerts    *AlertsService
-	Auth      *AuthService
-	Metrics   *MetricsService
-	Signal    *SignalService
-	HeartBeat *HeartBeatService
+	Decisions      *DecisionsService
+	DecisionDelete *DecisionDeleteService
+	Alerts         *AlertsService
+	Auth           *AuthService
+	Metrics        *MetricsService
+	Signal         *SignalService
+	HeartBeat      *HeartBeatService
+}
+
+func (a *ApiClient) GetClient() *http.Client {
+	return a.client
 }
 
 type service struct {
@@ -57,14 +63,17 @@ func NewClient(config *Config) (*ApiClient, error) {
 	if Cert != nil {
 		tlsconfig.Certificates = []tls.Certificate{*Cert}
 	}
-	http.DefaultTransport.(*http.Transport).TLSClientConfig = &tlsconfig
-	c := &ApiClient{client: t.Client(), BaseURL: config.URL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix}
+	if ht, ok := http.DefaultTransport.(*http.Transport); ok {
+		ht.TLSClientConfig = &tlsconfig
+	}
+	c := &ApiClient{client: t.Client(), BaseURL: config.URL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix, PapiURL: config.PapiURL}
 	c.common.client = c
 	c.Decisions = (*DecisionsService)(&c.common)
 	c.Alerts = (*AlertsService)(&c.common)
 	c.Auth = (*AuthService)(&c.common)
 	c.Metrics = (*MetricsService)(&c.common)
 	c.Signal = (*SignalService)(&c.common)
+	c.DecisionDelete = (*DecisionDeleteService)(&c.common)
 	c.HeartBeat = (*HeartBeatService)(&c.common)
 
 	return c, nil
@@ -90,6 +99,7 @@ func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *htt
 	c.Auth = (*AuthService)(&c.common)
 	c.Metrics = (*MetricsService)(&c.common)
 	c.Signal = (*SignalService)(&c.common)
+	c.DecisionDelete = (*DecisionDeleteService)(&c.common)
 	c.HeartBeat = (*HeartBeatService)(&c.common)
 
 	return c, nil

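Editor's note: GetClient is a new accessor exposing the authenticated *http.Client built by NewClient. A short sketch of a caller reusing that transport for its own requests; the wrapper function and URL are hypothetical:

package main

import (
	"net/http"

	"github.com/crowdsecurity/crowdsec/pkg/apiclient"
)

// doAuthenticatedGet reuses the HTTP client (and the transport configured by
// apiclient.NewClient) instead of building a new one.
func doAuthenticatedGet(api *apiclient.ApiClient, rawURL string) (*http.Response, error) {
	return api.GetClient().Get(rawURL)
}
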
+ 1 - 0
pkg/apiclient/config.go

@@ -11,6 +11,7 @@ type Config struct {
 	Password       strfmt.Password
 	Scenarios      []string
 	URL            *url.URL
+	PapiURL        *url.URL
 	VersionPrefix  string
 	UserAgent      string
 	UpdateScenario func() ([]string, error)

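Editor's note: the new PapiURL field rides along with the rest of the client configuration; cmd/crowdsec/output.go above shows the real wiring. A condensed sketch with placeholder URLs and machine ID:

package main

import (
	"net/url"

	"github.com/crowdsecurity/crowdsec/pkg/apiclient"
)

func newClientWithPapi() (*apiclient.ApiClient, error) {
	apiURL, err := url.Parse("https://lapi.example.local/")
	if err != nil {
		return nil, err
	}
	papiURL, err := url.Parse("https://papi.example.local/")
	if err != nil {
		return nil, err
	}
	return apiclient.NewClient(&apiclient.Config{
		MachineID:     "example-machine",
		UserAgent:     "crowdsec/test",
		URL:           apiURL,
		PapiURL:       papiURL, // new in this change, stored on ApiClient.PapiURL
		VersionPrefix: "v1",
	})
}
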
+ 34 - 0
pkg/apiclient/decisions_sync_service.go

@@ -0,0 +1,34 @@
+package apiclient
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/crowdsecurity/crowdsec/pkg/models"
+	"github.com/pkg/errors"
+	log "github.com/sirupsen/logrus"
+)
+
+type DecisionDeleteService service
+
+// DecisionDeleteService purposely reuses AddSignalsRequestItemDecisions model
+func (d *DecisionDeleteService) Add(ctx context.Context, deletedDecisions *models.DecisionsDeleteRequest) (interface{}, *Response, error) {
+	var response interface{}
+	u := fmt.Sprintf("%s/decisions/delete", d.client.URLPrefix)
+	req, err := d.client.NewRequest(http.MethodPost, u, &deletedDecisions)
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "while building request")
+	}
+
+	resp, err := d.client.Do(ctx, req, &response)
+	if err != nil {
+		return nil, resp, errors.Wrap(err, "while performing request")
+	}
+	if resp.Response.StatusCode != http.StatusOK {
+		log.Warnf("Decisions delete response : http %s", resp.Response.Status)
+	} else {
+		log.Debugf("Decisions delete response : http %s", resp.Response.Status)
+	}
+	return &response, resp, nil
+}

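Editor's note: a sketch of calling the new DecisionDelete service through a configured ApiClient. The wrapper function is illustrative; only types and fields visible in this diff are used:

package main

import (
	"context"

	log "github.com/sirupsen/logrus"

	"github.com/crowdsecurity/crowdsec/pkg/apiclient"
	"github.com/crowdsecurity/crowdsec/pkg/models"
)

// pushDeletedDecisions forwards locally deleted decisions to the central API.
func pushDeletedDecisions(client *apiclient.ApiClient, req *models.DecisionsDeleteRequest) error {
	_, resp, err := client.DecisionDelete.Add(context.Background(), req)
	if err != nil {
		return err
	}
	log.Debugf("decisions delete returned %s", resp.Response.Status)
	return nil
}
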
+ 109 - 50
pkg/apiserver/apic.go

@@ -28,15 +28,13 @@ import (
 var (
 	pullIntervalDefault    = time.Hour * 2
 	pullIntervalDelta      = 5 * time.Minute
-	pushIntervalDefault    = time.Second * 30
-	pushIntervalDelta      = time.Second * 15
+	pushIntervalDefault    = time.Second * 10
+	pushIntervalDelta      = time.Second * 7
 	metricsIntervalDefault = time.Minute * 30
 	metricsIntervalDelta   = time.Minute * 15
 )
 
-var SCOPE_CAPI string = "CAPI"
-var SCOPE_CAPI_ALIAS string = "crowdsecurity/community-blocklist" //we don't use "CAPI" directly, to make it less confusing for the user
-var SCOPE_LISTS string = "lists"
+var SCOPE_CAPI_ALIAS_ALIAS string = "crowdsecurity/community-blocklist" //we don't use "CAPI" directly, to make it less confusing for the user
 
 type apic struct {
 	// when changing the intervals in tests, always set *First too
@@ -49,15 +47,16 @@ type apic struct {
 	metricsIntervalFirst time.Duration
 	dbClient             *database.Client
 	apiClient            *apiclient.ApiClient
-	alertToPush          chan []*models.Alert
-	mu                   sync.Mutex
-	pushTomb             tomb.Tomb
-	pullTomb             tomb.Tomb
-	metricsTomb          tomb.Tomb
-	startup              bool
-	credentials          *csconfig.ApiCredentialsCfg
-	scenarioList         []string
-	consoleConfig        *csconfig.ConsoleConfig
+	AlertsAddChan        chan []*models.Alert
+
+	mu            sync.Mutex
+	pushTomb      tomb.Tomb
+	pullTomb      tomb.Tomb
+	metricsTomb   tomb.Tomb
+	startup       bool
+	credentials   *csconfig.ApiCredentialsCfg
+	scenarioList  []string
+	consoleConfig *csconfig.ConsoleConfig
 }
 
 // randomDuration returns a duration value between d-delta and d+delta
@@ -85,18 +84,54 @@ func (a *apic) FetchScenariosListFromDB() ([]string, error) {
 	return scenarios, nil
 }
 
+func decisionsToApiDecisions(decisions []*models.Decision) models.AddSignalsRequestItemDecisions {
+	apiDecisions := models.AddSignalsRequestItemDecisions{}
+	for _, decision := range decisions {
+		x := &models.AddSignalsRequestItemDecisionsItem{
+			Duration: types.StrPtr(*decision.Duration),
+			ID:       new(int64),
+			Origin:   types.StrPtr(*decision.Origin),
+			Scenario: types.StrPtr(*decision.Scenario),
+			Scope:    types.StrPtr(*decision.Scope),
+			//Simulated: *decision.Simulated,
+			Type:  types.StrPtr(*decision.Type),
+			Until: decision.Until,
+			Value: types.StrPtr(*decision.Value),
+			UUID:  decision.UUID,
+		}
+		*x.ID = decision.ID
+		if decision.Simulated != nil {
+			x.Simulated = *decision.Simulated
+		}
+		apiDecisions = append(apiDecisions, x)
+	}
+	return apiDecisions
+}
+
 func alertToSignal(alert *models.Alert, scenarioTrust string, shareContext bool) *models.AddSignalsRequestItem {
 	signal := &models.AddSignalsRequestItem{
 		Message:         alert.Message,
 		Scenario:        alert.Scenario,
 		ScenarioHash:    alert.ScenarioHash,
 		ScenarioVersion: alert.ScenarioVersion,
-		Source:          alert.Source,
-		StartAt:         alert.StartAt,
-		StopAt:          alert.StopAt,
-		CreatedAt:       alert.CreatedAt,
-		MachineID:       alert.MachineID,
-		ScenarioTrust:   scenarioTrust,
+		Source: &models.AddSignalsRequestItemSource{
+			AsName:    alert.Source.AsName,
+			AsNumber:  alert.Source.AsNumber,
+			Cn:        alert.Source.Cn,
+			IP:        alert.Source.IP,
+			Latitude:  alert.Source.Latitude,
+			Longitude: alert.Source.Longitude,
+			Range:     alert.Source.Range,
+			Scope:     alert.Source.Scope,
+			Value:     alert.Source.Value,
+		},
+		StartAt:       alert.StartAt,
+		StopAt:        alert.StopAt,
+		CreatedAt:     alert.CreatedAt,
+		MachineID:     alert.MachineID,
+		ScenarioTrust: scenarioTrust,
+		Decisions:     decisionsToApiDecisions(alert.Decisions),
+		UUID:          alert.UUID,
 	}
 	if shareContext {
 		signal.Context = make([]*models.AddSignalsRequestItemContextItems0, 0)
@@ -114,7 +149,8 @@ func alertToSignal(alert *models.Alert, scenarioTrust string, shareContext bool)
 func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, consoleConfig *csconfig.ConsoleConfig) (*apic, error) {
 	var err error
 	ret := &apic{
-		alertToPush:          make(chan []*models.Alert),
+
+		AlertsAddChan:        make(chan []*models.Alert),
 		dbClient:             dbClient,
 		mu:                   sync.Mutex{},
 		startup:              true,
@@ -137,6 +173,11 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con
 	if err != nil {
 		return nil, errors.Wrapf(err, "while parsing '%s'", config.Credentials.URL)
 	}
+	papiURL, err := url.Parse(config.Credentials.PapiURL)
+	if err != nil {
+		return nil, errors.Wrapf(err, "while parsing '%s'", config.Credentials.PapiURL)
+	}
+
 	ret.scenarioList, err = ret.FetchScenariosListFromDB()
 	if err != nil {
 		return nil, errors.Wrap(err, "while fetching scenarios from db")
@@ -146,10 +187,28 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con
 		Password:       password,
 		UserAgent:      fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()),
 		URL:            apiURL,
+		PapiURL:        papiURL,
 		VersionPrefix:  "v2",
 		Scenarios:      ret.scenarioList,
 		UpdateScenario: ret.FetchScenariosListFromDB,
 	})
+	if err != nil {
+		return nil, errors.Wrap(err, "while creating api client")
+	}
+
+	scenarios, err := ret.FetchScenariosListFromDB()
+	if err != nil {
+		return ret, errors.Wrapf(err, "get scenario in db: %s", err)
+	}
+
+	if _, err = ret.apiClient.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{
+		MachineID: &config.Credentials.Login,
+		Password:  &password,
+		Scenarios: scenarios,
+	}); err != nil {
+		return ret, errors.Wrapf(err, "authenticate watcher (%s)", config.Credentials.Login)
+	}
+
 	return ret, err
 }
 
@@ -183,7 +242,7 @@ func (a *apic) Push() error {
 				log.Infof("Signal push: %d signals to push", len(cacheCopy))
 				go a.Send(&cacheCopy)
 			}
-		case alerts := <-a.alertToPush:
+		case alerts := <-a.AlertsAddChan:
 			var signals []*models.AddSignalsRequestItem
 			for _, alert := range alerts {
 				if ok := shouldShareAlert(alert, a.consoleConfig); ok {
@@ -205,7 +264,7 @@ func getScenarioTrustOfAlert(alert *models.Alert) string {
 		scenarioTrust = "tainted"
 	}
 	if len(alert.Decisions) > 0 {
-		if *alert.Decisions[0].Origin == "cscli" {
+		if *alert.Decisions[0].Origin == types.CscliOrigin {
 			scenarioTrust = "manual"
 		}
 	}
@@ -264,7 +323,7 @@ func (a *apic) Send(cacheOrig *models.AddSignalsRequest) {
 			defer cancel()
 			_, _, err := a.apiClient.Signal.Add(ctx, &send)
 			if err != nil {
-				log.Errorf("Error while sending final chunk to central API : %s", err)
+				log.Errorf("sending signal to central API: %s", err)
 				return
 			}
 			break
@@ -275,7 +334,7 @@ func (a *apic) Send(cacheOrig *models.AddSignalsRequest) {
 		_, _, err := a.apiClient.Signal.Add(ctx, &send)
 		if err != nil {
 			//we log it here as well, because the return value of func might be discarded
-			log.Errorf("Error while sending chunk to central API : %s", err)
+			log.Errorf("sending signal to central API: %s", err)
 		}
 		pageStart += bulkSize
 		pageEnd += bulkSize
@@ -313,7 +372,7 @@ func (a *apic) HandleDeletedDecisions(deletedDecisions []*models.Decision, delet
 		}
 		filter["origin"] = []string{*decision.Origin}
 
-		dbCliRet, err := a.dbClient.SoftDeleteDecisionsWithFilter(filter)
+		dbCliRet, _, err := a.dbClient.SoftDeleteDecisionsWithFilter(filter)
 		if err != nil {
 			return 0, errors.Wrap(err, "deleting decisions error")
 		}
@@ -337,12 +396,12 @@ func createAlertsForDecisions(decisions []*models.Decision) []*models.Alert {
 				log.Warningf("nil scope in %+v", sub)
 				continue
 			}
-			if *decision.Origin == SCOPE_CAPI {
-				if *sub.Source.Scope == SCOPE_CAPI {
+			if *decision.Origin == types.CAPIOrigin {
+				if *sub.Source.Scope == types.CAPIOrigin {
 					found = true
 					break
 				}
-			} else if *decision.Origin == SCOPE_LISTS {
+			} else if *decision.Origin == types.ListOrigin {
 				if *sub.Source.Scope == *decision.Origin {
 					if sub.Scenario == nil {
 						log.Warningf("nil scenario in %+v", sub)
@@ -368,12 +427,12 @@ func createAlertForDecision(decision *models.Decision) *models.Alert {
 	newAlert := &models.Alert{}
 	newAlert.Source = &models.Source{}
 	newAlert.Source.Scope = types.StrPtr("")
-	if *decision.Origin == SCOPE_CAPI { //to make things more user friendly, we replace CAPI with community-blocklist
-		newAlert.Scenario = types.StrPtr(SCOPE_CAPI)
-		newAlert.Source.Scope = types.StrPtr(SCOPE_CAPI)
-	} else if *decision.Origin == SCOPE_LISTS {
+	if *decision.Origin == types.CAPIOrigin { //to make things more user friendly, we replace CAPI with community-blocklist
+		newAlert.Scenario = types.StrPtr(types.CAPIOrigin)
+		newAlert.Source.Scope = types.StrPtr(types.CAPIOrigin)
+	} else if *decision.Origin == types.ListOrigin {
 		newAlert.Scenario = types.StrPtr(*decision.Scenario)
-		newAlert.Source.Scope = types.StrPtr(SCOPE_LISTS)
+		newAlert.Source.Scope = types.StrPtr(types.ListOrigin)
 	} else {
 		log.Warningf("unknown origin %s", *decision.Origin)
 	}
@@ -407,14 +466,14 @@ func fillAlertsWithDecisions(alerts []*models.Alert, decisions []*models.Decisio
 		found := false
 		//add the individual decisions to the right list
 		for idx, alert := range alerts {
-			if *decision.Origin == SCOPE_CAPI {
-				if *alert.Source.Scope == SCOPE_CAPI {
+			if *decision.Origin == types.CAPIOrigin {
+				if *alert.Source.Scope == types.CAPIOrigin {
 					alerts[idx].Decisions = append(alerts[idx].Decisions, decision)
 					found = true
 					break
 				}
-			} else if *decision.Origin == SCOPE_LISTS {
-				if *alert.Source.Scope == SCOPE_LISTS && *alert.Scenario == *decision.Scenario {
+			} else if *decision.Origin == types.ListOrigin {
+				if *alert.Source.Scope == types.ListOrigin && *alert.Scenario == *decision.Scenario {
 					alerts[idx].Decisions = append(alerts[idx].Decisions, decision)
 					found = true
 					break
@@ -489,12 +548,12 @@ func (a *apic) PullTop() error {
 }
 
 func setAlertScenario(add_counters map[string]map[string]int, delete_counters map[string]map[string]int, alert *models.Alert) *models.Alert {
-	if *alert.Source.Scope == SCOPE_CAPI {
-		*alert.Source.Scope = SCOPE_CAPI_ALIAS
-		alert.Scenario = types.StrPtr(fmt.Sprintf("update : +%d/-%d IPs", add_counters[SCOPE_CAPI]["all"], delete_counters[SCOPE_CAPI]["all"]))
-	} else if *alert.Source.Scope == SCOPE_LISTS {
-		*alert.Source.Scope = fmt.Sprintf("%s:%s", SCOPE_LISTS, *alert.Scenario)
-		alert.Scenario = types.StrPtr(fmt.Sprintf("update : +%d/-%d IPs", add_counters[SCOPE_LISTS][*alert.Scenario], delete_counters[SCOPE_LISTS][*alert.Scenario]))
+	if *alert.Source.Scope == types.CAPIOrigin {
+		*alert.Source.Scope = SCOPE_CAPI_ALIAS_ALIAS
+		alert.Scenario = types.StrPtr(fmt.Sprintf("update : +%d/-%d IPs", add_counters[types.CAPIOrigin]["all"], delete_counters[types.CAPIOrigin]["all"]))
+	} else if *alert.Source.Scope == types.ListOrigin {
+		*alert.Source.Scope = fmt.Sprintf("%s:%s", types.ListOrigin, *alert.Scenario)
+		alert.Scenario = types.StrPtr(fmt.Sprintf("update : +%d/-%d IPs", add_counters[types.ListOrigin][*alert.Scenario], delete_counters[types.ListOrigin][*alert.Scenario]))
 	}
 	return alert
 }
@@ -622,20 +681,20 @@ func (a *apic) Shutdown() {
 
 func makeAddAndDeleteCounters() (map[string]map[string]int, map[string]map[string]int) {
 	add_counters := make(map[string]map[string]int)
-	add_counters[SCOPE_CAPI] = make(map[string]int)
-	add_counters[SCOPE_LISTS] = make(map[string]int)
+	add_counters[types.CAPIOrigin] = make(map[string]int)
+	add_counters[types.ListOrigin] = make(map[string]int)
 
 	delete_counters := make(map[string]map[string]int)
-	delete_counters[SCOPE_CAPI] = make(map[string]int)
-	delete_counters[SCOPE_LISTS] = make(map[string]int)
+	delete_counters[types.CAPIOrigin] = make(map[string]int)
+	delete_counters[types.ListOrigin] = make(map[string]int)
 
 	return add_counters, delete_counters
 }
 
 func updateCounterForDecision(counter map[string]map[string]int, decision *models.Decision, totalDecisions int) {
-	if *decision.Origin == SCOPE_CAPI {
+	if *decision.Origin == types.CAPIOrigin {
 		counter[*decision.Origin]["all"] += totalDecisions
-	} else if *decision.Origin == SCOPE_LISTS {
+	} else if *decision.Origin == types.ListOrigin {
 		counter[*decision.Origin][*decision.Scenario] += totalDecisions
 	} else {
 		log.Warningf("Unknown origin %s", *decision.Origin)

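For context on the two helpers above: makeAddAndDeleteCounters returns one nested map per direction, keyed first by decision origin and then by scenario, with community decisions folded under the "all" key. A minimal sketch of how a pull could feed them, assuming the types.CAPIOrigin / types.ListOrigin constants used in this diff; newDecisions and deletedDecisions are illustrative inputs, not names from the commit:

addCounters, deleteCounters := makeAddAndDeleteCounters()

for _, d := range newDecisions { // hypothetical []*models.Decision from the stream
	updateCounterForDecision(addCounters, d, 1)
}
for _, d := range deletedDecisions {
	updateCounterForDecision(deleteCounters, d, 1)
}

// Community (CAPI) decisions are aggregated under "all"; third-party blocklist
// decisions are counted per scenario, which is what setAlertScenario renders
// as "update : +X/-Y IPs".
log.Infof("community: +%d/-%d", addCounters[types.CAPIOrigin]["all"], deleteCounters[types.CAPIOrigin]["all"])
for scenario, count := range addCounters[types.ListOrigin] {
	log.Infof("list %s: +%d", scenario, count)
}
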
+ 43 - 27
pkg/apiserver/apic_test.go

@@ -46,7 +46,8 @@ func getAPIC(t *testing.T) *apic {
 	t.Helper()
 	dbClient := getDBClient(t)
 	return &apic{
-		alertToPush:  make(chan []*models.Alert),
+		AlertsAddChan: make(chan []*models.Alert),
+		//DecisionDeleteChan: make(chan []*models.Decision),
 		dbClient:     dbClient,
 		mu:           sync.Mutex{},
 		startup:      true,
@@ -108,7 +109,7 @@ func TestAPICCAPIPullIsOld(t *testing.T) {
 		SetType("IP").
 		SetScope("Country").
 		SetValue("Blah").
-		SetOrigin(SCOPE_CAPI).
+		SetOrigin(types.CAPIOrigin).
 		SaveX(context.Background())
 
 	api.dbClient.Ent.Alert.Create().
@@ -178,12 +179,13 @@ func TestNewAPIC(t *testing.T) {
 	setConfig := func() {
 		testConfig = &csconfig.OnlineApiClientCfg{
 			Credentials: &csconfig.ApiCredentialsCfg{
-				URL:      "foobar",
+				URL:      "http://foobar/",
 				Login:    "foo",
 				Password: "bar",
 			},
 		}
 	}
+
 	type args struct {
 		dbClient      *database.Client
 		consoleConfig *csconfig.ConsoleConfig
@@ -216,6 +218,17 @@ func TestNewAPIC(t *testing.T) {
 		tc := tc
 		t.Run(tc.name, func(t *testing.T) {
 			setConfig()
+			httpmock.Activate()
+			defer httpmock.DeactivateAndReset()
+			httpmock.RegisterResponder("POST", "http://foobar/v2/watchers/login", httpmock.NewBytesResponder(
+				200, jsonMarshalX(
+					models.WatcherAuthResponse{
+						Code:   200,
+						Expire: "2023-01-12T22:51:43Z",
+						Token:  "MyToken",
+					},
+				),
+			))
 			tc.action()
 			_, err := NewAPIC(testConfig, tc.args.dbClient, tc.args.consoleConfig)
 			cstest.RequireErrorContains(t, err, tc.expectedErr)
@@ -233,7 +246,7 @@ func TestAPICHandleDeletedDecisions(t *testing.T) {
 		SetType("ban").
 		SetScope("IP").
 		SetValue("1.2.3.4").
-		SetOrigin(SCOPE_CAPI).
+		SetOrigin(types.CAPIOrigin).
 		SaveX(context.Background())
 
 	api.dbClient.Ent.Decision.Create().
@@ -242,14 +255,14 @@ func TestAPICHandleDeletedDecisions(t *testing.T) {
 		SetType("ban").
 		SetScope("IP").
 		SetValue("1.2.3.4").
-		SetOrigin(SCOPE_CAPI).
+		SetOrigin(types.CAPIOrigin).
 		SaveX(context.Background())
 
 	assertTotalDecisionCount(t, api.dbClient, 2)
 
 	nbDeleted, err := api.HandleDeletedDecisions([]*models.Decision{{
 		Value:    types.StrPtr("1.2.3.4"),
-		Origin:   &SCOPE_CAPI,
+		Origin:   types.StrPtr(types.CAPIOrigin),
 		Type:     &decision1.Type,
 		Scenario: types.StrPtr("crowdsec/test"),
 		Scope:    types.StrPtr("IP"),
@@ -257,7 +270,7 @@ func TestAPICHandleDeletedDecisions(t *testing.T) {
 
 	assert.NoError(t, err)
 	assert.Equal(t, 2, nbDeleted)
-	assert.Equal(t, 2, deleteCounters[SCOPE_CAPI]["all"])
+	assert.Equal(t, 2, deleteCounters[types.CAPIOrigin]["all"])
 }
 
 func TestAPICGetMetrics(t *testing.T) {
@@ -347,22 +360,22 @@ func TestAPICGetMetrics(t *testing.T) {
 
 func TestCreateAlertsForDecision(t *testing.T) {
 	httpBfDecisionList := &models.Decision{
-		Origin:   &SCOPE_LISTS,
+		Origin:   types.StrPtr(types.ListOrigin),
 		Scenario: types.StrPtr("crowdsecurity/http-bf"),
 	}
 
 	sshBfDecisionList := &models.Decision{
-		Origin:   &SCOPE_LISTS,
+		Origin:   types.StrPtr(types.ListOrigin),
 		Scenario: types.StrPtr("crowdsecurity/ssh-bf"),
 	}
 
 	httpBfDecisionCommunity := &models.Decision{
-		Origin:   &SCOPE_CAPI,
+		Origin:   types.StrPtr(types.CAPIOrigin),
 		Scenario: types.StrPtr("crowdsecurity/http-bf"),
 	}
 
 	sshBfDecisionCommunity := &models.Decision{
-		Origin:   &SCOPE_CAPI,
+		Origin:   types.StrPtr(types.CAPIOrigin),
 		Scenario: types.StrPtr("crowdsecurity/ssh-bf"),
 	}
 	type args struct {
@@ -426,25 +439,25 @@ func TestCreateAlertsForDecision(t *testing.T) {
 
 func TestFillAlertsWithDecisions(t *testing.T) {
 	httpBfDecisionCommunity := &models.Decision{
-		Origin:   &SCOPE_CAPI,
+		Origin:   types.StrPtr(types.CAPIOrigin),
 		Scenario: types.StrPtr("crowdsecurity/http-bf"),
 		Scope:    types.StrPtr("ip"),
 	}
 
 	sshBfDecisionCommunity := &models.Decision{
-		Origin:   &SCOPE_CAPI,
+		Origin:   types.StrPtr(types.CAPIOrigin),
 		Scenario: types.StrPtr("crowdsecurity/ssh-bf"),
 		Scope:    types.StrPtr("ip"),
 	}
 
 	httpBfDecisionList := &models.Decision{
-		Origin:   &SCOPE_LISTS,
+		Origin:   types.StrPtr(types.ListOrigin),
 		Scenario: types.StrPtr("crowdsecurity/http-bf"),
 		Scope:    types.StrPtr("ip"),
 	}
 
 	sshBfDecisionList := &models.Decision{
-		Origin:   &SCOPE_LISTS,
+		Origin:   types.StrPtr(types.ListOrigin),
 		Scenario: types.StrPtr("crowdsecurity/ssh-bf"),
 		Scope:    types.StrPtr("ip"),
 	}
@@ -505,7 +518,7 @@ func TestFillAlertsWithDecisions(t *testing.T) {
 func TestAPICPullTop(t *testing.T) {
 	api := getAPIC(t)
 	api.dbClient.Ent.Decision.Create().
-		SetOrigin(SCOPE_LISTS).
+		SetOrigin(types.ListOrigin).
 		SetType("ban").
 		SetValue("9.9.9.9").
 		SetScope("Ip").
@@ -521,7 +534,7 @@ func TestAPICPullTop(t *testing.T) {
 			models.DecisionsStreamResponse{
 				Deleted: models.GetDecisionsResponse{
 					&models.Decision{
-						Origin:   &SCOPE_LISTS,
+						Origin:   types.StrPtr(types.ListOrigin),
 						Scenario: types.StrPtr("crowdsecurity/ssh-bf"),
 						Value:    types.StrPtr("9.9.9.9"),
 						Scope:    types.StrPtr("Ip"),
@@ -529,7 +542,7 @@ func TestAPICPullTop(t *testing.T) {
 						Type:     types.StrPtr("ban"),
 					}, // This is already present in DB
 					&models.Decision{
-						Origin:   &SCOPE_LISTS,
+						Origin:   types.StrPtr(types.ListOrigin),
 						Scenario: types.StrPtr("crowdsecurity/ssh-bf"),
 						Value:    types.StrPtr("9.1.9.9"),
 						Scope:    types.StrPtr("Ip"),
@@ -539,7 +552,7 @@ func TestAPICPullTop(t *testing.T) {
 				},
 				New: models.GetDecisionsResponse{
 					&models.Decision{
-						Origin:   &SCOPE_CAPI,
+						Origin:   types.StrPtr(types.CAPIOrigin),
 						Scenario: types.StrPtr("crowdsecurity/test1"),
 						Value:    types.StrPtr("1.2.3.4"),
 						Scope:    types.StrPtr("Ip"),
@@ -547,7 +560,7 @@ func TestAPICPullTop(t *testing.T) {
 						Type:     types.StrPtr("ban"),
 					},
 					&models.Decision{
-						Origin:   &SCOPE_CAPI,
+						Origin:   types.StrPtr(types.CAPIOrigin),
 						Scenario: types.StrPtr("crowdsecurity/test2"),
 						Value:    types.StrPtr("1.2.3.5"),
 						Scope:    types.StrPtr("Ip"),
@@ -555,7 +568,7 @@ func TestAPICPullTop(t *testing.T) {
 						Type:     types.StrPtr("ban"),
 					}, // These two are from community list.
 					&models.Decision{
-						Origin:   &SCOPE_LISTS,
+						Origin:   types.StrPtr(types.ListOrigin),
 						Scenario: types.StrPtr("crowdsecurity/http-bf"),
 						Value:    types.StrPtr("1.2.3.6"),
 						Scope:    types.StrPtr("Ip"),
@@ -563,7 +576,7 @@ func TestAPICPullTop(t *testing.T) {
 						Type:     types.StrPtr("ban"),
 					},
 					&models.Decision{
-						Origin:   &SCOPE_LISTS,
+						Origin:   types.StrPtr(types.ListOrigin),
 						Scenario: types.StrPtr("crowdsecurity/ssh-bf"),
 						Value:    types.StrPtr("1.2.3.7"),
 						Scope:    types.StrPtr("Ip"),
@@ -604,7 +617,7 @@ func TestAPICPullTop(t *testing.T) {
 		alertScenario[alert.SourceScope]++
 	}
 	assert.Equal(t, 3, len(alertScenario))
-	assert.Equal(t, 1, alertScenario[SCOPE_CAPI_ALIAS])
+	assert.Equal(t, 1, alertScenario[SCOPE_CAPI_ALIAS_ALIAS])
 	assert.Equal(t, 1, alertScenario["lists:crowdsecurity/ssh-bf"])
 	assert.Equal(t, 1, alertScenario["lists:crowdsecurity/http-bf"])
 
@@ -632,6 +645,7 @@ func TestAPICPush(t *testing.T) {
 					ScenarioHash:    types.StrPtr("certified"),
 					ScenarioVersion: types.StrPtr("v1.0"),
 					Simulated:       types.BoolPtr(false),
+					Source:          &models.Source{},
 				},
 			},
 			expectedCalls: 1,
@@ -644,6 +658,7 @@ func TestAPICPush(t *testing.T) {
 					ScenarioHash:    types.StrPtr("certified"),
 					ScenarioVersion: types.StrPtr("v1.0"),
 					Simulated:       types.BoolPtr(true),
+					Source:          &models.Source{},
 				},
 			},
 			expectedCalls: 0,
@@ -659,6 +674,7 @@ func TestAPICPush(t *testing.T) {
 						ScenarioHash:    types.StrPtr("certified"),
 						ScenarioVersion: types.StrPtr("v1.0"),
 						Simulated:       types.BoolPtr(false),
+						Source:          &models.Source{},
 					}
 				}
 				return alerts
@@ -688,7 +704,7 @@ func TestAPICPush(t *testing.T) {
 			api.apiClient = apic
 			httpmock.RegisterResponder("POST", "http://api.crowdsec.net/api/signals", httpmock.NewBytesResponder(200, []byte{}))
 			go func() {
-				api.alertToPush <- tc.alerts
+				api.AlertsAddChan <- tc.alerts
 				time.Sleep(time.Second)
 				api.Shutdown()
 			}()
@@ -832,7 +848,7 @@ func TestAPICPull(t *testing.T) {
 				models.DecisionsStreamResponse{
 					New: models.GetDecisionsResponse{
 						&models.Decision{
-							Origin:   &SCOPE_CAPI,
+							Origin:   types.StrPtr(types.CAPIOrigin),
 							Scenario: types.StrPtr("crowdsecurity/test2"),
 							Value:    types.StrPtr("1.2.3.5"),
 							Scope:    types.StrPtr("Ip"),
@@ -892,7 +908,7 @@ func TestShouldShareAlert(t *testing.T) {
 			},
 			alert: &models.Alert{
 				Simulated: types.BoolPtr(false),
-				Decisions: []*models.Decision{{Origin: types.StrPtr("cscli")}},
+				Decisions: []*models.Decision{{Origin: types.StrPtr(types.CscliOrigin)}},
 			},
 			expectedRet:   true,
 			expectedTrust: "manual",
@@ -904,7 +920,7 @@ func TestShouldShareAlert(t *testing.T) {
 			},
 			alert: &models.Alert{
 				Simulated: types.BoolPtr(false),
-				Decisions: []*models.Decision{{Origin: types.StrPtr("cscli")}},
+				Decisions: []*models.Decision{{Origin: types.StrPtr(types.CscliOrigin)}},
 			},
 			expectedRet:   false,
 			expectedTrust: "manual",

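A note on the mechanical change running through these tests: SCOPE_CAPI and SCOPE_LISTS were package-level string variables, so &SCOPE_CAPI was a valid *string; the origin names now live in the types package and, assuming they are declared there as constants (as they appear to be), their address cannot be taken, hence the switch to types.StrPtr. A minimal equivalent of that helper, for illustration only:

// strPtr mirrors what types.StrPtr is assumed to do: return the address of a
// copy of its argument, so constants can be used where *string is expected.
func strPtr(s string) *string { return &s }

decision := &models.Decision{
	Origin:   strPtr(types.CAPIOrigin),
	Scenario: strPtr("crowdsecurity/ssh-bf"),
}
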
+ 69 - 3
pkg/apiserver/apiserver.go

@@ -12,14 +12,17 @@ import (
 	"strings"
 	"time"
 
+	"github.com/crowdsecurity/crowdsec/pkg/apiclient"
 	"github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers"
 	v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1"
 	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
 	"github.com/crowdsecurity/crowdsec/pkg/csplugin"
 	"github.com/crowdsecurity/crowdsec/pkg/database"
+	"github.com/crowdsecurity/crowdsec/pkg/fflag"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 	"github.com/gin-gonic/gin"
 	"github.com/go-co-op/gocron"
+	"github.com/golang-jwt/jwt/v4"
 	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
 	"gopkg.in/natefinch/lumberjack.v2"
@@ -40,8 +43,10 @@ type APIServer struct {
 	router         *gin.Engine
 	httpServer     *http.Server
 	apic           *apic
+	papi           *Papi
 	httpServerTomb tomb.Tomb
 	consoleConfig  *csconfig.ConsoleConfig
+	isEnrolled     bool
 }
 
 // RecoveryWithWriter returns a middleware for a given writer that recovers from any panics and writes a 500 if there was one.
@@ -206,18 +211,34 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) {
 	}
 
 	var apiClient *apic
+	var papiClient *Papi
+	var isMachineEnrolled = false
 
 	if config.OnlineClient != nil && config.OnlineClient.Credentials != nil {
-		log.Printf("Loading CAPI pusher")
+		log.Printf("Loading CAPI manager")
 		apiClient, err = NewAPIC(config.OnlineClient, dbClient, config.ConsoleConfig)
 		if err != nil {
 			return &APIServer{}, err
 		}
-		controller.CAPIChan = apiClient.alertToPush
+		log.Infof("CAPI manager configured successfully")
+		isMachineEnrolled = isEnrolled(apiClient.apiClient)
+		if isMachineEnrolled {
+			log.Infof("Machine is enrolled in the console, Loading PAPI Client")
+			papiClient, err = NewPAPI(apiClient, dbClient, config.ConsoleConfig, *config.PapiLogLevel)
+			if err != nil {
+				return &APIServer{}, err
+			}
+			controller.DecisionDeleteChan = papiClient.Channels.DeleteDecisionChannel
+			controller.AlertsAddChan = apiClient.AlertsAddChan
+		} else {
+			log.Errorf("Machine is not enrolled in the console, can't synchronize with the console")
+		}
 	} else {
 		apiClient = nil
-		controller.CAPIChan = nil
+		controller.AlertsAddChan = nil
+		controller.DecisionDeleteChan = nil
 	}
+
 	if trustedIPs, err := config.GetTrustedIPs(); err == nil {
 		controller.TrustedIPs = trustedIPs
 	} else {
@@ -233,12 +254,29 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) {
 		flushScheduler: flushScheduler,
 		router:         router,
 		apic:           apiClient,
+		papi:           papiClient,
 		httpServerTomb: tomb.Tomb{},
 		consoleConfig:  config.ConsoleConfig,
+		isEnrolled:     isMachineEnrolled,
 	}, nil
 
 }
 
+func isEnrolled(client *apiclient.ApiClient) bool {
+	apiHTTPClient := client.GetClient()
+	jwtTransport := apiHTTPClient.Transport.(*apiclient.JWTTransport)
+	tokenStr := jwtTransport.Token
+
+	token, _ := jwt.Parse(tokenStr, nil)
+	if token == nil {
+		return false
+	}
+	claims := token.Claims.(jwt.MapClaims)
+	_, ok := claims["organization_id"]
+
+	return ok
+}
+
 func (s *APIServer) Router() (*gin.Engine, error) {
 	return s.router, nil
 }
@@ -303,6 +341,7 @@ func (s *APIServer) Run(apiReady chan bool) error {
 			}
 			return nil
 		})
+
 		s.apic.pullTomb.Go(func() error {
 			if err := s.apic.Pull(); err != nil {
 				log.Errorf("capi pull: %s", err)
@@ -310,6 +349,33 @@ func (s *APIServer) Run(apiReady chan bool) error {
 			}
 			return nil
 		})
+
+		//csConfig.API.Server.ConsoleConfig.ShareCustomScenarios
+		if s.isEnrolled {
+			if fflag.PapiClient.IsEnabled() {
+				if s.consoleConfig.ReceiveDecisions != nil && *s.consoleConfig.ReceiveDecisions {
+					log.Infof("Starting PAPI decision receiver")
+					s.papi.pullTomb.Go(func() error {
+						if err := s.papi.Pull(); err != nil {
+							log.Errorf("papi pull: %s", err)
+							return err
+						}
+						return nil
+					})
+
+					s.papi.syncTomb.Go(func() error {
+						if err := s.papi.SyncDecisions(); err != nil {
+							log.Errorf("capi decisions sync: %s", err)
+							return err
+						}
+						return nil
+					})
+				} else {
+					log.Warningf("Machine is not allowed to synchronize decisions, you can enable it with `cscli console enable console_management`")
+				}
+			}
+		}
+
 		s.apic.metricsTomb.Go(func() error {
 			s.apic.SendMetrics(make(chan bool))
 			return nil

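The isEnrolled helper added above decides whether to start the PAPI client by looking for an organization_id claim in the already-issued CAPI JWT; the token is decoded without signature verification (the nil keyfunc) purely to read its claims. A hedged variant using golang-jwt's ParseUnverified, which makes that intent explicit — hasOrganizationClaim is an illustrative name, only the "organization_id" claim comes from the diff:

import "github.com/golang-jwt/jwt/v4"

// hasOrganizationClaim reports whether the (unverified) token carries the
// claim that marks a console-enrolled machine.
func hasOrganizationClaim(tokenStr string) bool {
	claims := jwt.MapClaims{}
	if _, _, err := jwt.NewParser().ParseUnverified(tokenStr, claims); err != nil {
		return false
	}
	_, ok := claims["organization_id"]
	return ok
}
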
+ 19 - 17
pkg/apiserver/controllers/controller.go

@@ -16,16 +16,17 @@ import (
 )
 
 type Controller struct {
-	Ectx          context.Context
-	DBClient      *database.Client
-	Router        *gin.Engine
-	Profiles      []*csconfig.ProfileCfg
-	CAPIChan      chan []*models.Alert
-	PluginChannel chan csplugin.ProfileAlert
-	Log           *log.Logger
-	ConsoleConfig *csconfig.ConsoleConfig
-	TrustedIPs    []net.IPNet
-	HandlerV1     *v1.Controller
+	Ectx               context.Context
+	DBClient           *database.Client
+	Router             *gin.Engine
+	Profiles           []*csconfig.ProfileCfg
+	AlertsAddChan      chan []*models.Alert
+	DecisionDeleteChan chan []*models.Decision
+	PluginChannel      chan csplugin.ProfileAlert
+	Log                *log.Logger
+	ConsoleConfig      *csconfig.ConsoleConfig
+	TrustedIPs         []net.IPNet
+	HandlerV1          *v1.Controller
 }
 
 func (c *Controller) Init() error {
@@ -59,13 +60,14 @@ func (c *Controller) NewV1() error {
 	var err error
 
 	v1Config := v1.ControllerV1Config{
-		DbClient:      c.DBClient,
-		Ctx:           c.Ectx,
-		ProfilesCfg:   c.Profiles,
-		CapiChan:      c.CAPIChan,
-		PluginChannel: c.PluginChannel,
-		ConsoleConfig: *c.ConsoleConfig,
-		TrustedIPs:    c.TrustedIPs,
+		DbClient:           c.DBClient,
+		Ctx:                c.Ectx,
+		ProfilesCfg:        c.Profiles,
+		DecisionDeleteChan: c.DecisionDeleteChan,
+		AlertsAddChan:      c.AlertsAddChan,
+		PluginChannel:      c.PluginChannel,
+		ConsoleConfig:      *c.ConsoleConfig,
+		TrustedIPs:         c.TrustedIPs,
 	}
 
 	c.HandlerV1, err = v1.New(&v1Config)

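To keep the data flow of the two new channels straight, here is a sketch of the wiring done in NewServer, with the consumers named as I understand them rather than quoted from this diff:

// Producers: the LAPI controllers only ever write.
//   CreateAlert        -> AlertsAddChan      (alerts worth sharing with CAPI)
//   DeleteDecision(s)  -> DecisionDeleteChan (decisions removed locally)
// Consumers: background routines started from the API server.
controller.AlertsAddChan = apiClient.AlertsAddChan                        // drained by the apic push routine
controller.DecisionDeleteChan = papiClient.Channels.DeleteDecisionChannel // drained by papi.SyncDecisions
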
+ 16 - 5
pkg/apiserver/controllers/v1/alerts.go

@@ -10,6 +10,7 @@ import (
 	"time"
 
 	jwt "github.com/appleboy/gin-jwt/v2"
+	"github.com/google/uuid"
 
 	"github.com/crowdsecurity/crowdsec/pkg/csplugin"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
@@ -44,6 +45,7 @@ func FormatOneAlert(alert *ent.Alert) *models.Alert {
 		Capacity:        &alert.Capacity,
 		Leakspeed:       &alert.LeakSpeed,
 		Simulated:       &alert.Simulated,
+		UUID:            alert.UUID,
 		Source: &models.Source{
 			Scope:     &alert.SourceScope,
 			Value:     &alert.SourceValue,
@@ -159,8 +161,15 @@ func (c *Controller) CreateAlert(gctx *gin.Context) {
 		}
 
 		alert.MachineID = machineID
+		//generate uuid here for alert
+		alert.UUID = uuid.NewString()
+
 		//if coming from cscli, alert already has decisions
 		if len(alert.Decisions) != 0 {
+			//alert already has a decision (cscli decisions add etc.), generate uuid here
+			for _, decision := range alert.Decisions {
+				decision.UUID = uuid.NewString()
+			}
 			for pIdx, profile := range c.Profiles {
 				_, matched, err := profile.EvaluateProfile(alert)
 				if err != nil {
@@ -176,7 +185,7 @@ func (c *Controller) CreateAlert(gctx *gin.Context) {
 				}
 			}
 			decision := alert.Decisions[0]
-			if decision.Origin != nil && *decision.Origin == "cscli-import" {
+			if decision.Origin != nil && *decision.Origin == types.CscliImportOrigin {
 				stopFlush = true
 			}
 			continue
@@ -201,11 +210,13 @@ func (c *Controller) CreateAlert(gctx *gin.Context) {
 					return
 				}
 			}
-
 			if !matched {
 				continue
 			}
-
+			for _, decision := range profileDecisions {
+				decision.UUID = uuid.NewString()
+			}
+			//generate uuid here for alert
 			if len(alert.Decisions) == 0 { // non manual decision
 				alert.Decisions = append(alert.Decisions, profileDecisions...)
 			}
@@ -229,9 +240,9 @@ func (c *Controller) CreateAlert(gctx *gin.Context) {
 		return
 	}
 
-	if c.CAPIChan != nil {
+	if c.AlertsAddChan != nil {
 		select {
-		case c.CAPIChan <- input:
+		case c.AlertsAddChan <- input:
 			log.Debug("alert sent to CAPI channel")
 		default:
 			log.Warning("Cannot send alert to Central API channel")

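The CreateAlert changes above boil down to tagging everything that may travel to the console with a UUID at ingestion time, using github.com/google/uuid as imported at the top of the file. The essence, condensed:

alert.UUID = uuid.NewString()
for _, decision := range alert.Decisions {
	// decisions are pointers here, so the assignment is kept in the alert
	decision.UUID = uuid.NewString()
}
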
+ 25 - 18
pkg/apiserver/controllers/v1/controller.go

@@ -16,22 +16,28 @@ import (
 )
 
 type Controller struct {
-	Ectx          context.Context
-	DBClient      *database.Client
-	APIKeyHeader  string
-	Middlewares   *middlewares.Middlewares
-	Profiles      []*csprofiles.Runtime
-	CAPIChan      chan []*models.Alert
+	Ectx         context.Context
+	DBClient     *database.Client
+	APIKeyHeader string
+	Middlewares  *middlewares.Middlewares
+	Profiles     []*csprofiles.Runtime
+
+	AlertsAddChan      chan []*models.Alert
+	DecisionDeleteChan chan []*models.Decision
+
 	PluginChannel chan csplugin.ProfileAlert
 	ConsoleConfig csconfig.ConsoleConfig
 	TrustedIPs    []net.IPNet
 }
 
 type ControllerV1Config struct {
-	DbClient      *database.Client
-	Ctx           context.Context
-	ProfilesCfg   []*csconfig.ProfileCfg
-	CapiChan      chan []*models.Alert
+	DbClient    *database.Client
+	Ctx         context.Context
+	ProfilesCfg []*csconfig.ProfileCfg
+
+	AlertsAddChan      chan []*models.Alert
+	DecisionDeleteChan chan []*models.Decision
+
 	PluginChannel chan csplugin.ProfileAlert
 	ConsoleConfig csconfig.ConsoleConfig
 	TrustedIPs    []net.IPNet
@@ -46,14 +52,15 @@ func New(cfg *ControllerV1Config) (*Controller, error) {
 	}
 
 	v1 := &Controller{
-		Ectx:          cfg.Ctx,
-		DBClient:      cfg.DbClient,
-		APIKeyHeader:  middlewares.APIKeyHeader,
-		Profiles:      profiles,
-		CAPIChan:      cfg.CapiChan,
-		PluginChannel: cfg.PluginChannel,
-		ConsoleConfig: cfg.ConsoleConfig,
-		TrustedIPs:    cfg.TrustedIPs,
+		Ectx:               cfg.Ctx,
+		DBClient:           cfg.DbClient,
+		APIKeyHeader:       middlewares.APIKeyHeader,
+		Profiles:           profiles,
+		AlertsAddChan:      cfg.AlertsAddChan,
+		DecisionDeleteChan: cfg.DecisionDeleteChan,
+		PluginChannel:      cfg.PluginChannel,
+		ConsoleConfig:      cfg.ConsoleConfig,
+		TrustedIPs:         cfg.TrustedIPs,
 	}
 	v1.Middlewares, err = middlewares.NewMiddlewares(cfg.DbClient)
 	if err != nil {

+ 22 - 4
pkg/apiserver/controllers/v1/decisions.go

@@ -34,6 +34,7 @@ func FormatDecisions(decisions []*ent.Decision, dedup bool) ([]*models.Decision,
 			Value:    &dbDecision.Value,
 			Type:     &dbDecision.Type,
 			Origin:   &dbDecision.Origin,
+			UUID:     dbDecision.UUID,
 		}
 		results = append(results, &decision)
 	}
@@ -93,11 +94,20 @@ func (c *Controller) DeleteDecisionById(gctx *gin.Context) {
 		gctx.JSON(http.StatusBadRequest, gin.H{"message": "decision_id must be valid integer"})
 		return
 	}
-	nbDeleted, err := c.DBClient.SoftDeleteDecisionByID(decisionID)
+	nbDeleted, deletedFromDB, err := c.DBClient.SoftDeleteDecisionByID(decisionID)
 	if err != nil {
 		c.HandleDBErrors(gctx, err)
 		return
 	}
+	//transform deleted decisions to be sendable to capi
+	deletedDecisions, err := FormatDecisions(deletedFromDB, false)
+	if err != nil {
+		log.Warningf("failed to format decisions: %v", err)
+	}
+
+	if c.DecisionDeleteChan != nil {
+		c.DecisionDeleteChan <- deletedDecisions
+	}
 
 	deleteDecisionResp := models.DeleteDecisionResponse{
 		NbDeleted: strconv.Itoa(nbDeleted),
@@ -108,16 +118,24 @@ func (c *Controller) DeleteDecisionById(gctx *gin.Context) {
 
 func (c *Controller) DeleteDecisions(gctx *gin.Context) {
 	var err error
-
-	nbDeleted, err := c.DBClient.SoftDeleteDecisionsWithFilter(gctx.Request.URL.Query())
+	nbDeleted, deletedFromDB, err := c.DBClient.SoftDeleteDecisionsWithFilter(gctx.Request.URL.Query())
 	if err != nil {
 		c.HandleDBErrors(gctx, err)
 		return
 	}
+	//transform deleted decisions to be sendable to capi
+	deletedDecisions, err := FormatDecisions(deletedFromDB, false)
+	if err != nil {
+		log.Warningf("failed to format decisions: %v", err)
+	}
+
+	if c.DecisionDeleteChan != nil {
+		c.DecisionDeleteChan <- deletedDecisions
+	}
+
 	deleteDecisionResp := models.DeleteDecisionResponse{
 		NbDeleted: nbDeleted,
 	}
-
 	gctx.JSON(http.StatusOK, deleteDecisionResp)
 }
 

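One behavioural note: unlike CreateAlert, which wraps its channel write in a select with a default case so a full channel never blocks the HTTP handler, the two delete handlers above send on DecisionDeleteChan unconditionally. A non-blocking variant in the same style, as a sketch only (this is not what the commit does):

if c.DecisionDeleteChan != nil {
	select {
	case c.DecisionDeleteChan <- deletedDecisions:
		log.Debug("deleted decisions sent to PAPI sync channel")
	default:
		log.Warning("Cannot send deleted decisions to PAPI sync channel")
	}
}
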
+ 263 - 0
pkg/apiserver/papi.go

@@ -0,0 +1,263 @@
+package apiserver
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/crowdsecurity/crowdsec/pkg/apiclient"
+	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
+	"github.com/crowdsecurity/crowdsec/pkg/database"
+	"github.com/crowdsecurity/crowdsec/pkg/longpollclient"
+	"github.com/crowdsecurity/crowdsec/pkg/models"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
+	"gopkg.in/tomb.v2"
+)
+
+var (
+	SyncInterval = time.Second * 10
+)
+
+const (
+	PapiPullKey = "papi:last_pull"
+)
+
+var (
+	operationMap = map[string]func(*Message, *Papi) error{
+		"decision": DecisionCmd,
+		"alert":    AlertCmd,
+	}
+)
+
+type Header struct {
+	OperationType string    `json:"operation_type"`
+	OperationCmd  string    `json:"operation_cmd"`
+	Timestamp     time.Time `json:"timestamp"`
+	Message       string    `json:"message"`
+	UUID          string    `json:"uuid"`
+	Source        *Source   `json:"source"`
+	Destination   string    `json:"destination"`
+}
+
+type Source struct {
+	User string `json:"user"`
+}
+
+type Message struct {
+	Header *Header
+	Data   interface{} `json:"data"`
+}
+
+type OperationChannels struct {
+	AddAlertChannel       chan []*models.Alert
+	DeleteDecisionChannel chan []*models.Decision
+}
+
+type Papi struct {
+	URL           string
+	Client        *longpollclient.LongPollClient
+	DBClient      *database.Client
+	apiClient     *apiclient.ApiClient
+	Channels      *OperationChannels
+	mu            sync.Mutex
+	pullTomb      tomb.Tomb
+	syncTomb      tomb.Tomb
+	SyncInterval  time.Duration
+	consoleConfig *csconfig.ConsoleConfig
+	Logger        *log.Entry
+}
+
+func NewPAPI(apic *apic, dbClient *database.Client, consoleConfig *csconfig.ConsoleConfig, logLevel log.Level) (*Papi, error) {
+
+	logger := logrus.New()
+	if err := types.ConfigureLogger(logger); err != nil {
+		return &Papi{}, fmt.Errorf("creating papi logger: %s", err)
+	}
+	logger.SetLevel(logLevel)
+
+	longPollClient, err := longpollclient.NewLongPollClient(longpollclient.LongPollClientConfig{
+		Url:        *apic.apiClient.PapiURL,
+		Logger:     logger,
+		HttpClient: apic.apiClient.GetClient(),
+	})
+
+	if err != nil {
+		return &Papi{}, errors.Wrap(err, "failed to create PAPI client")
+	}
+
+	channels := &OperationChannels{
+		AddAlertChannel:       apic.AlertsAddChan,
+		DeleteDecisionChannel: make(chan []*models.Decision),
+	}
+
+	papi := &Papi{
+		URL:           apic.apiClient.PapiURL.String(),
+		Client:        longPollClient,
+		DBClient:      dbClient,
+		Channels:      channels,
+		SyncInterval:  SyncInterval,
+		mu:            sync.Mutex{},
+		pullTomb:      tomb.Tomb{},
+		syncTomb:      tomb.Tomb{},
+		apiClient:     apic.apiClient,
+		consoleConfig: consoleConfig,
+		Logger:        logger.WithFields(log.Fields{"interval": SyncInterval.Seconds(), "source": "papi"}),
+	}
+
+	return papi, nil
+}
+
+// PullPAPI is the long polling client for real-time decisions from PAPI
+func (p *Papi) Pull() error {
+
+	defer types.CatchPanic("lapi/PullPAPI")
+	p.Logger.Infof("Starting Polling API Pull")
+
+	lastTimestamp := time.Time{}
+	lastTimestampStr, err := p.DBClient.GetConfigItem(PapiPullKey)
+	if err != nil {
+		p.Logger.Warningf("failed to get last timestamp for papi pull: %s", err)
+	}
+	//value doesn't exist, it's first time we're pulling
+	if lastTimestampStr == nil {
+		binTime, err := lastTimestamp.MarshalText()
+		if err != nil {
+			return errors.Wrap(err, "failed to marshal last timestamp")
+		}
+		if err := p.DBClient.SetConfigItem(PapiPullKey, string(binTime)); err != nil {
+			p.Logger.Errorf("error setting papi pull last key: %s", err)
+		} else {
+			p.Logger.Debugf("config item '%s' set in database with value '%s'", PapiPullKey, string(binTime))
+		}
+	} else {
+		if err := lastTimestamp.UnmarshalText([]byte(*lastTimestampStr)); err != nil {
+			return errors.Wrap(err, "failed to unmarshal last timestamp")
+		}
+	}
+
+	p.Logger.Infof("Starting PAPI pull (since:%s)", lastTimestamp)
+	for event := range p.Client.Start(lastTimestamp) {
+		logger := p.Logger.WithField("request-id", event.RequestId)
+		//update last timestamp in database
+		newTime := time.Now().UTC()
+		binTime, err := newTime.MarshalText()
+		if err != nil {
+			return errors.Wrap(err, "failed to marshal last timestamp")
+		}
+		logger.Debugf("message received: %+v", event.Data)
+		message := &Message{}
+		if err := json.Unmarshal([]byte(event.Data), message); err != nil {
+			logger.Errorf("polling papi message format is not compatible: %+v: %s", event.Data, err)
+			// do we want to continue or exit ?
+			continue
+		}
+
+		if message.Header == nil {
+			logger.Errorf("no header in message, skipping")
+			continue
+		}
+
+		if message.Header.Source == nil {
+			logger.Errorf("no source user in header message, skipping")
+			continue
+		}
+
+		if operationFunc, ok := operationMap[message.Header.OperationType]; ok {
+			logger.Debugf("Calling operation '%s'", message.Header.OperationType)
+			err := operationFunc(message, p)
+			if err != nil {
+				logger.Errorf("'%s %s' failed: %s", message.Header.OperationType, message.Header.OperationCmd, err)
+				continue
+			}
+		} else {
+			logger.Errorf("operation '%s' unknown, continue", message.Header.OperationType)
+			continue
+		}
+
+		if err := p.DBClient.SetConfigItem(PapiPullKey, string(binTime)); err != nil {
+			return errors.Wrap(err, "failed to update last timestamp")
+		} else {
+			logger.Debugf("set last timestamp to %s", newTime)
+		}
+
+	}
+	return nil
+}
+
+func (p *Papi) SyncDecisions() error {
+	defer types.CatchPanic("lapi/syncDecisionsToCAPI")
+
+	var cache models.DecisionsDeleteRequest
+	ticker := time.NewTicker(p.SyncInterval)
+	p.Logger.Infof("Start decisions sync to CrowdSec Central API (interval: %s)", p.SyncInterval)
+
+	for {
+		select {
+		case <-p.syncTomb.Dying(): // if one apic routine is dying, do we kill the others?
+			p.Logger.Infof("sync decisions tomb is dying, sending cache (%d elements) before exiting", len(cache))
+			if len(cache) == 0 {
+				return nil
+			}
+			go p.SendDeletedDecisions(&cache)
+			return nil
+		case <-ticker.C:
+			if len(cache) > 0 {
+				p.mu.Lock()
+				cacheCopy := cache
+				cache = make([]models.DecisionsDeleteRequestItem, 0)
+				p.mu.Unlock()
+				p.Logger.Infof("sync decisions: %d deleted decisions to push", len(cacheCopy))
+				go p.SendDeletedDecisions(&cacheCopy)
+			}
+		case deletedDecisions := <-p.Channels.DeleteDecisionChannel:
+			if (p.consoleConfig.ShareManualDecisions != nil && *p.consoleConfig.ShareManualDecisions) || (p.consoleConfig.ReceiveDecisions != nil && *p.consoleConfig.ReceiveDecisions) {
+				var tmpDecisions []models.DecisionsDeleteRequestItem
+				p.Logger.Debugf("%d decisions deletion to add in cache", len(deletedDecisions))
+				for _, decision := range deletedDecisions {
+					tmpDecisions = append(tmpDecisions, models.DecisionsDeleteRequestItem(decision.UUID))
+				}
+				p.mu.Lock()
+				cache = append(cache, tmpDecisions...)
+				p.mu.Unlock()
+			}
+		}
+	}
+}
+
+func (p *Papi) SendDeletedDecisions(cacheOrig *models.DecisionsDeleteRequest) {
+
+	var cache []models.DecisionsDeleteRequestItem = *cacheOrig
+	var send models.DecisionsDeleteRequest
+
+	bulkSize := 50
+	pageStart := 0
+	pageEnd := bulkSize
+	for {
+		if pageEnd >= len(cache) {
+			send = cache[pageStart:]
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+			_, _, err := p.apiClient.DecisionDelete.Add(ctx, &send)
+			if err != nil {
+				p.Logger.Errorf("sending deleted decisions to central API: %s", err)
+				return
+			}
+			break
+		}
+		send = cache[pageStart:pageEnd]
+		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+		defer cancel()
+		_, _, err := p.apiClient.DecisionDelete.Add(ctx, &send)
+		if err != nil {
+			//we log it here as well, because the return value of func might be discarded
+			p.Logger.Errorf("sending deleted decisions to central API: %s", err)
+		}
+		pageStart += bulkSize
+		pageEnd += bulkSize
+	}
+}

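SendDeletedDecisions above flushes the cache in pages of bulkSize (50) so a large backlog never becomes one oversized request. The paging logic, extracted into a self-contained sketch — chunk is a hypothetical helper, not part of the commit:

// chunk splits items into consecutive pages of at most size elements.
func chunk(items []models.DecisionsDeleteRequestItem, size int) []models.DecisionsDeleteRequest {
	var pages []models.DecisionsDeleteRequest
	for start := 0; start < len(items); start += size {
		end := start + size
		if end > len(items) {
			end = len(items)
		}
		pages = append(pages, models.DecisionsDeleteRequest(items[start:end]))
	}
	return pages
}
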
+ 132 - 0
pkg/apiserver/papi_cmd.go

@@ -0,0 +1,132 @@
+package apiserver
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/crowdsecurity/crowdsec/pkg/models"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+	"github.com/pkg/errors"
+	log "github.com/sirupsen/logrus"
+)
+
+type deleteDecisions struct {
+	UUID      string   `json:"uuid"`
+	Decisions []string `json:"decisions"`
+}
+
+func DecisionCmd(message *Message, p *Papi) error {
+	switch message.Header.OperationCmd {
+	case "delete":
+
+		data, err := json.Marshal(message.Data)
+		if err != nil {
+			return err
+		}
+		UUIDs := make([]string, 0)
+		deleteDecisionMsg := deleteDecisions{
+			Decisions: make([]string, 0),
+		}
+		if err := json.Unmarshal(data, &deleteDecisionMsg); err != nil {
+			return fmt.Errorf("message for '%s' contains bad data format: %s", message.Header.OperationType, err)
+		}
+
+		UUIDs = append(UUIDs, deleteDecisionMsg.Decisions...)
+		log.Infof("Decisions UUIDs to remove: %+v", UUIDs)
+
+		filter := make(map[string][]string)
+		filter["uuid"] = UUIDs
+		_, deletedDecisions, err := p.DBClient.SoftDeleteDecisionsWithFilter(filter)
+		if err != nil {
+			return fmt.Errorf("unable to delete decisions %+v : %s", UUIDs, err)
+		}
+		decisions := make([]*models.Decision, 0)
+		for _, deletedDecision := range deletedDecisions {
+			log.Infof("Decision from '%s' for '%s' (%s) has been deleted", deletedDecision.Origin, deletedDecision.Value, deletedDecision.Type)
+			dec := &models.Decision{
+				UUID:     deletedDecision.UUID,
+				Origin:   &deletedDecision.Origin,
+				Scenario: &deletedDecision.Scenario,
+				Scope:    &deletedDecision.Scope,
+				Value:    &deletedDecision.Value,
+				ID:       int64(deletedDecision.ID),
+				Until:    deletedDecision.Until.String(),
+				Type:     &deletedDecision.Type,
+			}
+			decisions = append(decisions, dec)
+		}
+		p.Channels.DeleteDecisionChannel <- decisions
+	default:
+		return fmt.Errorf("unknown command '%s' for operation type '%s'", message.Header.OperationCmd, message.Header.OperationType)
+	}
+
+	return nil
+}
+
+func AlertCmd(message *Message, p *Papi) error {
+	switch message.Header.OperationCmd {
+	case "add":
+		data, err := json.Marshal(message.Data)
+		if err != nil {
+			return err
+		}
+		alert := &models.Alert{}
+
+		if err := json.Unmarshal(data, alert); err != nil {
+			return errors.Wrapf(err, "message for '%s' contains bad alert format", message.Header.OperationType)
+		}
+
+		log.Infof("Received order %s from PAPI (%d decisions)", alert.UUID, len(alert.Decisions))
+
+		/*Fix the alert with missing mandatory items*/
+		if alert.StartAt == nil || *alert.StartAt == "" {
+			log.Warnf("Alert %d has no StartAt, setting it to now", alert.ID)
+			alert.StartAt = types.StrPtr(time.Now().UTC().Format(time.RFC3339))
+		}
+		if alert.StopAt == nil || *alert.StopAt == "" {
+			log.Warnf("Alert %d has no StopAt, setting it to now", alert.ID)
+			alert.StopAt = types.StrPtr(time.Now().UTC().Format(time.RFC3339))
+		}
+		alert.EventsCount = types.Int32Ptr(0)
+		alert.Capacity = types.Int32Ptr(0)
+		alert.Leakspeed = types.StrPtr("")
+		alert.Simulated = types.BoolPtr(false)
+		alert.ScenarioHash = types.StrPtr("")
+		alert.ScenarioVersion = types.StrPtr("")
+		alert.Message = types.StrPtr("")
+		alert.Scenario = types.StrPtr("")
+		alert.Source = &models.Source{}
+
+		//if we're setting Source.Scope to types.ConsoleOrigin, it messes up the alert's value
+		if len(alert.Decisions) >= 1 {
+			alert.Source.Scope = alert.Decisions[0].Scope
+			alert.Source.Value = alert.Decisions[0].Value
+		} else {
+			log.Warningf("No decision found in alert for Polling API (%s : %s)", message.Header.Source.User, message.Header.Message)
+			alert.Source.Scope = types.StrPtr(types.ConsoleOrigin)
+			alert.Source.Value = &message.Header.Source.User
+		}
+		alert.Scenario = &message.Header.Message
+
+		for _, decision := range alert.Decisions {
+			if *decision.Scenario == "" {
+				decision.Scenario = &message.Header.Message
+			}
+			log.Infof("Adding decision for '%s' with UUID: %s", *decision.Value, decision.UUID)
+		}
+
+		//use a different method : alert and/or decision might already be partially present in the database
+		_, err = p.DBClient.CreateOrUpdateAlert("", alert)
+		if err != nil {
+			log.Errorf("Failed to create alerts in DB: %s", err)
+		} else {
+			p.Channels.AddAlertChannel <- []*models.Alert{alert}
+		}
+
+	default:
+		return fmt.Errorf("unknown command '%s' for operation type '%s'", message.Header.OperationCmd, message.Header.OperationType)
+	}
+
+	return nil
+}

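For reference, the envelope DecisionCmd expects for a delete order looks roughly like the payload below; the field names come from the Message/Header structs in papi.go and the deleteDecisions type above, while every value is made up for illustration:

raw := []byte(`{
  "header": {
    "operation_type": "decision",
    "operation_cmd": "delete",
    "timestamp": "2023-01-01T00:00:00Z",
    "message": "decisions deleted from the console",
    "uuid": "11111111-1111-1111-1111-111111111111",
    "source": {"user": "console-user@example.com"},
    "destination": "machine"
  },
  "data": {
    "uuid": "22222222-2222-2222-2222-222222222222",
    "decisions": ["33333333-3333-3333-3333-333333333333"]
  }
}`)

msg := &Message{}
if err := json.Unmarshal(raw, msg); err != nil {
	log.Errorf("invalid PAPI message: %s", err)
}
// msg would then be routed through operationMap["decision"], i.e. DecisionCmd.
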
+ 32 - 6
pkg/csconfig/api.go

@@ -25,6 +25,7 @@ type APICfg struct {
 }
 
 type ApiCredentialsCfg struct {
+	PapiURL    string `yaml:"papi_url,omitempty" json:"papi_url,omitempty"`
 	URL        string `yaml:"url,omitempty" json:"url,omitempty"`
 	Login      string `yaml:"login,omitempty" json:"login,omitempty"`
 	Password   string `yaml:"password,omitempty" json:"-"`
@@ -91,6 +92,7 @@ func (o *OnlineApiClientCfg) Load() error {
 		log.Warningf("can't load CAPI credentials from '%s' (missing field)", o.CredentialsFilePath)
 		o.Credentials = nil
 	}
+
 	return nil
 }
 
@@ -192,6 +194,7 @@ type LocalApiServerCfg struct {
 	LogMaxAge              int                 `yaml:"-"`
 	LogMaxFiles            int                 `yaml:"-"`
 	TrustedIPs             []string            `yaml:"trusted_ips,omitempty"`
+	PapiLogLevel           *log.Level          `yaml:"papi_log_level"`
 }
 
 type TLSCfg struct {
@@ -211,8 +214,35 @@ func (c *Config) LoadAPIServer() error {
 		log.Warning("crowdsec local API is disabled from flag")
 	}
 
-	if c.API.Server == nil {
-		log.Warning("crowdsec local API is disabled because its configuration is not present")
+	if c.API.Server != nil {
+
+		//inherit log level from common, then api->server
+		var logLevel log.Level
+		if c.API.Server.LogLevel != nil {
+			logLevel = *c.API.Server.LogLevel
+		} else if c.Common.LogLevel != nil {
+			logLevel = *c.Common.LogLevel
+		} else {
+			logLevel = log.InfoLevel
+		}
+
+		if c.API.Server.PapiLogLevel == nil {
+			c.API.Server.PapiLogLevel = &logLevel
+		}
+
+		if c.API.Server.OnlineClient != nil && c.API.Server.OnlineClient.CredentialsFilePath != "" {
+			if err := c.API.Server.OnlineClient.Load(); err != nil {
+				return errors.Wrap(err, "loading online client credentials")
+			}
+		}
+		if c.API.Server.OnlineClient == nil || c.API.Server.OnlineClient.Credentials == nil {
+			log.Printf("push and pull to Central API disabled")
+		}
+		if err := c.LoadDBConfig(); err != nil {
+			return err
+		}
+	} else {
+		log.Warning("crowdsec local API is disabled")
 		c.DisableAPI = true
 		return nil
 	}
@@ -272,10 +302,6 @@ func (c *Config) LoadAPIServer() error {
 		}
 	}
 
-	if err := c.LoadDBConfig(); err != nil {
-		return err
-	}
-
 	return nil
 }
 

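The new PapiURL field means the online API credentials file gains a papi_url key next to url/login/password. A hedged sketch of how such a file unmarshals into ApiCredentialsCfg; the endpoint shown is a placeholder, not a documented URL:

var creds csconfig.ApiCredentialsCfg
data := []byte("url: https://api.crowdsec.net/\n" +
	"papi_url: https://papi.api.crowdsec.net/\n" + // placeholder value
	"login: my-machine\n" +
	"password: secret\n")
if err := yaml.Unmarshal(data, &creds); err != nil {
	log.Fatalf("parsing credentials: %s", err)
}
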
+ 24 - 15
pkg/csconfig/api_test.go

@@ -1,15 +1,18 @@
 package csconfig
 
 import (
+	"fmt"
 	"os"
 	"path/filepath"
+	"strings"
 	"testing"
 
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+	log "github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/assert"
 	"gopkg.in/yaml.v2"
 
 	"github.com/crowdsecurity/crowdsec/pkg/cstest"
-	"github.com/crowdsecurity/crowdsec/pkg/types"
 )
 
 func TestLoadLocalApiClientCfg(t *testing.T) {
@@ -143,7 +146,7 @@ func TestLoadAPIServer(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-
+	logLevel := log.InfoLevel
 	config := &Config{}
 	fcontent, err := os.ReadFile("./tests/config.yaml")
 	if err != nil {
@@ -171,6 +174,7 @@ func TestLoadAPIServer(t *testing.T) {
 							CredentialsFilePath: "./tests/online-api-secrets.yaml",
 						},
 						ProfilesPath: "./tests/profiles.yaml",
+						PapiLogLevel: &logLevel,
 					},
 				},
 				DbConfig: &DatabaseCfg{
@@ -198,6 +202,7 @@ func TestLoadAPIServer(t *testing.T) {
 					ShareTaintedScenarios: types.BoolPtr(true),
 					ShareCustomScenarios:  types.BoolPtr(true),
 					ShareContext:          types.BoolPtr(false),
+					ReceiveDecisions:      types.BoolPtr(false),
 				},
 				LogDir:   LogDirFullPath,
 				LogMedia: "stdout",
@@ -212,6 +217,7 @@ func TestLoadAPIServer(t *testing.T) {
 				Profiles:               tmpLAPI.Profiles,
 				ProfilesPath:           "./tests/profiles.yaml",
 				UseForwardedForHeaders: false,
+				PapiLogLevel:           &logLevel,
 			},
 		},
 		{
@@ -228,24 +234,27 @@ func TestLoadAPIServer(t *testing.T) {
 				DisableAPI: false,
 			},
 			expected: &LocalApiServerCfg{
-				Enable:   types.BoolPtr(true),
-				LogDir:   LogDirFullPath,
-				LogMedia: "stdout",
+				PapiLogLevel: &logLevel,
 			},
-			expectedErr: "while loading profiles for LAPI",
+			expectedErr: "no database configuration provided",
 		},
 	}
 
-	for _, tc := range tests {
-		tc := tc
-		t.Run(tc.name, func(t *testing.T) {
-			err := tc.input.LoadAPIServer()
-			cstest.RequireErrorContains(t, err, tc.expectedErr)
-			if tc.expectedErr != "" {
-				return
+	for idx, test := range tests {
+		err := test.input.LoadAPIServer()
+		if err == nil && test.expectedErr != "" {
+			fmt.Printf("TEST '%s': NOK\n", test.name)
+			t.Fatalf("Test number %d/%d expected error, didn't get it", idx+1, len(tests))
+		} else if test.expectedErr != "" {
+			fmt.Printf("ERR: %+v\n", err)
+			if !strings.HasPrefix(fmt.Sprintf("%s", err), test.expectedErr) {
+				fmt.Printf("TEST '%s': NOK\n", test.name)
+				t.Fatalf("%d/%d expected '%s' got '%s'", idx, len(tests),
+					test.expectedErr,
+					fmt.Sprintf("%s", err))
 			}
 
-			assert.Equal(t, tc.expected, tc.input.API.Server)
-		})
+			assert.Equal(t, test.expected, test.input.API.Server)
+		}
 	}
 }

+ 12 - 1
pkg/csconfig/console.go

@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"os"
 
+	"github.com/crowdsecurity/crowdsec/pkg/fflag"
 	"github.com/crowdsecurity/crowdsec/pkg/types"
 	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
@@ -14,10 +15,11 @@ const (
 	SEND_CUSTOM_SCENARIOS  = "custom"
 	SEND_TAINTED_SCENARIOS = "tainted"
 	SEND_MANUAL_SCENARIOS  = "manual"
+	CONSOLE_MANAGEMENT     = "console_management"
 	SEND_CONTEXT           = "context"
 )
 
-var CONSOLE_CONFIGS = []string{SEND_CUSTOM_SCENARIOS, SEND_MANUAL_SCENARIOS, SEND_TAINTED_SCENARIOS, SEND_CONTEXT}
+var CONSOLE_CONFIGS = []string{SEND_CUSTOM_SCENARIOS, SEND_MANUAL_SCENARIOS, SEND_TAINTED_SCENARIOS, SEND_CONTEXT, CONSOLE_MANAGEMENT}
 
 var DefaultConsoleConfigFilePath = DefaultConfigPath("console.yaml")
 
@@ -25,6 +27,7 @@ type ConsoleConfig struct {
 	ShareManualDecisions  *bool `yaml:"share_manual_decisions"`
 	ShareTaintedScenarios *bool `yaml:"share_tainted"`
 	ShareCustomScenarios  *bool `yaml:"share_custom"`
+	ReceiveDecisions      *bool `yaml:"console_management"`
 	ShareContext          *bool `yaml:"share_context"`
 }
 
@@ -35,6 +38,7 @@ func (c *LocalApiServerCfg) LoadConsoleConfig() error {
 		c.ConsoleConfig.ShareCustomScenarios = types.BoolPtr(true)
 		c.ConsoleConfig.ShareTaintedScenarios = types.BoolPtr(true)
 		c.ConsoleConfig.ShareManualDecisions = types.BoolPtr(false)
+		c.ConsoleConfig.ReceiveDecisions = types.BoolPtr(false)
 		c.ConsoleConfig.ShareContext = types.BoolPtr(false)
 		return nil
 	}
@@ -61,6 +65,13 @@ func (c *LocalApiServerCfg) LoadConsoleConfig() error {
 		c.ConsoleConfig.ShareManualDecisions = types.BoolPtr(false)
 	}
 
+	if !fflag.PapiClient.IsEnabled() {
+		c.ConsoleConfig.ReceiveDecisions = types.BoolPtr(false)
+	} else if c.ConsoleConfig.ReceiveDecisions == nil {
+		log.Debugf("no console_management found, setting to false")
+		c.ConsoleConfig.ReceiveDecisions = types.BoolPtr(false)
+	}
+
 	if c.ConsoleConfig.ShareContext == nil {
 		log.Debugf("no 'context' found, setting to false")
 		c.ConsoleConfig.ShareContext = types.BoolPtr(false)

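Put together, the console.yaml handled by LoadConsoleConfig now accepts a console_management flag, gated behind the papi_client feature flag as the code above shows, which maps onto ReceiveDecisions. A hedged sketch with illustrative values only:

var cc ConsoleConfig
data := []byte("share_custom: true\n" +
	"share_tainted: true\n" +
	"share_manual_decisions: false\n" +
	"share_context: false\n" +
	"console_management: true\n")
if err := yaml.Unmarshal(data, &cc); err != nil {
	log.Fatalf("parsing console.yaml: %s", err)
}
log.Infof("console management enabled: %t", *cc.ReceiveDecisions)
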
+ 1 - 1
pkg/csprofiles/csprofiles.go

@@ -147,7 +147,7 @@ func (Profile *Runtime) GenerateDecisionFromProfile(Alert *models.Alert) ([]*mod
 		decision.Value = new(string)
 		*decision.Value = *Alert.Source.Value
 		decision.Origin = new(string)
-		*decision.Origin = "crowdsec"
+		*decision.Origin = types.CrowdSecOrigin
 		if refDecision.Origin != nil {
 			*decision.Origin = fmt.Sprintf("%s/%s", *decision.Origin, *refDecision.Origin)
 		}

+ 187 - 18
pkg/database/alerts.go

@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"sort"
 	"strconv"
 	"strings"
 	"time"
@@ -64,16 +65,19 @@ func formatAlertAsString(machineId string, alert *models.Alert) []string {
 
 	/**/
 	reason := ""
-	if *alert.Scenario != "" {
-		reason = fmt.Sprintf("%s by %s", *alert.Scenario, src)
-	} else if *alert.Message != "" {
-		reason = fmt.Sprintf("%s by %s", *alert.Scenario, src)
+	msg := ""
+	if alert.Scenario != nil && *alert.Scenario != "" {
+		msg = *alert.Scenario
+	} else if alert.Message != nil && *alert.Message != "" {
+		msg = *alert.Message
 	} else {
-		reason = fmt.Sprintf("empty scenario by %s", src)
+		msg = fmt.Sprintf("empty scenario by %s", src)
 	}
 
+	reason = fmt.Sprintf("%s by %s", msg, src)
+
 	if len(alert.Decisions) > 0 {
-		for _, decisionItem := range alert.Decisions {
+		for i, decisionItem := range alert.Decisions {
 			decision := ""
 			if alert.Simulated != nil && *alert.Simulated {
 				decision = "(simulated alert)"
@@ -84,11 +88,20 @@ func formatAlertAsString(machineId string, alert *models.Alert) []string {
 				/*spew is expensive*/
 				log.Debugf("%s", spew.Sdump(decisionItem))
 			}
+			if len(alert.Decisions) > 1 {
+				reason = fmt.Sprintf("%s for %d/%d decisions", msg, i+1, len(alert.Decisions))
+			}
+			machineIdOrigin := ""
+			if machineId == "" {
+				machineIdOrigin = *decisionItem.Origin
+			} else {
+				machineIdOrigin = fmt.Sprintf("%s/%s", machineId, *decisionItem.Origin)
+			}
+
 			decision += fmt.Sprintf("%s %s on %s %s", *decisionItem.Duration,
 				*decisionItem.Type, *decisionItem.Scope, *decisionItem.Value)
 			retStr = append(retStr,
-				fmt.Sprintf("(%s/%s) %s : %s", machineId,
-					*decisionItem.Origin, reason, decision))
+				fmt.Sprintf("(%s) %s : %s", machineIdOrigin, reason, decision))
 		}
 	} else {
 		retStr = append(retStr, fmt.Sprintf("(%s) alert : %s", machineId, reason))
@@ -96,6 +109,149 @@ func formatAlertAsString(machineId string, alert *models.Alert) []string {
 	return retStr
 }
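Illustration of the reworked output format (hypothetical values, not taken from the diff): with two decisions attached to one alert, each line now carries a combined machine/origin prefix and a per-decision counter.

// Sketch reproducing the format strings above with made-up values.
machineIdOrigin := fmt.Sprintf("%s/%s", "machine1", "crowdsec")
reason := fmt.Sprintf("%s for %d/%d decisions", "ssh-bf by 192.168.1.1", 1, 2)
decision := fmt.Sprintf("%s %s on %s %s", "4h", "ban", "Ip", "192.168.1.1")
fmt.Printf("(%s) %s : %s\n", machineIdOrigin, reason, decision)
// -> (machine1/crowdsec) ssh-bf by 192.168.1.1 for 1/2 decisions : 4h ban on Ip 192.168.1.1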
 
 
+// CreateOrUpdateAlert is specific to PAPI: it checks whether the alert already exists, and inserts it otherwise.
+// If the alert already exists, it checks whether its associated decisions already exist,
+// and inserts any that are missing (i.e. when a previous insert ended in error).
+func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) (string, error) {
+
+	if alertItem.UUID == "" {
+		return "", fmt.Errorf("alert UUID is empty")
+	}
+
+	alerts, err := c.Ent.Alert.Query().Where(alert.UUID(alertItem.UUID)).WithDecisions().All(c.CTX)
+
+	if err != nil && !ent.IsNotFound(err) {
+		return "", errors.Wrapf(err, "unable to query alerts for uuid %s", alertItem.UUID)
+	}
+
+	//alert wasn't found, insert it (expected hotpath)
+	if ent.IsNotFound(err) || len(alerts) == 0 {
+		ret, err := c.CreateAlert(machineID, []*models.Alert{alertItem})
+		if err != nil {
+			return "", errors.Wrap(err, "unable to create alert")
+		}
+		return ret[0], nil
+	}
+
+	//this should never happen
+	if len(alerts) > 1 {
+		return "", fmt.Errorf("multiple alerts found for uuid %s", alertItem.UUID)
+	}
+
+	log.Infof("Alert %s already exists, checking associated decisions", alertItem.UUID)
+	//alert is found, check for any missing decisions
+	missingUuids := []string{}
+	newUuids := []string{}
+	for _, decItem := range alertItem.Decisions {
+		newUuids = append(newUuids, decItem.UUID)
+	}
+
+	foundAlert := alerts[0]
+	foundUuids := []string{}
+	for _, decItem := range foundAlert.Edges.Decisions {
+		foundUuids = append(foundUuids, decItem.UUID)
+	}
+
+	sort.Strings(foundUuids)
+	sort.Strings(newUuids)
+
+	for idx, uuid := range newUuids {
+		if len(foundUuids) < idx+1 || uuid != foundUuids[idx] {
+			log.Warningf("Decision with uuid %s not found in alert %s", uuid, foundAlert.UUID)
+			missingUuids = append(missingUuids, uuid)
+		}
+	}
+
+	//add any and all missing decisions based on their uuids
+	if len(missingUuids) > 0 {
+		//prepare missing decisions
+		missingDecisions := []*models.Decision{}
+		for _, uuid := range missingUuids {
+			for _, newDecision := range alertItem.Decisions {
+				if newDecision.UUID == uuid {
+					missingDecisions = append(missingDecisions, newDecision)
+				}
+			}
+		}
+
+		//add missing decisions
+		log.Debugf("Adding %d missing decisions to alert %s", len(missingDecisions), foundAlert.UUID)
+
+		decisions := make([]*ent.Decision, 0)
+		decisionBulk := make([]*ent.DecisionCreate, 0, decisionBulkSize)
+
+		for i, decisionItem := range missingDecisions {
+			var start_ip, start_sfx, end_ip, end_sfx int64
+			var sz int
+
+			/*if the scope is IP or Range, convert the value to integers */
+			if strings.ToLower(*decisionItem.Scope) == "ip" || strings.ToLower(*decisionItem.Scope) == "range" {
+				sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(*decisionItem.Value)
+				if err != nil {
+					return "", errors.Wrapf(ParseDurationFail, "invalid addr/range %s : %s", *decisionItem.Value, err)
+				}
+			}
+			decisionDuration, err := time.ParseDuration(*decisionItem.Duration)
+			if err != nil {
+				log.Warningf("invalid duration %s for decision %s", *decisionItem.Duration, decisionItem.UUID)
+				continue
+			}
+			//use the created_at from the alert instead
+			alertTime, err := time.Parse(time.RFC3339, alertItem.CreatedAt)
+			if err != nil {
+				log.Errorf("unable to parse alert time %s : %s", alertItem.CreatedAt, err)
+				alertTime = time.Now()
+			}
+			decisionUntil := alertTime.UTC().Add(decisionDuration)
+
+			decisionCreate := c.Ent.Decision.Create().
+				SetUntil(decisionUntil).
+				SetScenario(*decisionItem.Scenario).
+				SetType(*decisionItem.Type).
+				SetStartIP(start_ip).
+				SetStartSuffix(start_sfx).
+				SetEndIP(end_ip).
+				SetEndSuffix(end_sfx).
+				SetIPSize(int64(sz)).
+				SetValue(*decisionItem.Value).
+				SetScope(*decisionItem.Scope).
+				SetOrigin(*decisionItem.Origin).
+				SetSimulated(*alertItem.Simulated).
+				SetUUID(decisionItem.UUID)
+
+			decisionBulk = append(decisionBulk, decisionCreate)
+			if len(decisionBulk) == decisionBulkSize {
+				decisionsCreateRet, err := c.Ent.Decision.CreateBulk(decisionBulk...).Save(c.CTX)
+				if err != nil {
+					return "", errors.Wrapf(BulkError, "creating alert decisions: %s", err)
+
+				}
+				decisions = append(decisions, decisionsCreateRet...)
+				if len(missingDecisions)-i <= decisionBulkSize {
+					decisionBulk = make([]*ent.DecisionCreate, 0, (len(missingDecisions) - i))
+				} else {
+					decisionBulk = make([]*ent.DecisionCreate, 0, decisionBulkSize)
+				}
+			}
+		}
+		decisionsCreateRet, err := c.Ent.Decision.CreateBulk(decisionBulk...).Save(c.CTX)
+		if err != nil {
+			return "", errors.Wrapf(BulkError, "creating alert decisions: %s", err)
+		}
+		decisions = append(decisions, decisionsCreateRet...)
+		//now that we bulk created missing decisions, let's update the alert
+		err = c.Ent.Alert.Update().Where(alert.UUID(alertItem.UUID)).AddDecisions(decisions...).Exec(c.CTX)
+		if err != nil {
+			return "", errors.Wrapf(err, "updating alert %s : %s", alertItem.UUID, err)
+		}
+	} else {
+		log.Warningf("alert %s was already complete with decisions %+v", alertItem.UUID, foundUuids)
+	}
+
+	return "", nil
+
+}
+
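A minimal caller sketch (the function below is hypothetical and not part of this change; it assumes the usual imports of pkg/database and pkg/models): because CreateOrUpdateAlert only inserts decisions whose UUIDs are not stored yet, a re-delivered PAPI message can be ingested safely.

func ingestPapiAlert(dbClient *database.Client, alert *models.Alert) error {
	if alert.UUID == "" {
		return fmt.Errorf("refusing alert without UUID") // CreateOrUpdateAlert rejects empty UUIDs anyway
	}
	// Passing an empty machineID here assumes PAPI alerts are not owned by a local machine.
	if _, err := dbClient.CreateOrUpdateAlert("", alert); err != nil {
		return fmt.Errorf("papi alert ingest: %w", err)
	}
	return nil
}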
 func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]string, error) {
 	pageStart := 0
 	pageEnd := bulkSize
@@ -337,6 +493,21 @@ func chunkDecisions(decisions []*ent.Decision, chunkSize int) [][]*ent.Decision
 func (c *Client) CreateAlertBulk(machineId string, alertList []*models.Alert) ([]string, error) {
 	ret := []string{}
 	bulkSize := 20
+	var owner *ent.Machine
+	var err error
+
+	if machineId != "" {
+		owner, err = c.QueryMachineByID(machineId)
+		if err != nil {
+			if errors.Cause(err) != UserNotExists {
+				return []string{}, errors.Wrapf(QueryFail, "machine '%s': %s", machineId, err)
+			}
+			c.Log.Debugf("CreateAlertBulk: Machine Id %s doesn't exist", machineId)
+			owner = nil
+		}
+	} else {
+		owner = nil
+	}
 
 
 	c.Log.Debugf("writing %d items", len(alertList))
 	bulk := make([]*ent.AlertCreate, 0, bulkSize)
@@ -346,14 +517,6 @@ func (c *Client) CreateAlertBulk(machineId string, alertList []*models.Alert) ([
 		var metas []*ent.Meta
 		var events []*ent.Event

-		owner, err := c.QueryMachineByID(machineId)
-		if err != nil {
-			if errors.Cause(err) != UserNotExists {
-				return []string{}, errors.Wrapf(QueryFail, "machine '%s': %s", alertItem.MachineID, err)
-			}
-			c.Log.Debugf("CreateAlertBulk: Machine Id %s doesn't exist", machineId)
-			owner = nil
-		}
 		startAtTime, err := time.Parse(time.RFC3339, *alertItem.StartAt)
 		if err != nil {
 			c.Log.Errorf("CreateAlertBulk: Failed to parse startAtTime '%s', defaulting to now: %s", *alertItem.StartAt, err)
@@ -480,7 +643,8 @@ func (c *Client) CreateAlertBulk(machineId string, alertList []*models.Alert) ([
 					SetValue(*decisionItem.Value).
 					SetScope(*decisionItem.Scope).
 					SetOrigin(*decisionItem.Origin).
-					SetSimulated(*alertItem.Simulated)
+					SetSimulated(*alertItem.Simulated).
+					SetUUID(decisionItem.UUID)
 
 
 				decisionBulk = append(decisionBulk, decisionCreate)
 				if len(decisionBulk) == decisionBulkSize {
@@ -525,6 +689,7 @@ func (c *Client) CreateAlertBulk(machineId string, alertList []*models.Alert) ([
 			SetSimulated(*alertItem.Simulated).
 			SetScenarioVersion(*alertItem.ScenarioVersion).
 			SetScenarioHash(*alertItem.ScenarioHash).
+			SetUUID(alertItem.UUID).
 			AddEvents(events...).
 			AddMetas(metas...)

@@ -661,7 +826,11 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e
 			predicates = append(predicates, alert.HasDecisionsWith(decision.OriginEQ(value[0])))
 		case "include_capi": //allows to exclude one or more specific origins
 			if value[0] == "false" {
-				predicates = append(predicates, alert.HasDecisionsWith(decision.Or(decision.OriginEQ("crowdsec"), decision.OriginEQ("cscli"))))
+				predicates = append(predicates, alert.HasDecisionsWith(
+					decision.Or(decision.OriginEQ(types.CrowdSecOrigin),
+						decision.OriginEQ(types.CscliOrigin),
+						decision.OriginEQ(types.ConsoleOrigin),
+						decision.OriginEQ(types.CscliImportOrigin))))
 			} else if value[0] != "true" {
 				log.Errorf("Invalid bool '%s' for include_capi", value[0])
 			}

+ 33 - 0
pkg/database/config.go

@@ -0,0 +1,33 @@
+package database
+
+import (
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
+	"github.com/pkg/errors"
+)
+
+func (c *Client) GetConfigItem(key string) (*string, error) {
+	result, err := c.Ent.ConfigItem.Query().Where(configitem.NameEQ(key)).First(c.CTX)
+	if err != nil && ent.IsNotFound(err) {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, errors.Wrapf(QueryFail, "select config item: %s", err)
+	}
+
+	return &result.Value, nil
+}
+
+func (c *Client) SetConfigItem(key string, value string) error {
+
+	nbUpdated, err := c.Ent.ConfigItem.Update().SetValue(value).Where(configitem.NameEQ(key)).Save(c.CTX)
+	if (err != nil && ent.IsNotFound(err)) || nbUpdated == 0 { //not found, create
+		err := c.Ent.ConfigItem.Create().SetName(key).SetValue(value).Exec(c.CTX)
+		if err != nil {
+			return errors.Wrapf(QueryFail, "insert config item: %s", err)
+		}
+	} else if err != nil {
+		return errors.Wrapf(QueryFail, "update config item: %s", err)
+	}
+	return nil
+}
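Usage sketch (the key name is illustrative, and the *database.Client is assumed to come from the usual client setup): SetConfigItem upserts a key, and GetConfigItem returns nil, nil when the key has never been written.

func saveCheckpoint(dbClient *database.Client, ts time.Time) error {
	return dbClient.SetConfigItem("example:last_sync", ts.Format(time.RFC3339))
}

func loadCheckpoint(dbClient *database.Client) (*time.Time, error) {
	val, err := dbClient.GetConfigItem("example:last_sync")
	if err != nil || val == nil { // nil value: the key was never set
		return nil, err
	}
	ts, err := time.Parse(time.RFC3339, *val)
	if err != nil {
		return nil, err
	}
	return &ts, nil
}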

+ 101 - 31
pkg/database/decisions.go

@@ -269,16 +269,18 @@ func (c *Client) QueryNewDecisionsSinceWithFilters(since time.Time, filters map[
 	return data, nil
 }

-func (c *Client) DeleteDecisionById(decisionId int) error {
-	err := c.Ent.Decision.DeleteOneID(decisionId).Exec(c.CTX)
+func (c *Client) DeleteDecisionById(decisionId int) ([]*ent.Decision, error) {
+	toDelete, err := c.Ent.Decision.Query().Where(decision.IDEQ(decisionId)).All(c.CTX)
 	if err != nil {
 		c.Log.Warningf("DeleteDecisionById : %s", err)
-		return errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionId)
+		return nil, errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionId)
 	}
-	return nil
+	count, err := c.BulkDeleteDecisions(toDelete, false)
+	c.Log.Debugf("deleted %d decisions", count)
+	return toDelete, err
 }

-func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, error) {
+func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, []*ent.Decision, error) {
 	var err error
 	var start_ip, start_sfx, end_ip, end_sfx int64
 	var ip_sz int
@@ -286,13 +288,13 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string,
 	/*if contains is true, return bans that *contains* the given value (value is the inner)
 	  else, return bans that are *contained* by the given value (value is the outer) */

-	decisions := c.Ent.Decision.Delete()
+	decisions := c.Ent.Decision.Query()
 	for param, value := range filter {
 		switch param {
 		case "contains":
 			contains, err = strconv.ParseBool(value[0])
 			if err != nil {
-				return "0", errors.Wrapf(InvalidFilter, "invalid contains value : %s", err)
+				return "0", nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err)
 			}
 		case "scope":
 			decisions = decisions.Where(decision.ScopeEQ(value[0]))
@@ -303,12 +305,12 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string,
 		case "ip", "range":
 			ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(value[0])
 			if err != nil {
-				return "0", errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", value[0], err)
+				return "0", nil, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", value[0], err)
 			}
 		case "scenario":
 			decisions = decisions.Where(decision.ScenarioEQ(value[0]))
 		default:
-			return "0", errors.Wrap(InvalidFilter, fmt.Sprintf("'%s' doesn't exist", param))
+			return "0", nil, errors.Wrap(InvalidFilter, fmt.Sprintf("'%s' doesn't exist", param))
 		}
 	}

@@ -377,35 +379,42 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string,
 			))
 		}
 	} else if ip_sz != 0 {
-		return "0", errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz)
+		return "0", nil, errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz)
 	}

-	nbDeleted, err := decisions.Exec(c.CTX)
+	toDelete, err := decisions.All(c.CTX)
 	if err != nil {
 		c.Log.Warningf("DeleteDecisionsWithFilter : %s", err)
-		return "0", errors.Wrap(DeleteFail, "decisions with provided filter")
+		return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter")
+	}
+	count, err := c.BulkDeleteDecisions(toDelete, false)
+	if err != nil {
+		c.Log.Warningf("While deleting decisions : %s", err)
+		return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter")
 	}
-	return strconv.Itoa(nbDeleted), nil
+	return strconv.Itoa(count), toDelete, nil
 }

-// SoftDeleteDecisionsWithFilter updates the expiration time to now() for the decisions matching the filter
-func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (string, error) {
+// SoftDeleteDecisionsWithFilter updates the expiration time to now() for the decisions matching the filter, and returns the updated items
+func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (string, []*ent.Decision, error) {
 	var err error
 	var start_ip, start_sfx, end_ip, end_sfx int64
 	var ip_sz int
 	var contains bool = true
 	/*if contains is true, return bans that *contains* the given value (value is the inner)
 	  else, return bans that are *contained* by the given value (value is the outer)*/
-	decisions := c.Ent.Decision.Update().Where(decision.UntilGT(time.Now().UTC()))
+	decisions := c.Ent.Decision.Query().Where(decision.UntilGT(time.Now().UTC()))
 	for param, value := range filter {
 		switch param {
 		case "contains":
 			contains, err = strconv.ParseBool(value[0])
 			if err != nil {
-				return "0", errors.Wrapf(InvalidFilter, "invalid contains value : %s", err)
+				return "0", nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err)
 			}
 		case "scopes":
 			decisions = decisions.Where(decision.ScopeEQ(value[0]))
+		case "uuid":
+			decisions = decisions.Where(decision.UUIDIn(value...))
 		case "origin":
 			decisions = decisions.Where(decision.OriginEQ(value[0]))
 		case "value":
@@ -415,12 +424,12 @@ func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (stri
 		case "ip", "range":
 			ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(value[0])
 			if err != nil {
-				return "0", errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", value[0], err)
+				return "0", nil, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", value[0], err)
 			}
 		case "scenario":
 			decisions = decisions.Where(decision.ScenarioEQ(value[0]))
 		default:
-			return "0", errors.Wrapf(InvalidFilter, "'%s' doesn't exist", param)
+			return "0", nil, errors.Wrapf(InvalidFilter, "'%s' doesn't exist", param)
 		}
 	}
 	if ip_sz == 4 {
@@ -492,28 +501,89 @@ func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (stri
 			))
 		}
 	} else if ip_sz != 0 {
-		return "0", errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz)
+		return "0", nil, errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz)
 	}
-	nbDeleted, err := decisions.SetUntil(time.Now().UTC()).Save(c.CTX)
+	DecisionsToDelete, err := decisions.All(c.CTX)
 	if err != nil {
 		c.Log.Warningf("SoftDeleteDecisionsWithFilter : %s", err)
-		return "0", errors.Wrap(DeleteFail, "soft delete decisions with provided filter")
+		return "0", nil, errors.Wrap(DeleteFail, "soft delete decisions with provided filter")
+	}
+
+	count, err := c.BulkDeleteDecisions(DecisionsToDelete, true)
+	if err != nil {
+		return "0", nil, errors.Wrapf(DeleteFail, "soft delete decisions with provided filter : %s", err)
 	}
-	return strconv.Itoa(nbDeleted), nil
+	return strconv.Itoa(count), DecisionsToDelete, err
+}
+
+// BulkDeleteDecisions set the expiration of a bulk of decisions to now() or hard deletes them.
+// We are doing it this way so we can return impacted decisions for sync with CAPI/PAPI
+func (c *Client) BulkDeleteDecisions(DecisionsToDelete []*ent.Decision, softDelete bool) (int, error) {
+	bulkSize := 256 //scientifically proven to be the best value for bulk delete
+	idsToDelete := make([]int, 0, bulkSize)
+	totalUpdates := 0
+	for i := 0; i < len(DecisionsToDelete); i++ {
+		idsToDelete = append(idsToDelete, DecisionsToDelete[i].ID)
+		if len(idsToDelete) == bulkSize {
+
+			if softDelete {
+				nbUpdates, err := c.Ent.Decision.Update().Where(
+					decision.IDIn(idsToDelete...),
+				).SetUntil(time.Now().UTC()).Save(c.CTX)
+				if err != nil {
+					return totalUpdates, errors.Wrap(err, "soft delete decisions with provided filter")
+				}
+				totalUpdates += nbUpdates
+			} else {
+				nbUpdates, err := c.Ent.Decision.Delete().Where(
+					decision.IDIn(idsToDelete...),
+				).Exec(c.CTX)
+				if err != nil {
+					return totalUpdates, errors.Wrap(err, "hard delete decisions with provided filter")
+				}
+				totalUpdates += nbUpdates
+			}
+			idsToDelete = make([]int, 0, bulkSize)
+		}
+	}
+
+	if len(idsToDelete) > 0 {
+		if softDelete {
+			nbUpdates, err := c.Ent.Decision.Update().Where(
+				decision.IDIn(idsToDelete...),
+			).SetUntil(time.Now().UTC()).Save(c.CTX)
+			if err != nil {
+				return totalUpdates, errors.Wrap(err, "soft delete decisions with provided filter")
+			}
+			totalUpdates += nbUpdates
+		} else {
+			nbUpdates, err := c.Ent.Decision.Delete().Where(
+				decision.IDIn(idsToDelete...),
+			).Exec(c.CTX)
+			if err != nil {
+				return totalUpdates, errors.Wrap(err, "hard delete decisions with provided filter")
+			}
+			totalUpdates += nbUpdates
+		}
+
+	}
+	return totalUpdates, nil
 }

 // SoftDeleteDecisionByID set the expiration of a decision to now()
-func (c *Client) SoftDeleteDecisionByID(decisionID int) (int, error) {
-	nbUpdated, err := c.Ent.Decision.Update().Where(decision.IDEQ(decisionID)).SetUntil(time.Now().UTC()).Save(c.CTX)
-	if err != nil || nbUpdated == 0 {
-		c.Log.Warningf("SoftDeleteDecisionByID : %v (nb soft deleted: %d)", err, nbUpdated)
-		return 0, errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionID)
+func (c *Client) SoftDeleteDecisionByID(decisionID int) (int, []*ent.Decision, error) {
+	toUpdate, err := c.Ent.Decision.Query().Where(decision.IDEQ(decisionID)).All(c.CTX)
+
+	if err != nil || len(toUpdate) == 0 {
+		c.Log.Warningf("SoftDeleteDecisionByID : %v (nb soft deleted: %d)", err, len(toUpdate))
+		return 0, nil, errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionID)
 	}

-	if nbUpdated == 0 {
-		return 0, ItemNotFound
+	if len(toUpdate) == 0 {
+		return 0, nil, ItemNotFound
 	}
-	return nbUpdated, nil
+	count, err := c.BulkDeleteDecisions(toUpdate, true)
+	return count, toUpdate, err
 }

 func (c *Client) CountDecisionsByValue(decisionValue string) (int, error) {
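Sketch of how a caller can use the new return values (the function and notify callback below are hypothetical; imports of pkg/database and pkg/database/ent are assumed): the soft- and hard-delete helpers now hand back the affected rows so they can be forwarded, e.g. for CAPI/PAPI synchronisation.

func expireDecisionsForIP(dbClient *database.Client, ip string, notify func([]*ent.Decision)) error {
	nbDeleted, deleted, err := dbClient.SoftDeleteDecisionsWithFilter(map[string][]string{"ip": {ip}})
	if err != nil {
		return err
	}
	fmt.Printf("expired %s decisions for %s\n", nbDeleted, ip)
	if len(deleted) > 0 {
		notify(deleted) // forward the impacted decisions to whoever needs them
	}
	return nil
}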

+ 11 - 1
pkg/database/ent/alert.go

@@ -61,6 +61,8 @@ type Alert struct {
 	ScenarioHash string `json:"scenarioHash,omitempty"`
 	// Simulated holds the value of the "simulated" field.
 	Simulated bool `json:"simulated,omitempty"`
+	// UUID holds the value of the "uuid" field.
+	UUID string `json:"uuid,omitempty"`
 	// Edges holds the relations/edges for other nodes in the graph.
 	// The values are being populated by the AlertQuery when eager-loading is set.
 	Edges          AlertEdges `json:"edges"`
@@ -133,7 +135,7 @@ func (*Alert) scanValues(columns []string) ([]any, error) {
 			values[i] = new(sql.NullFloat64)
 		case alert.FieldID, alert.FieldEventsCount, alert.FieldCapacity:
 			values[i] = new(sql.NullInt64)
-		case alert.FieldScenario, alert.FieldBucketId, alert.FieldMessage, alert.FieldSourceIp, alert.FieldSourceRange, alert.FieldSourceAsNumber, alert.FieldSourceAsName, alert.FieldSourceCountry, alert.FieldSourceScope, alert.FieldSourceValue, alert.FieldLeakSpeed, alert.FieldScenarioVersion, alert.FieldScenarioHash:
+		case alert.FieldScenario, alert.FieldBucketId, alert.FieldMessage, alert.FieldSourceIp, alert.FieldSourceRange, alert.FieldSourceAsNumber, alert.FieldSourceAsName, alert.FieldSourceCountry, alert.FieldSourceScope, alert.FieldSourceValue, alert.FieldLeakSpeed, alert.FieldScenarioVersion, alert.FieldScenarioHash, alert.FieldUUID:
 			values[i] = new(sql.NullString)
 		case alert.FieldCreatedAt, alert.FieldUpdatedAt, alert.FieldStartedAt, alert.FieldStoppedAt:
 			values[i] = new(sql.NullTime)
@@ -294,6 +296,12 @@ func (a *Alert) assignValues(columns []string, values []any) error {
 			} else if value.Valid {
 				a.Simulated = value.Bool
 			}
+		case alert.FieldUUID:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field uuid", values[i])
+			} else if value.Valid {
+				a.UUID = value.String
+			}
 		case alert.ForeignKeys[0]:
 			if value, ok := values[i].(*sql.NullInt64); !ok {
 				return fmt.Errorf("unexpected type %T for edge-field machine_alerts", value)
@@ -418,6 +426,8 @@ func (a *Alert) String() string {
 	builder.WriteString(", ")
 	builder.WriteString("simulated=")
 	builder.WriteString(fmt.Sprintf("%v", a.Simulated))
+	builder.WriteString(", uuid=")
+	builder.WriteString(a.UUID)
 	builder.WriteByte(')')
 	return builder.String()
 }

+ 3 - 0
pkg/database/ent/alert/alert.go

@@ -55,6 +55,8 @@ const (
 	FieldScenarioHash = "scenario_hash"
 	// FieldSimulated holds the string denoting the simulated field in the database.
 	FieldSimulated = "simulated"
+	// FieldUUID holds the string denoting the uuid field in the database.
+	FieldUUID = "uuid"
 	// EdgeOwner holds the string denoting the owner edge name in mutations.
 	EdgeOwner = "owner"
 	// EdgeDecisions holds the string denoting the decisions edge name in mutations.
@@ -120,6 +122,7 @@ var Columns = []string{
 	FieldScenarioVersion,
 	FieldScenarioHash,
 	FieldSimulated,
+	FieldUUID,
 }

 // ForeignKeys holds the SQL foreign-keys that are owned by the "alerts"

+ 132 - 0
pkg/database/ent/alert/where.go

@@ -235,6 +235,13 @@ func Simulated(v bool) predicate.Alert {
 	})
 }

+// UUID applies equality check predicate on the "uuid" field. It's identical to UUIDEQ.
+func UUID(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldUUID), v))
+	})
+}
+
 // CreatedAtEQ applies the EQ predicate on the "created_at" field.
 func CreatedAtEQ(v time.Time) predicate.Alert {
 	return predicate.Alert(func(s *sql.Selector) {
@@ -2328,6 +2335,131 @@ func SimulatedNEQ(v bool) predicate.Alert {
 	})
 }

+// UUIDEQ applies the EQ predicate on the "uuid" field.
+func UUIDEQ(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDNEQ applies the NEQ predicate on the "uuid" field.
+func UUIDNEQ(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDIn applies the In predicate on the "uuid" field.
+func UUIDIn(vs ...string) predicate.Alert {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		// if not arguments were provided, append the FALSE constants,
+		// since we can't apply "IN ()". This will make this predicate falsy.
+		if len(v) == 0 {
+			s.Where(sql.False())
+			return
+		}
+		s.Where(sql.In(s.C(FieldUUID), v...))
+	})
+}
+
+// UUIDNotIn applies the NotIn predicate on the "uuid" field.
+func UUIDNotIn(vs ...string) predicate.Alert {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		// if not arguments were provided, append the FALSE constants,
+		// since we can't apply "IN ()". This will make this predicate falsy.
+		if len(v) == 0 {
+			s.Where(sql.False())
+			return
+		}
+		s.Where(sql.NotIn(s.C(FieldUUID), v...))
+	})
+}
+
+// UUIDGT applies the GT predicate on the "uuid" field.
+func UUIDGT(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDGTE applies the GTE predicate on the "uuid" field.
+func UUIDGTE(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDLT applies the LT predicate on the "uuid" field.
+func UUIDLT(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDLTE applies the LTE predicate on the "uuid" field.
+func UUIDLTE(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDContains applies the Contains predicate on the "uuid" field.
+func UUIDContains(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDHasPrefix applies the HasPrefix predicate on the "uuid" field.
+func UUIDHasPrefix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDHasSuffix applies the HasSuffix predicate on the "uuid" field.
+func UUIDHasSuffix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDIsNil applies the IsNil predicate on the "uuid" field.
+func UUIDIsNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldUUID)))
+	})
+}
+
+// UUIDNotNil applies the NotNil predicate on the "uuid" field.
+func UUIDNotNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldUUID)))
+	})
+}
+
+// UUIDEqualFold applies the EqualFold predicate on the "uuid" field.
+func UUIDEqualFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDContainsFold applies the ContainsFold predicate on the "uuid" field.
+func UUIDContainsFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldUUID), v))
+	})
+}
+
 // HasOwner applies the HasEdge predicate on the "owner" edge.
 func HasOwner() predicate.Alert {
 	return predicate.Alert(func(s *sql.Selector) {
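Illustrative query using the new generated predicate (client is assumed to be an *ent.Client, ctx a context.Context, and the UUID value is a placeholder), mirroring what CreateOrUpdateAlert does in pkg/database/alerts.go:

existing, err := client.Alert.Query().
	Where(alert.UUID("00000000-0000-0000-0000-000000000000")). // placeholder value
	WithDecisions().
	All(ctx)
if err != nil {
	log.Fatalf("query by uuid: %s", err)
}
log.Printf("found %d alert(s)", len(existing))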

+ 22 - 0
pkg/database/ent/alert_create.go

@@ -324,6 +324,20 @@ func (ac *AlertCreate) SetNillableSimulated(b *bool) *AlertCreate {
 	return ac
 }

+// SetUUID sets the "uuid" field.
+func (ac *AlertCreate) SetUUID(s string) *AlertCreate {
+	ac.mutation.SetUUID(s)
+	return ac
+}
+
+// SetNillableUUID sets the "uuid" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableUUID(s *string) *AlertCreate {
+	if s != nil {
+		ac.SetUUID(*s)
+	}
+	return ac
+}
+
 // SetOwnerID sets the "owner" edge to the Machine entity by ID.
 func (ac *AlertCreate) SetOwnerID(id int) *AlertCreate {
 	ac.mutation.SetOwnerID(id)
@@ -710,6 +724,14 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) {
 		})
 		_node.Simulated = value
 	}
+	if value, ok := ac.mutation.UUID(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: alert.FieldUUID,
+		})
+		_node.UUID = value
+	}
 	if nodes := ac.mutation.OwnerIDs(); len(nodes) > 0 {
 		edge := &sqlgraph.EdgeSpec{
 			Rel:     sqlgraph.M2O,

+ 66 - 0
pkg/database/ent/alert_update.go

@@ -464,6 +464,26 @@ func (au *AlertUpdate) SetNillableSimulated(b *bool) *AlertUpdate {
 	return au
 }

+// SetUUID sets the "uuid" field.
+func (au *AlertUpdate) SetUUID(s string) *AlertUpdate {
+	au.mutation.SetUUID(s)
+	return au
+}
+
+// SetNillableUUID sets the "uuid" field if the given value is not nil.
+func (au *AlertUpdate) SetNillableUUID(s *string) *AlertUpdate {
+	if s != nil {
+		au.SetUUID(*s)
+	}
+	return au
+}
+
+// ClearUUID clears the value of the "uuid" field.
+func (au *AlertUpdate) ClearUUID() *AlertUpdate {
+	au.mutation.ClearUUID()
+	return au
+}
+
 // SetOwnerID sets the "owner" edge to the Machine entity by ID.
 func (au *AlertUpdate) SetOwnerID(id int) *AlertUpdate {
 	au.mutation.SetOwnerID(id)
@@ -989,6 +1009,19 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) {
 			Column: alert.FieldSimulated,
 		})
 	}
+	if value, ok := au.mutation.UUID(); ok {
+		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: alert.FieldUUID,
+		})
+	}
+	if au.mutation.UUIDCleared() {
+		_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Column: alert.FieldUUID,
+		})
+	}
 	if au.mutation.OwnerCleared() {
 		edge := &sqlgraph.EdgeSpec{
 			Rel:     sqlgraph.M2O,
@@ -1637,6 +1670,26 @@ func (auo *AlertUpdateOne) SetNillableSimulated(b *bool) *AlertUpdateOne {
 	return auo
 }

+// SetUUID sets the "uuid" field.
+func (auo *AlertUpdateOne) SetUUID(s string) *AlertUpdateOne {
+	auo.mutation.SetUUID(s)
+	return auo
+}
+
+// SetNillableUUID sets the "uuid" field if the given value is not nil.
+func (auo *AlertUpdateOne) SetNillableUUID(s *string) *AlertUpdateOne {
+	if s != nil {
+		auo.SetUUID(*s)
+	}
+	return auo
+}
+
+// ClearUUID clears the value of the "uuid" field.
+func (auo *AlertUpdateOne) ClearUUID() *AlertUpdateOne {
+	auo.mutation.ClearUUID()
+	return auo
+}
+
 // SetOwnerID sets the "owner" edge to the Machine entity by ID.
 func (auo *AlertUpdateOne) SetOwnerID(id int) *AlertUpdateOne {
 	auo.mutation.SetOwnerID(id)
@@ -2192,6 +2245,19 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error
 			Column: alert.FieldSimulated,
 		})
 	}
+	if value, ok := auo.mutation.UUID(); ok {
+		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: alert.FieldUUID,
+		})
+	}
+	if auo.mutation.UUIDCleared() {
+		_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Column: alert.FieldUUID,
+		})
+	}
 	if auo.mutation.OwnerCleared() {
 		edge := &sqlgraph.EdgeSpec{
 			Rel:     sqlgraph.M2O,

+ 113 - 16
pkg/database/ent/client.go

@@ -12,6 +12,7 @@ import (

 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
@@ -31,6 +32,8 @@ type Client struct {
 	Alert *AlertClient
 	// Bouncer is the client for interacting with the Bouncer builders.
 	Bouncer *BouncerClient
+	// ConfigItem is the client for interacting with the ConfigItem builders.
+	ConfigItem *ConfigItemClient
 	// Decision is the client for interacting with the Decision builders.
 	Decision *DecisionClient
 	// Event is the client for interacting with the Event builders.
@@ -54,6 +57,7 @@ func (c *Client) init() {
 	c.Schema = migrate.NewSchema(c.driver)
 	c.Alert = NewAlertClient(c.config)
 	c.Bouncer = NewBouncerClient(c.config)
+	c.ConfigItem = NewConfigItemClient(c.config)
 	c.Decision = NewDecisionClient(c.config)
 	c.Event = NewEventClient(c.config)
 	c.Machine = NewMachineClient(c.config)
@@ -89,14 +93,15 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) {
 	cfg := c.config
 	cfg.driver = tx
 	return &Tx{
-		ctx:      ctx,
-		config:   cfg,
-		Alert:    NewAlertClient(cfg),
-		Bouncer:  NewBouncerClient(cfg),
-		Decision: NewDecisionClient(cfg),
-		Event:    NewEventClient(cfg),
-		Machine:  NewMachineClient(cfg),
-		Meta:     NewMetaClient(cfg),
+		ctx:        ctx,
+		config:     cfg,
+		Alert:      NewAlertClient(cfg),
+		Bouncer:    NewBouncerClient(cfg),
+		ConfigItem: NewConfigItemClient(cfg),
+		Decision:   NewDecisionClient(cfg),
+		Event:      NewEventClient(cfg),
+		Machine:    NewMachineClient(cfg),
+		Meta:       NewMetaClient(cfg),
 	}, nil
 }

@@ -114,14 +119,15 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error)
 	cfg := c.config
 	cfg.driver = &txDriver{tx: tx, drv: c.driver}
 	return &Tx{
-		ctx:      ctx,
-		config:   cfg,
-		Alert:    NewAlertClient(cfg),
-		Bouncer:  NewBouncerClient(cfg),
-		Decision: NewDecisionClient(cfg),
-		Event:    NewEventClient(cfg),
-		Machine:  NewMachineClient(cfg),
-		Meta:     NewMetaClient(cfg),
+		ctx:        ctx,
+		config:     cfg,
+		Alert:      NewAlertClient(cfg),
+		Bouncer:    NewBouncerClient(cfg),
+		ConfigItem: NewConfigItemClient(cfg),
+		Decision:   NewDecisionClient(cfg),
+		Event:      NewEventClient(cfg),
+		Machine:    NewMachineClient(cfg),
+		Meta:       NewMetaClient(cfg),
 	}, nil
 }

@@ -152,6 +158,7 @@ func (c *Client) Close() error {
 func (c *Client) Use(hooks ...Hook) {
 	c.Alert.Use(hooks...)
 	c.Bouncer.Use(hooks...)
+	c.ConfigItem.Use(hooks...)
 	c.Decision.Use(hooks...)
 	c.Event.Use(hooks...)
 	c.Machine.Use(hooks...)
@@ -402,6 +409,96 @@ func (c *BouncerClient) Hooks() []Hook {
 	return c.hooks.Bouncer
 }

+// ConfigItemClient is a client for the ConfigItem schema.
+type ConfigItemClient struct {
+	config
+}
+
+// NewConfigItemClient returns a client for the ConfigItem from the given config.
+func NewConfigItemClient(c config) *ConfigItemClient {
+	return &ConfigItemClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` equals to `configitem.Hooks(f(g(h())))`.
+func (c *ConfigItemClient) Use(hooks ...Hook) {
+	c.hooks.ConfigItem = append(c.hooks.ConfigItem, hooks...)
+}
+
+// Create returns a create builder for ConfigItem.
+func (c *ConfigItemClient) Create() *ConfigItemCreate {
+	mutation := newConfigItemMutation(c.config, OpCreate)
+	return &ConfigItemCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// CreateBulk returns a builder for creating a bulk of ConfigItem entities.
+func (c *ConfigItemClient) CreateBulk(builders ...*ConfigItemCreate) *ConfigItemCreateBulk {
+	return &ConfigItemCreateBulk{config: c.config, builders: builders}
+}
+
+// Update returns an update builder for ConfigItem.
+func (c *ConfigItemClient) Update() *ConfigItemUpdate {
+	mutation := newConfigItemMutation(c.config, OpUpdate)
+	return &ConfigItemUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *ConfigItemClient) UpdateOne(ci *ConfigItem) *ConfigItemUpdateOne {
+	mutation := newConfigItemMutation(c.config, OpUpdateOne, withConfigItem(ci))
+	return &ConfigItemUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *ConfigItemClient) UpdateOneID(id int) *ConfigItemUpdateOne {
+	mutation := newConfigItemMutation(c.config, OpUpdateOne, withConfigItemID(id))
+	return &ConfigItemUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// Delete returns a delete builder for ConfigItem.
+func (c *ConfigItemClient) Delete() *ConfigItemDelete {
+	mutation := newConfigItemMutation(c.config, OpDelete)
+	return &ConfigItemDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// DeleteOne returns a delete builder for the given entity.
+func (c *ConfigItemClient) DeleteOne(ci *ConfigItem) *ConfigItemDeleteOne {
+	return c.DeleteOneID(ci.ID)
+}
+
+// DeleteOneID returns a delete builder for the given id.
+func (c *ConfigItemClient) DeleteOneID(id int) *ConfigItemDeleteOne {
+	builder := c.Delete().Where(configitem.ID(id))
+	builder.mutation.id = &id
+	builder.mutation.op = OpDeleteOne
+	return &ConfigItemDeleteOne{builder}
+}
+
+// Query returns a query builder for ConfigItem.
+func (c *ConfigItemClient) Query() *ConfigItemQuery {
+	return &ConfigItemQuery{
+		config: c.config,
+	}
+}
+
+// Get returns a ConfigItem entity by its id.
+func (c *ConfigItemClient) Get(ctx context.Context, id int) (*ConfigItem, error) {
+	return c.Query().Where(configitem.ID(id)).Only(ctx)
+}
+
+// GetX is like Get, but panics if an error occurs.
+func (c *ConfigItemClient) GetX(ctx context.Context, id int) *ConfigItem {
+	obj, err := c.Get(ctx, id)
+	if err != nil {
+		panic(err)
+	}
+	return obj
+}
+
+// Hooks returns the client hooks.
+func (c *ConfigItemClient) Hooks() []Hook {
+	return c.hooks.ConfigItem
+}
+
 // DecisionClient is a client for the Decision schema.
 type DecisionClient struct {
 	config

+ 7 - 6
pkg/database/ent/config.go

@@ -24,12 +24,13 @@ type config struct {
 
 
 // hooks per client, for fast access.
 type hooks struct {
-	Alert    []ent.Hook
-	Bouncer  []ent.Hook
-	Decision []ent.Hook
-	Event    []ent.Hook
-	Machine  []ent.Hook
-	Meta     []ent.Hook
+	Alert      []ent.Hook
+	Bouncer    []ent.Hook
+	ConfigItem []ent.Hook
+	Decision   []ent.Hook
+	Event      []ent.Hook
+	Machine    []ent.Hook
+	Meta       []ent.Hook
 }
 
 
 // Options applies the options on the config object.

+ 138 - 0
pkg/database/ent/configitem.go

@@ -0,0 +1,138 @@
+// Code generated by entc, DO NOT EDIT.
+
+package ent
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
+)
+
+// ConfigItem is the model entity for the ConfigItem schema.
+type ConfigItem struct {
+	config `json:"-"`
+	// ID of the ent.
+	ID int `json:"id,omitempty"`
+	// CreatedAt holds the value of the "created_at" field.
+	CreatedAt *time.Time `json:"created_at"`
+	// UpdatedAt holds the value of the "updated_at" field.
+	UpdatedAt *time.Time `json:"updated_at"`
+	// Name holds the value of the "name" field.
+	Name string `json:"name"`
+	// Value holds the value of the "value" field.
+	Value string `json:"value"`
+}
+
+// scanValues returns the types for scanning values from sql.Rows.
+func (*ConfigItem) scanValues(columns []string) ([]interface{}, error) {
+	values := make([]interface{}, len(columns))
+	for i := range columns {
+		switch columns[i] {
+		case configitem.FieldID:
+			values[i] = new(sql.NullInt64)
+		case configitem.FieldName, configitem.FieldValue:
+			values[i] = new(sql.NullString)
+		case configitem.FieldCreatedAt, configitem.FieldUpdatedAt:
+			values[i] = new(sql.NullTime)
+		default:
+			return nil, fmt.Errorf("unexpected column %q for type ConfigItem", columns[i])
+		}
+	}
+	return values, nil
+}
+
+// assignValues assigns the values that were returned from sql.Rows (after scanning)
+// to the ConfigItem fields.
+func (ci *ConfigItem) assignValues(columns []string, values []interface{}) error {
+	if m, n := len(values), len(columns); m < n {
+		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
+	}
+	for i := range columns {
+		switch columns[i] {
+		case configitem.FieldID:
+			value, ok := values[i].(*sql.NullInt64)
+			if !ok {
+				return fmt.Errorf("unexpected type %T for field id", value)
+			}
+			ci.ID = int(value.Int64)
+		case configitem.FieldCreatedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field created_at", values[i])
+			} else if value.Valid {
+				ci.CreatedAt = new(time.Time)
+				*ci.CreatedAt = value.Time
+			}
+		case configitem.FieldUpdatedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
+			} else if value.Valid {
+				ci.UpdatedAt = new(time.Time)
+				*ci.UpdatedAt = value.Time
+			}
+		case configitem.FieldName:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field name", values[i])
+			} else if value.Valid {
+				ci.Name = value.String
+			}
+		case configitem.FieldValue:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field value", values[i])
+			} else if value.Valid {
+				ci.Value = value.String
+			}
+		}
+	}
+	return nil
+}
+
+// Update returns a builder for updating this ConfigItem.
+// Note that you need to call ConfigItem.Unwrap() before calling this method if this ConfigItem
+// was returned from a transaction, and the transaction was committed or rolled back.
+func (ci *ConfigItem) Update() *ConfigItemUpdateOne {
+	return (&ConfigItemClient{config: ci.config}).UpdateOne(ci)
+}
+
+// Unwrap unwraps the ConfigItem entity that was returned from a transaction after it was closed,
+// so that all future queries will be executed through the driver which created the transaction.
+func (ci *ConfigItem) Unwrap() *ConfigItem {
+	tx, ok := ci.config.driver.(*txDriver)
+	if !ok {
+		panic("ent: ConfigItem is not a transactional entity")
+	}
+	ci.config.driver = tx.drv
+	return ci
+}
+
+// String implements the fmt.Stringer.
+func (ci *ConfigItem) String() string {
+	var builder strings.Builder
+	builder.WriteString("ConfigItem(")
+	builder.WriteString(fmt.Sprintf("id=%v", ci.ID))
+	if v := ci.CreatedAt; v != nil {
+		builder.WriteString(", created_at=")
+		builder.WriteString(v.Format(time.ANSIC))
+	}
+	if v := ci.UpdatedAt; v != nil {
+		builder.WriteString(", updated_at=")
+		builder.WriteString(v.Format(time.ANSIC))
+	}
+	builder.WriteString(", name=")
+	builder.WriteString(ci.Name)
+	builder.WriteString(", value=")
+	builder.WriteString(ci.Value)
+	builder.WriteByte(')')
+	return builder.String()
+}
+
+// ConfigItems is a parsable slice of ConfigItem.
+type ConfigItems []*ConfigItem
+
+func (ci ConfigItems) config(cfg config) {
+	for _i := range ci {
+		ci[_i].config = cfg
+	}
+}

+ 54 - 0
pkg/database/ent/configitem/configitem.go

@@ -0,0 +1,54 @@
+// Code generated by entc, DO NOT EDIT.
+
+package configitem
+
+import (
+	"time"
+)
+
+const (
+	// Label holds the string label denoting the configitem type in the database.
+	Label = "config_item"
+	// FieldID holds the string denoting the id field in the database.
+	FieldID = "id"
+	// FieldCreatedAt holds the string denoting the created_at field in the database.
+	FieldCreatedAt = "created_at"
+	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
+	FieldUpdatedAt = "updated_at"
+	// FieldName holds the string denoting the name field in the database.
+	FieldName = "name"
+	// FieldValue holds the string denoting the value field in the database.
+	FieldValue = "value"
+	// Table holds the table name of the configitem in the database.
+	Table = "config_items"
+)
+
+// Columns holds all SQL columns for configitem fields.
+var Columns = []string{
+	FieldID,
+	FieldCreatedAt,
+	FieldUpdatedAt,
+	FieldName,
+	FieldValue,
+}
+
+// ValidColumn reports if the column name is valid (part of the table columns).
+func ValidColumn(column string) bool {
+	for i := range Columns {
+		if column == Columns[i] {
+			return true
+		}
+	}
+	return false
+}
+
+var (
+	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
+	DefaultCreatedAt func() time.Time
+	// UpdateDefaultCreatedAt holds the default value on update for the "created_at" field.
+	UpdateDefaultCreatedAt func() time.Time
+	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
+	DefaultUpdatedAt func() time.Time
+	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
+	UpdateDefaultUpdatedAt func() time.Time
+)

+ 555 - 0
pkg/database/ent/configitem/where.go

@@ -0,0 +1,555 @@
+// Code generated by entc, DO NOT EDIT.
+
+package configitem
+
+import (
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate"
+)
+
+// ID filters vertices based on their ID field.
+func ID(id int) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldID), id))
+	})
+}
+
+// IDEQ applies the EQ predicate on the ID field.
+func IDEQ(id int) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldID), id))
+	})
+}
+
+// IDNEQ applies the NEQ predicate on the ID field.
+func IDNEQ(id int) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldID), id))
+	})
+}
+
+// IDIn applies the In predicate on the ID field.
+func IDIn(ids ...int) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		// if not arguments were provided, append the FALSE constants,
+		// since we can't apply "IN ()". This will make this predicate falsy.
+		if len(ids) == 0 {
+			s.Where(sql.False())
+			return
+		}
+		v := make([]interface{}, len(ids))
+		for i := range v {
+			v[i] = ids[i]
+		}
+		s.Where(sql.In(s.C(FieldID), v...))
+	})
+}
+
+// IDNotIn applies the NotIn predicate on the ID field.
+func IDNotIn(ids ...int) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		// if not arguments were provided, append the FALSE constants,
+		// since we can't apply "IN ()". This will make this predicate falsy.
+		if len(ids) == 0 {
+			s.Where(sql.False())
+			return
+		}
+		v := make([]interface{}, len(ids))
+		for i := range v {
+			v[i] = ids[i]
+		}
+		s.Where(sql.NotIn(s.C(FieldID), v...))
+	})
+}
+
+// IDGT applies the GT predicate on the ID field.
+func IDGT(id int) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldID), id))
+	})
+}
+
+// IDGTE applies the GTE predicate on the ID field.
+func IDGTE(id int) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldID), id))
+	})
+}
+
+// IDLT applies the LT predicate on the ID field.
+func IDLT(id int) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldID), id))
+	})
+}
+
+// IDLTE applies the LTE predicate on the ID field.
+func IDLTE(id int) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldID), id))
+	})
+}
+
+// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
+func CreatedAt(v time.Time) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldCreatedAt), v))
+	})
+}
+
+// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
+func UpdatedAt(v time.Time) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
+	})
+}
+
+// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
+func Name(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldName), v))
+	})
+}
+
+// Value applies equality check predicate on the "value" field. It's identical to ValueEQ.
+func Value(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldValue), v))
+	})
+}
+
+// CreatedAtEQ applies the EQ predicate on the "created_at" field.
+func CreatedAtEQ(v time.Time) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
+func CreatedAtNEQ(v time.Time) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtIn applies the In predicate on the "created_at" field.
+func CreatedAtIn(vs ...time.Time) predicate.ConfigItem {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		// if not arguments were provided, append the FALSE constants,
+		// since we can't apply "IN ()". This will make this predicate falsy.
+		if len(v) == 0 {
+			s.Where(sql.False())
+			return
+		}
+		s.Where(sql.In(s.C(FieldCreatedAt), v...))
+	})
+}
+
+// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
+func CreatedAtNotIn(vs ...time.Time) predicate.ConfigItem {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		// if no arguments were provided, append the FALSE constant,
+		// since we can't apply "IN ()". This will make this predicate falsy.
+		if len(v) == 0 {
+			s.Where(sql.False())
+			return
+		}
+		s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
+	})
+}
+
+// CreatedAtGT applies the GT predicate on the "created_at" field.
+func CreatedAtGT(v time.Time) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtGTE applies the GTE predicate on the "created_at" field.
+func CreatedAtGTE(v time.Time) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtLT applies the LT predicate on the "created_at" field.
+func CreatedAtLT(v time.Time) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtLTE applies the LTE predicate on the "created_at" field.
+func CreatedAtLTE(v time.Time) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtIsNil applies the IsNil predicate on the "created_at" field.
+func CreatedAtIsNil() predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldCreatedAt)))
+	})
+}
+
+// CreatedAtNotNil applies the NotNil predicate on the "created_at" field.
+func CreatedAtNotNil() predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldCreatedAt)))
+	})
+}
+
+// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
+func UpdatedAtEQ(v time.Time) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
+	})
+}
+
+// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
+func UpdatedAtNEQ(v time.Time) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldUpdatedAt), v))
+	})
+}
+
+// UpdatedAtIn applies the In predicate on the "updated_at" field.
+func UpdatedAtIn(vs ...time.Time) predicate.ConfigItem {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		// if no arguments were provided, append the FALSE constant,
+		// since we can't apply "IN ()". This will make this predicate falsy.
+		if len(v) == 0 {
+			s.Where(sql.False())
+			return
+		}
+		s.Where(sql.In(s.C(FieldUpdatedAt), v...))
+	})
+}
+
+// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
+func UpdatedAtNotIn(vs ...time.Time) predicate.ConfigItem {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		// if no arguments were provided, append the FALSE constant,
+		// since we can't apply "IN ()". This will make this predicate falsy.
+		if len(v) == 0 {
+			s.Where(sql.False())
+			return
+		}
+		s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...))
+	})
+}
+
+// UpdatedAtGT applies the GT predicate on the "updated_at" field.
+func UpdatedAtGT(v time.Time) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldUpdatedAt), v))
+	})
+}
+
+// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
+func UpdatedAtGTE(v time.Time) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldUpdatedAt), v))
+	})
+}
+
+// UpdatedAtLT applies the LT predicate on the "updated_at" field.
+func UpdatedAtLT(v time.Time) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldUpdatedAt), v))
+	})
+}
+
+// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
+func UpdatedAtLTE(v time.Time) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldUpdatedAt), v))
+	})
+}
+
+// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field.
+func UpdatedAtIsNil() predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldUpdatedAt)))
+	})
+}
+
+// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field.
+func UpdatedAtNotNil() predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldUpdatedAt)))
+	})
+}
+
+// NameEQ applies the EQ predicate on the "name" field.
+func NameEQ(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldName), v))
+	})
+}
+
+// NameNEQ applies the NEQ predicate on the "name" field.
+func NameNEQ(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldName), v))
+	})
+}
+
+// NameIn applies the In predicate on the "name" field.
+func NameIn(vs ...string) predicate.ConfigItem {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		// if no arguments were provided, append the FALSE constant,
+		// since we can't apply "IN ()". This will make this predicate falsy.
+		if len(v) == 0 {
+			s.Where(sql.False())
+			return
+		}
+		s.Where(sql.In(s.C(FieldName), v...))
+	})
+}
+
+// NameNotIn applies the NotIn predicate on the "name" field.
+func NameNotIn(vs ...string) predicate.ConfigItem {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		// if no arguments were provided, append the FALSE constant,
+		// since we can't apply "IN ()". This will make this predicate falsy.
+		if len(v) == 0 {
+			s.Where(sql.False())
+			return
+		}
+		s.Where(sql.NotIn(s.C(FieldName), v...))
+	})
+}
+
+// NameGT applies the GT predicate on the "name" field.
+func NameGT(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldName), v))
+	})
+}
+
+// NameGTE applies the GTE predicate on the "name" field.
+func NameGTE(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldName), v))
+	})
+}
+
+// NameLT applies the LT predicate on the "name" field.
+func NameLT(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldName), v))
+	})
+}
+
+// NameLTE applies the LTE predicate on the "name" field.
+func NameLTE(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldName), v))
+	})
+}
+
+// NameContains applies the Contains predicate on the "name" field.
+func NameContains(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldName), v))
+	})
+}
+
+// NameHasPrefix applies the HasPrefix predicate on the "name" field.
+func NameHasPrefix(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldName), v))
+	})
+}
+
+// NameHasSuffix applies the HasSuffix predicate on the "name" field.
+func NameHasSuffix(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldName), v))
+	})
+}
+
+// NameEqualFold applies the EqualFold predicate on the "name" field.
+func NameEqualFold(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldName), v))
+	})
+}
+
+// NameContainsFold applies the ContainsFold predicate on the "name" field.
+func NameContainsFold(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldName), v))
+	})
+}
+
+// ValueEQ applies the EQ predicate on the "value" field.
+func ValueEQ(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldValue), v))
+	})
+}
+
+// ValueNEQ applies the NEQ predicate on the "value" field.
+func ValueNEQ(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldValue), v))
+	})
+}
+
+// ValueIn applies the In predicate on the "value" field.
+func ValueIn(vs ...string) predicate.ConfigItem {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		// if no arguments were provided, append the FALSE constant,
+		// since we can't apply "IN ()". This will make this predicate falsy.
+		if len(v) == 0 {
+			s.Where(sql.False())
+			return
+		}
+		s.Where(sql.In(s.C(FieldValue), v...))
+	})
+}
+
+// ValueNotIn applies the NotIn predicate on the "value" field.
+func ValueNotIn(vs ...string) predicate.ConfigItem {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		// if no arguments were provided, append the FALSE constant,
+		// since we can't apply "IN ()". This will make this predicate falsy.
+		if len(v) == 0 {
+			s.Where(sql.False())
+			return
+		}
+		s.Where(sql.NotIn(s.C(FieldValue), v...))
+	})
+}
+
+// ValueGT applies the GT predicate on the "value" field.
+func ValueGT(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldValue), v))
+	})
+}
+
+// ValueGTE applies the GTE predicate on the "value" field.
+func ValueGTE(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldValue), v))
+	})
+}
+
+// ValueLT applies the LT predicate on the "value" field.
+func ValueLT(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldValue), v))
+	})
+}
+
+// ValueLTE applies the LTE predicate on the "value" field.
+func ValueLTE(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldValue), v))
+	})
+}
+
+// ValueContains applies the Contains predicate on the "value" field.
+func ValueContains(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldValue), v))
+	})
+}
+
+// ValueHasPrefix applies the HasPrefix predicate on the "value" field.
+func ValueHasPrefix(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldValue), v))
+	})
+}
+
+// ValueHasSuffix applies the HasSuffix predicate on the "value" field.
+func ValueHasSuffix(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldValue), v))
+	})
+}
+
+// ValueEqualFold applies the EqualFold predicate on the "value" field.
+func ValueEqualFold(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldValue), v))
+	})
+}
+
+// ValueContainsFold applies the ContainsFold predicate on the "value" field.
+func ValueContainsFold(v string) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldValue), v))
+	})
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.ConfigItem) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s1 := s.Clone().SetP(nil)
+		for _, p := range predicates {
+			p(s1)
+		}
+		s.Where(s1.P())
+	})
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.ConfigItem) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		s1 := s.Clone().SetP(nil)
+		for i, p := range predicates {
+			if i > 0 {
+				s1.Or()
+			}
+			p(s1)
+		}
+		s.Where(s1.P())
+	})
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.ConfigItem) predicate.ConfigItem {
+	return predicate.ConfigItem(func(s *sql.Selector) {
+		p(s.Not())
+	})
+}

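For context, a minimal sketch of how the generated ConfigItem predicates above compose in a query, assuming the standard entc-generated client in pkg/database/ent; the helper function and its arguments are illustrative, not part of this change:

package example

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
)

// namedOrMatching returns ConfigItems whose name matches exactly or whose
// value contains the given substring, excluding rows where updated_at is NULL.
// And/Or/Not are the grouping predicates defined at the end of where.go.
func namedOrMatching(ctx context.Context, client *ent.Client, name, valueSub string) ([]*ent.ConfigItem, error) {
	return client.ConfigItem.Query().
		Where(
			configitem.And(
				configitem.Or(
					configitem.NameEQ(name),
					configitem.ValueContains(valueSub),
				),
				configitem.Not(configitem.UpdatedAtIsNil()),
			),
		).
		All(ctx)
}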
+ 296 - 0
pkg/database/ent/configitem_create.go

@@ -0,0 +1,296 @@
+// Code generated by entc, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
+)
+
+// ConfigItemCreate is the builder for creating a ConfigItem entity.
+type ConfigItemCreate struct {
+	config
+	mutation *ConfigItemMutation
+	hooks    []Hook
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (cic *ConfigItemCreate) SetCreatedAt(t time.Time) *ConfigItemCreate {
+	cic.mutation.SetCreatedAt(t)
+	return cic
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (cic *ConfigItemCreate) SetNillableCreatedAt(t *time.Time) *ConfigItemCreate {
+	if t != nil {
+		cic.SetCreatedAt(*t)
+	}
+	return cic
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (cic *ConfigItemCreate) SetUpdatedAt(t time.Time) *ConfigItemCreate {
+	cic.mutation.SetUpdatedAt(t)
+	return cic
+}
+
+// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
+func (cic *ConfigItemCreate) SetNillableUpdatedAt(t *time.Time) *ConfigItemCreate {
+	if t != nil {
+		cic.SetUpdatedAt(*t)
+	}
+	return cic
+}
+
+// SetName sets the "name" field.
+func (cic *ConfigItemCreate) SetName(s string) *ConfigItemCreate {
+	cic.mutation.SetName(s)
+	return cic
+}
+
+// SetValue sets the "value" field.
+func (cic *ConfigItemCreate) SetValue(s string) *ConfigItemCreate {
+	cic.mutation.SetValue(s)
+	return cic
+}
+
+// Mutation returns the ConfigItemMutation object of the builder.
+func (cic *ConfigItemCreate) Mutation() *ConfigItemMutation {
+	return cic.mutation
+}
+
+// Save creates the ConfigItem in the database.
+func (cic *ConfigItemCreate) Save(ctx context.Context) (*ConfigItem, error) {
+	var (
+		err  error
+		node *ConfigItem
+	)
+	cic.defaults()
+	if len(cic.hooks) == 0 {
+		if err = cic.check(); err != nil {
+			return nil, err
+		}
+		node, err = cic.sqlSave(ctx)
+	} else {
+		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+			mutation, ok := m.(*ConfigItemMutation)
+			if !ok {
+				return nil, fmt.Errorf("unexpected mutation type %T", m)
+			}
+			if err = cic.check(); err != nil {
+				return nil, err
+			}
+			cic.mutation = mutation
+			if node, err = cic.sqlSave(ctx); err != nil {
+				return nil, err
+			}
+			mutation.id = &node.ID
+			mutation.done = true
+			return node, err
+		})
+		for i := len(cic.hooks) - 1; i >= 0; i-- {
+			if cic.hooks[i] == nil {
+				return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+			}
+			mut = cic.hooks[i](mut)
+		}
+		if _, err := mut.Mutate(ctx, cic.mutation); err != nil {
+			return nil, err
+		}
+	}
+	return node, err
+}
+
+// SaveX calls Save and panics if Save returns an error.
+func (cic *ConfigItemCreate) SaveX(ctx context.Context) *ConfigItem {
+	v, err := cic.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (cic *ConfigItemCreate) Exec(ctx context.Context) error {
+	_, err := cic.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (cic *ConfigItemCreate) ExecX(ctx context.Context) {
+	if err := cic.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// defaults sets the default values of the builder before save.
+func (cic *ConfigItemCreate) defaults() {
+	if _, ok := cic.mutation.CreatedAt(); !ok {
+		v := configitem.DefaultCreatedAt()
+		cic.mutation.SetCreatedAt(v)
+	}
+	if _, ok := cic.mutation.UpdatedAt(); !ok {
+		v := configitem.DefaultUpdatedAt()
+		cic.mutation.SetUpdatedAt(v)
+	}
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (cic *ConfigItemCreate) check() error {
+	if _, ok := cic.mutation.Name(); !ok {
+		return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "ConfigItem.name"`)}
+	}
+	if _, ok := cic.mutation.Value(); !ok {
+		return &ValidationError{Name: "value", err: errors.New(`ent: missing required field "ConfigItem.value"`)}
+	}
+	return nil
+}
+
+func (cic *ConfigItemCreate) sqlSave(ctx context.Context) (*ConfigItem, error) {
+	_node, _spec := cic.createSpec()
+	if err := sqlgraph.CreateNode(ctx, cic.driver, _spec); err != nil {
+		if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{err.Error(), err}
+		}
+		return nil, err
+	}
+	id := _spec.ID.Value.(int64)
+	_node.ID = int(id)
+	return _node, nil
+}
+
+func (cic *ConfigItemCreate) createSpec() (*ConfigItem, *sqlgraph.CreateSpec) {
+	var (
+		_node = &ConfigItem{config: cic.config}
+		_spec = &sqlgraph.CreateSpec{
+			Table: configitem.Table,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: configitem.FieldID,
+			},
+		}
+	)
+	if value, ok := cic.mutation.CreatedAt(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeTime,
+			Value:  value,
+			Column: configitem.FieldCreatedAt,
+		})
+		_node.CreatedAt = &value
+	}
+	if value, ok := cic.mutation.UpdatedAt(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeTime,
+			Value:  value,
+			Column: configitem.FieldUpdatedAt,
+		})
+		_node.UpdatedAt = &value
+	}
+	if value, ok := cic.mutation.Name(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: configitem.FieldName,
+		})
+		_node.Name = value
+	}
+	if value, ok := cic.mutation.Value(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: configitem.FieldValue,
+		})
+		_node.Value = value
+	}
+	return _node, _spec
+}
+
+// ConfigItemCreateBulk is the builder for creating many ConfigItem entities in bulk.
+type ConfigItemCreateBulk struct {
+	config
+	builders []*ConfigItemCreate
+}
+
+// Save creates the ConfigItem entities in the database.
+func (cicb *ConfigItemCreateBulk) Save(ctx context.Context) ([]*ConfigItem, error) {
+	specs := make([]*sqlgraph.CreateSpec, len(cicb.builders))
+	nodes := make([]*ConfigItem, len(cicb.builders))
+	mutators := make([]Mutator, len(cicb.builders))
+	for i := range cicb.builders {
+		func(i int, root context.Context) {
+			builder := cicb.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*ConfigItemMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				nodes[i], specs[i] = builder.createSpec()
+				var err error
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, cicb.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, cicb.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{err.Error(), err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				mutation.done = true
+				if specs[i].ID.Value != nil {
+					id := specs[i].ID.Value.(int64)
+					nodes[i].ID = int(id)
+				}
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, cicb.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (cicb *ConfigItemCreateBulk) SaveX(ctx context.Context) []*ConfigItem {
+	v, err := cicb.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (cicb *ConfigItemCreateBulk) Exec(ctx context.Context) error {
+	_, err := cicb.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (cicb *ConfigItemCreateBulk) ExecX(ctx context.Context) {
+	if err := cicb.Exec(ctx); err != nil {
+		panic(err)
+	}
+}

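A minimal usage sketch of the create builders above, again assuming the standard entc-generated client; the name/value strings are placeholders:

package example

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
)

// seedConfigItems creates a single row and then a small batch. created_at and
// updated_at fall back to the schema defaults applied by defaults() when not
// set explicitly, and check() enforces that name and value are present.
func seedConfigItems(ctx context.Context, client *ent.Client) (*ent.ConfigItem, error) {
	item, err := client.ConfigItem.Create().
		SetName("example:key").
		SetValue("example-value").
		Save(ctx)
	if err != nil {
		return nil, err
	}

	// CreateBulk wires the builders into a single batch insert (ConfigItemCreateBulk.Save).
	_, err = client.ConfigItem.CreateBulk(
		client.ConfigItem.Create().SetName("a").SetValue("1"),
		client.ConfigItem.Create().SetName("b").SetValue("2"),
	).Save(ctx)
	return item, err
}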
+ 111 - 0
pkg/database/ent/configitem_delete.go

@@ -0,0 +1,111 @@
+// Code generated by entc, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"fmt"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate"
+)
+
+// ConfigItemDelete is the builder for deleting a ConfigItem entity.
+type ConfigItemDelete struct {
+	config
+	hooks    []Hook
+	mutation *ConfigItemMutation
+}
+
+// Where appends a list of predicates to the ConfigItemDelete builder.
+func (cid *ConfigItemDelete) Where(ps ...predicate.ConfigItem) *ConfigItemDelete {
+	cid.mutation.Where(ps...)
+	return cid
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (cid *ConfigItemDelete) Exec(ctx context.Context) (int, error) {
+	var (
+		err      error
+		affected int
+	)
+	if len(cid.hooks) == 0 {
+		affected, err = cid.sqlExec(ctx)
+	} else {
+		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+			mutation, ok := m.(*ConfigItemMutation)
+			if !ok {
+				return nil, fmt.Errorf("unexpected mutation type %T", m)
+			}
+			cid.mutation = mutation
+			affected, err = cid.sqlExec(ctx)
+			mutation.done = true
+			return affected, err
+		})
+		for i := len(cid.hooks) - 1; i >= 0; i-- {
+			if cid.hooks[i] == nil {
+				return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+			}
+			mut = cid.hooks[i](mut)
+		}
+		if _, err := mut.Mutate(ctx, cid.mutation); err != nil {
+			return 0, err
+		}
+	}
+	return affected, err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (cid *ConfigItemDelete) ExecX(ctx context.Context) int {
+	n, err := cid.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (cid *ConfigItemDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := &sqlgraph.DeleteSpec{
+		Node: &sqlgraph.NodeSpec{
+			Table: configitem.Table,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: configitem.FieldID,
+			},
+		},
+	}
+	if ps := cid.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	return sqlgraph.DeleteNodes(ctx, cid.driver, _spec)
+}
+
+// ConfigItemDeleteOne is the builder for deleting a single ConfigItem entity.
+type ConfigItemDeleteOne struct {
+	cid *ConfigItemDelete
+}
+
+// Exec executes the deletion query.
+func (cido *ConfigItemDeleteOne) Exec(ctx context.Context) error {
+	n, err := cido.cid.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{configitem.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (cido *ConfigItemDeleteOne) ExecX(ctx context.Context) {
+	cido.cid.ExecX(ctx)
+}

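A short sketch of the delete builder, under the same assumption of a standard entc-generated client; the name filter is illustrative:

package example

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
)

// deleteByName removes every ConfigItem matching the given name and returns
// the number of rows deleted, as reported by ConfigItemDelete.Exec.
func deleteByName(ctx context.Context, client *ent.Client, name string) (int, error) {
	return client.ConfigItem.Delete().
		Where(configitem.NameEQ(name)).
		Exec(ctx)
}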
+ 921 - 0
pkg/database/ent/configitem_query.go

@@ -0,0 +1,921 @@
+// Code generated by entc, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate"
+)
+
+// ConfigItemQuery is the builder for querying ConfigItem entities.
+type ConfigItemQuery struct {
+	config
+	limit      *int
+	offset     *int
+	unique     *bool
+	order      []OrderFunc
+	fields     []string
+	predicates []predicate.ConfigItem
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the ConfigItemQuery builder.
+func (ciq *ConfigItemQuery) Where(ps ...predicate.ConfigItem) *ConfigItemQuery {
+	ciq.predicates = append(ciq.predicates, ps...)
+	return ciq
+}
+
+// Limit adds a limit step to the query.
+func (ciq *ConfigItemQuery) Limit(limit int) *ConfigItemQuery {
+	ciq.limit = &limit
+	return ciq
+}
+
+// Offset adds an offset step to the query.
+func (ciq *ConfigItemQuery) Offset(offset int) *ConfigItemQuery {
+	ciq.offset = &offset
+	return ciq
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (ciq *ConfigItemQuery) Unique(unique bool) *ConfigItemQuery {
+	ciq.unique = &unique
+	return ciq
+}
+
+// Order adds an order step to the query.
+func (ciq *ConfigItemQuery) Order(o ...OrderFunc) *ConfigItemQuery {
+	ciq.order = append(ciq.order, o...)
+	return ciq
+}
+
+// First returns the first ConfigItem entity from the query.
+// Returns a *NotFoundError when no ConfigItem was found.
+func (ciq *ConfigItemQuery) First(ctx context.Context) (*ConfigItem, error) {
+	nodes, err := ciq.Limit(1).All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nil, &NotFoundError{configitem.Label}
+	}
+	return nodes[0], nil
+}
+
+// FirstX is like First, but panics if an error occurs.
+func (ciq *ConfigItemQuery) FirstX(ctx context.Context) *ConfigItem {
+	node, err := ciq.First(ctx)
+	if err != nil && !IsNotFound(err) {
+		panic(err)
+	}
+	return node
+}
+
+// FirstID returns the first ConfigItem ID from the query.
+// Returns a *NotFoundError when no ConfigItem ID was found.
+func (ciq *ConfigItemQuery) FirstID(ctx context.Context) (id int, err error) {
+	var ids []int
+	if ids, err = ciq.Limit(1).IDs(ctx); err != nil {
+		return
+	}
+	if len(ids) == 0 {
+		err = &NotFoundError{configitem.Label}
+		return
+	}
+	return ids[0], nil
+}
+
+// FirstIDX is like FirstID, but panics if an error occurs.
+func (ciq *ConfigItemQuery) FirstIDX(ctx context.Context) int {
+	id, err := ciq.FirstID(ctx)
+	if err != nil && !IsNotFound(err) {
+		panic(err)
+	}
+	return id
+}
+
+// Only returns a single ConfigItem entity found by the query, ensuring it only returns one.
+// Returns a *NotSingularError when more than one ConfigItem entity is found.
+// Returns a *NotFoundError when no ConfigItem entities are found.
+func (ciq *ConfigItemQuery) Only(ctx context.Context) (*ConfigItem, error) {
+	nodes, err := ciq.Limit(2).All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	switch len(nodes) {
+	case 1:
+		return nodes[0], nil
+	case 0:
+		return nil, &NotFoundError{configitem.Label}
+	default:
+		return nil, &NotSingularError{configitem.Label}
+	}
+}
+
+// OnlyX is like Only, but panics if an error occurs.
+func (ciq *ConfigItemQuery) OnlyX(ctx context.Context) *ConfigItem {
+	node, err := ciq.Only(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// OnlyID is like Only, but returns the only ConfigItem ID in the query.
+// Returns a *NotSingularError when more than one ConfigItem ID is found.
+// Returns a *NotFoundError when no entities are found.
+func (ciq *ConfigItemQuery) OnlyID(ctx context.Context) (id int, err error) {
+	var ids []int
+	if ids, err = ciq.Limit(2).IDs(ctx); err != nil {
+		return
+	}
+	switch len(ids) {
+	case 1:
+		id = ids[0]
+	case 0:
+		err = &NotFoundError{configitem.Label}
+	default:
+		err = &NotSingularError{configitem.Label}
+	}
+	return
+}
+
+// OnlyIDX is like OnlyID, but panics if an error occurs.
+func (ciq *ConfigItemQuery) OnlyIDX(ctx context.Context) int {
+	id, err := ciq.OnlyID(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+
+// All executes the query and returns a list of ConfigItems.
+func (ciq *ConfigItemQuery) All(ctx context.Context) ([]*ConfigItem, error) {
+	if err := ciq.prepareQuery(ctx); err != nil {
+		return nil, err
+	}
+	return ciq.sqlAll(ctx)
+}
+
+// AllX is like All, but panics if an error occurs.
+func (ciq *ConfigItemQuery) AllX(ctx context.Context) []*ConfigItem {
+	nodes, err := ciq.All(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return nodes
+}
+
+// IDs executes the query and returns a list of ConfigItem IDs.
+func (ciq *ConfigItemQuery) IDs(ctx context.Context) ([]int, error) {
+	var ids []int
+	if err := ciq.Select(configitem.FieldID).Scan(ctx, &ids); err != nil {
+		return nil, err
+	}
+	return ids, nil
+}
+
+// IDsX is like IDs, but panics if an error occurs.
+func (ciq *ConfigItemQuery) IDsX(ctx context.Context) []int {
+	ids, err := ciq.IDs(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return ids
+}
+
+// Count returns the count of the given query.
+func (ciq *ConfigItemQuery) Count(ctx context.Context) (int, error) {
+	if err := ciq.prepareQuery(ctx); err != nil {
+		return 0, err
+	}
+	return ciq.sqlCount(ctx)
+}
+
+// CountX is like Count, but panics if an error occurs.
+func (ciq *ConfigItemQuery) CountX(ctx context.Context) int {
+	count, err := ciq.Count(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return count
+}
+
+// Exist returns true if the query has elements in the graph.
+func (ciq *ConfigItemQuery) Exist(ctx context.Context) (bool, error) {
+	if err := ciq.prepareQuery(ctx); err != nil {
+		return false, err
+	}
+	return ciq.sqlExist(ctx)
+}
+
+// ExistX is like Exist, but panics if an error occurs.
+func (ciq *ConfigItemQuery) ExistX(ctx context.Context) bool {
+	exist, err := ciq.Exist(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return exist
+}
+
+// Clone returns a duplicate of the ConfigItemQuery builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (ciq *ConfigItemQuery) Clone() *ConfigItemQuery {
+	if ciq == nil {
+		return nil
+	}
+	return &ConfigItemQuery{
+		config:     ciq.config,
+		limit:      ciq.limit,
+		offset:     ciq.offset,
+		order:      append([]OrderFunc{}, ciq.order...),
+		predicates: append([]predicate.ConfigItem{}, ciq.predicates...),
+		// clone intermediate query.
+		sql:    ciq.sql.Clone(),
+		path:   ciq.path,
+		unique: ciq.unique,
+	}
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at"`
+//		Count int `json:"count,omitempty"`
+//	}
+//
+//	client.ConfigItem.Query().
+//		GroupBy(configitem.FieldCreatedAt).
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &v)
+//
+func (ciq *ConfigItemQuery) GroupBy(field string, fields ...string) *ConfigItemGroupBy {
+	group := &ConfigItemGroupBy{config: ciq.config}
+	group.fields = append([]string{field}, fields...)
+	group.path = func(ctx context.Context) (prev *sql.Selector, err error) {
+		if err := ciq.prepareQuery(ctx); err != nil {
+			return nil, err
+		}
+		return ciq.sqlQuery(ctx), nil
+	}
+	return group
+}
+
+// Select allows the selection of one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at"`
+//	}
+//
+//	client.ConfigItem.Query().
+//		Select(configitem.FieldCreatedAt).
+//		Scan(ctx, &v)
+//
+func (ciq *ConfigItemQuery) Select(fields ...string) *ConfigItemSelect {
+	ciq.fields = append(ciq.fields, fields...)
+	return &ConfigItemSelect{ConfigItemQuery: ciq}
+}
+
+func (ciq *ConfigItemQuery) prepareQuery(ctx context.Context) error {
+	for _, f := range ciq.fields {
+		if !configitem.ValidColumn(f) {
+			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+		}
+	}
+	if ciq.path != nil {
+		prev, err := ciq.path(ctx)
+		if err != nil {
+			return err
+		}
+		ciq.sql = prev
+	}
+	return nil
+}
+
+func (ciq *ConfigItemQuery) sqlAll(ctx context.Context) ([]*ConfigItem, error) {
+	var (
+		nodes = []*ConfigItem{}
+		_spec = ciq.querySpec()
+	)
+	_spec.ScanValues = func(columns []string) ([]interface{}, error) {
+		node := &ConfigItem{config: ciq.config}
+		nodes = append(nodes, node)
+		return node.scanValues(columns)
+	}
+	_spec.Assign = func(columns []string, values []interface{}) error {
+		if len(nodes) == 0 {
+			return fmt.Errorf("ent: Assign called without calling ScanValues")
+		}
+		node := nodes[len(nodes)-1]
+		return node.assignValues(columns, values)
+	}
+	if err := sqlgraph.QueryNodes(ctx, ciq.driver, _spec); err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nodes, nil
+	}
+	return nodes, nil
+}
+
+func (ciq *ConfigItemQuery) sqlCount(ctx context.Context) (int, error) {
+	_spec := ciq.querySpec()
+	_spec.Node.Columns = ciq.fields
+	if len(ciq.fields) > 0 {
+		_spec.Unique = ciq.unique != nil && *ciq.unique
+	}
+	return sqlgraph.CountNodes(ctx, ciq.driver, _spec)
+}
+
+func (ciq *ConfigItemQuery) sqlExist(ctx context.Context) (bool, error) {
+	n, err := ciq.sqlCount(ctx)
+	if err != nil {
+		return false, fmt.Errorf("ent: check existence: %w", err)
+	}
+	return n > 0, nil
+}
+
+func (ciq *ConfigItemQuery) querySpec() *sqlgraph.QuerySpec {
+	_spec := &sqlgraph.QuerySpec{
+		Node: &sqlgraph.NodeSpec{
+			Table:   configitem.Table,
+			Columns: configitem.Columns,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: configitem.FieldID,
+			},
+		},
+		From:   ciq.sql,
+		Unique: true,
+	}
+	if unique := ciq.unique; unique != nil {
+		_spec.Unique = *unique
+	}
+	if fields := ciq.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, configitem.FieldID)
+		for i := range fields {
+			if fields[i] != configitem.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
+			}
+		}
+	}
+	if ps := ciq.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if limit := ciq.limit; limit != nil {
+		_spec.Limit = *limit
+	}
+	if offset := ciq.offset; offset != nil {
+		_spec.Offset = *offset
+	}
+	if ps := ciq.order; len(ps) > 0 {
+		_spec.Order = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	return _spec
+}
+
+func (ciq *ConfigItemQuery) sqlQuery(ctx context.Context) *sql.Selector {
+	builder := sql.Dialect(ciq.driver.Dialect())
+	t1 := builder.Table(configitem.Table)
+	columns := ciq.fields
+	if len(columns) == 0 {
+		columns = configitem.Columns
+	}
+	selector := builder.Select(t1.Columns(columns...)...).From(t1)
+	if ciq.sql != nil {
+		selector = ciq.sql
+		selector.Select(selector.Columns(columns...)...)
+	}
+	if ciq.unique != nil && *ciq.unique {
+		selector.Distinct()
+	}
+	for _, p := range ciq.predicates {
+		p(selector)
+	}
+	for _, p := range ciq.order {
+		p(selector)
+	}
+	if offset := ciq.offset; offset != nil {
+		// limit is mandatory for the offset clause. We start
+		// with a default value, and override it below if needed.
+		selector.Offset(*offset).Limit(math.MaxInt32)
+	}
+	if limit := ciq.limit; limit != nil {
+		selector.Limit(*limit)
+	}
+	return selector
+}
+
+// ConfigItemGroupBy is the group-by builder for ConfigItem entities.
+type ConfigItemGroupBy struct {
+	config
+	fields []string
+	fns    []AggregateFunc
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Aggregate adds the given aggregation functions to the group-by query.
+func (cigb *ConfigItemGroupBy) Aggregate(fns ...AggregateFunc) *ConfigItemGroupBy {
+	cigb.fns = append(cigb.fns, fns...)
+	return cigb
+}
+
+// Scan applies the group-by query and scans the result into the given value.
+func (cigb *ConfigItemGroupBy) Scan(ctx context.Context, v interface{}) error {
+	query, err := cigb.path(ctx)
+	if err != nil {
+		return err
+	}
+	cigb.sql = query
+	return cigb.sqlScan(ctx, v)
+}
+
+// ScanX is like Scan, but panics if an error occurs.
+func (cigb *ConfigItemGroupBy) ScanX(ctx context.Context, v interface{}) {
+	if err := cigb.Scan(ctx, v); err != nil {
+		panic(err)
+	}
+}
+
+// Strings returns list of strings from group-by.
+// It is only allowed when executing a group-by query with one field.
+func (cigb *ConfigItemGroupBy) Strings(ctx context.Context) ([]string, error) {
+	if len(cigb.fields) > 1 {
+		return nil, errors.New("ent: ConfigItemGroupBy.Strings is not achievable when grouping more than 1 field")
+	}
+	var v []string
+	if err := cigb.Scan(ctx, &v); err != nil {
+		return nil, err
+	}
+	return v, nil
+}
+
+// StringsX is like Strings, but panics if an error occurs.
+func (cigb *ConfigItemGroupBy) StringsX(ctx context.Context) []string {
+	v, err := cigb.Strings(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// String returns a single string from a group-by query.
+// It is only allowed when executing a group-by query with one field.
+func (cigb *ConfigItemGroupBy) String(ctx context.Context) (_ string, err error) {
+	var v []string
+	if v, err = cigb.Strings(ctx); err != nil {
+		return
+	}
+	switch len(v) {
+	case 1:
+		return v[0], nil
+	case 0:
+		err = &NotFoundError{configitem.Label}
+	default:
+		err = fmt.Errorf("ent: ConfigItemGroupBy.Strings returned %d results when one was expected", len(v))
+	}
+	return
+}
+
+// StringX is like String, but panics if an error occurs.
+func (cigb *ConfigItemGroupBy) StringX(ctx context.Context) string {
+	v, err := cigb.String(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Ints returns list of ints from group-by.
+// It is only allowed when executing a group-by query with one field.
+func (cigb *ConfigItemGroupBy) Ints(ctx context.Context) ([]int, error) {
+	if len(cigb.fields) > 1 {
+		return nil, errors.New("ent: ConfigItemGroupBy.Ints is not achievable when grouping more than 1 field")
+	}
+	var v []int
+	if err := cigb.Scan(ctx, &v); err != nil {
+		return nil, err
+	}
+	return v, nil
+}
+
+// IntsX is like Ints, but panics if an error occurs.
+func (cigb *ConfigItemGroupBy) IntsX(ctx context.Context) []int {
+	v, err := cigb.Ints(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Int returns a single int from a group-by query.
+// It is only allowed when executing a group-by query with one field.
+func (cigb *ConfigItemGroupBy) Int(ctx context.Context) (_ int, err error) {
+	var v []int
+	if v, err = cigb.Ints(ctx); err != nil {
+		return
+	}
+	switch len(v) {
+	case 1:
+		return v[0], nil
+	case 0:
+		err = &NotFoundError{configitem.Label}
+	default:
+		err = fmt.Errorf("ent: ConfigItemGroupBy.Ints returned %d results when one was expected", len(v))
+	}
+	return
+}
+
+// IntX is like Int, but panics if an error occurs.
+func (cigb *ConfigItemGroupBy) IntX(ctx context.Context) int {
+	v, err := cigb.Int(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Float64s returns list of float64s from group-by.
+// It is only allowed when executing a group-by query with one field.
+func (cigb *ConfigItemGroupBy) Float64s(ctx context.Context) ([]float64, error) {
+	if len(cigb.fields) > 1 {
+		return nil, errors.New("ent: ConfigItemGroupBy.Float64s is not achievable when grouping more than 1 field")
+	}
+	var v []float64
+	if err := cigb.Scan(ctx, &v); err != nil {
+		return nil, err
+	}
+	return v, nil
+}
+
+// Float64sX is like Float64s, but panics if an error occurs.
+func (cigb *ConfigItemGroupBy) Float64sX(ctx context.Context) []float64 {
+	v, err := cigb.Float64s(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Float64 returns a single float64 from a group-by query.
+// It is only allowed when executing a group-by query with one field.
+func (cigb *ConfigItemGroupBy) Float64(ctx context.Context) (_ float64, err error) {
+	var v []float64
+	if v, err = cigb.Float64s(ctx); err != nil {
+		return
+	}
+	switch len(v) {
+	case 1:
+		return v[0], nil
+	case 0:
+		err = &NotFoundError{configitem.Label}
+	default:
+		err = fmt.Errorf("ent: ConfigItemGroupBy.Float64s returned %d results when one was expected", len(v))
+	}
+	return
+}
+
+// Float64X is like Float64, but panics if an error occurs.
+func (cigb *ConfigItemGroupBy) Float64X(ctx context.Context) float64 {
+	v, err := cigb.Float64(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Bools returns list of bools from group-by.
+// It is only allowed when executing a group-by query with one field.
+func (cigb *ConfigItemGroupBy) Bools(ctx context.Context) ([]bool, error) {
+	if len(cigb.fields) > 1 {
+		return nil, errors.New("ent: ConfigItemGroupBy.Bools is not achievable when grouping more than 1 field")
+	}
+	var v []bool
+	if err := cigb.Scan(ctx, &v); err != nil {
+		return nil, err
+	}
+	return v, nil
+}
+
+// BoolsX is like Bools, but panics if an error occurs.
+func (cigb *ConfigItemGroupBy) BoolsX(ctx context.Context) []bool {
+	v, err := cigb.Bools(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Bool returns a single bool from a group-by query.
+// It is only allowed when executing a group-by query with one field.
+func (cigb *ConfigItemGroupBy) Bool(ctx context.Context) (_ bool, err error) {
+	var v []bool
+	if v, err = cigb.Bools(ctx); err != nil {
+		return
+	}
+	switch len(v) {
+	case 1:
+		return v[0], nil
+	case 0:
+		err = &NotFoundError{configitem.Label}
+	default:
+		err = fmt.Errorf("ent: ConfigItemGroupBy.Bools returned %d results when one was expected", len(v))
+	}
+	return
+}
+
+// BoolX is like Bool, but panics if an error occurs.
+func (cigb *ConfigItemGroupBy) BoolX(ctx context.Context) bool {
+	v, err := cigb.Bool(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+func (cigb *ConfigItemGroupBy) sqlScan(ctx context.Context, v interface{}) error {
+	for _, f := range cigb.fields {
+		if !configitem.ValidColumn(f) {
+			return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
+		}
+	}
+	selector := cigb.sqlQuery()
+	if err := selector.Err(); err != nil {
+		return err
+	}
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := cigb.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
+
+func (cigb *ConfigItemGroupBy) sqlQuery() *sql.Selector {
+	selector := cigb.sql.Select()
+	aggregation := make([]string, 0, len(cigb.fns))
+	for _, fn := range cigb.fns {
+		aggregation = append(aggregation, fn(selector))
+	}
+	// If no columns were selected in a custom aggregation function, the default
+	// selection is the fields used for "group-by", and the aggregation functions.
+	if len(selector.SelectedColumns()) == 0 {
+		columns := make([]string, 0, len(cigb.fields)+len(cigb.fns))
+		for _, f := range cigb.fields {
+			columns = append(columns, selector.C(f))
+		}
+		columns = append(columns, aggregation...)
+		selector.Select(columns...)
+	}
+	return selector.GroupBy(selector.Columns(cigb.fields...)...)
+}
+
+// ConfigItemSelect is the builder for selecting fields of ConfigItem entities.
+type ConfigItemSelect struct {
+	*ConfigItemQuery
+	// intermediate query (i.e. traversal path).
+	sql *sql.Selector
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (cis *ConfigItemSelect) Scan(ctx context.Context, v interface{}) error {
+	if err := cis.prepareQuery(ctx); err != nil {
+		return err
+	}
+	cis.sql = cis.ConfigItemQuery.sqlQuery(ctx)
+	return cis.sqlScan(ctx, v)
+}
+
+// ScanX is like Scan, but panics if an error occurs.
+func (cis *ConfigItemSelect) ScanX(ctx context.Context, v interface{}) {
+	if err := cis.Scan(ctx, v); err != nil {
+		panic(err)
+	}
+}
+
+// Strings returns list of strings from a selector. It is only allowed when selecting one field.
+func (cis *ConfigItemSelect) Strings(ctx context.Context) ([]string, error) {
+	if len(cis.fields) > 1 {
+		return nil, errors.New("ent: ConfigItemSelect.Strings is not achievable when selecting more than 1 field")
+	}
+	var v []string
+	if err := cis.Scan(ctx, &v); err != nil {
+		return nil, err
+	}
+	return v, nil
+}
+
+// StringsX is like Strings, but panics if an error occurs.
+func (cis *ConfigItemSelect) StringsX(ctx context.Context) []string {
+	v, err := cis.Strings(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// String returns a single string from a selector. It is only allowed when selecting one field.
+func (cis *ConfigItemSelect) String(ctx context.Context) (_ string, err error) {
+	var v []string
+	if v, err = cis.Strings(ctx); err != nil {
+		return
+	}
+	switch len(v) {
+	case 1:
+		return v[0], nil
+	case 0:
+		err = &NotFoundError{configitem.Label}
+	default:
+		err = fmt.Errorf("ent: ConfigItemSelect.Strings returned %d results when one was expected", len(v))
+	}
+	return
+}
+
+// StringX is like String, but panics if an error occurs.
+func (cis *ConfigItemSelect) StringX(ctx context.Context) string {
+	v, err := cis.String(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Ints returns list of ints from a selector. It is only allowed when selecting one field.
+func (cis *ConfigItemSelect) Ints(ctx context.Context) ([]int, error) {
+	if len(cis.fields) > 1 {
+		return nil, errors.New("ent: ConfigItemSelect.Ints is not achievable when selecting more than 1 field")
+	}
+	var v []int
+	if err := cis.Scan(ctx, &v); err != nil {
+		return nil, err
+	}
+	return v, nil
+}
+
+// IntsX is like Ints, but panics if an error occurs.
+func (cis *ConfigItemSelect) IntsX(ctx context.Context) []int {
+	v, err := cis.Ints(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Int returns a single int from a selector. It is only allowed when selecting one field.
+func (cis *ConfigItemSelect) Int(ctx context.Context) (_ int, err error) {
+	var v []int
+	if v, err = cis.Ints(ctx); err != nil {
+		return
+	}
+	switch len(v) {
+	case 1:
+		return v[0], nil
+	case 0:
+		err = &NotFoundError{configitem.Label}
+	default:
+		err = fmt.Errorf("ent: ConfigItemSelect.Ints returned %d results when one was expected", len(v))
+	}
+	return
+}
+
+// IntX is like Int, but panics if an error occurs.
+func (cis *ConfigItemSelect) IntX(ctx context.Context) int {
+	v, err := cis.Int(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Float64s returns list of float64s from a selector. It is only allowed when selecting one field.
+func (cis *ConfigItemSelect) Float64s(ctx context.Context) ([]float64, error) {
+	if len(cis.fields) > 1 {
+		return nil, errors.New("ent: ConfigItemSelect.Float64s is not achievable when selecting more than 1 field")
+	}
+	var v []float64
+	if err := cis.Scan(ctx, &v); err != nil {
+		return nil, err
+	}
+	return v, nil
+}
+
+// Float64sX is like Float64s, but panics if an error occurs.
+func (cis *ConfigItemSelect) Float64sX(ctx context.Context) []float64 {
+	v, err := cis.Float64s(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Float64 returns a single float64 from a selector. It is only allowed when selecting one field.
+func (cis *ConfigItemSelect) Float64(ctx context.Context) (_ float64, err error) {
+	var v []float64
+	if v, err = cis.Float64s(ctx); err != nil {
+		return
+	}
+	switch len(v) {
+	case 1:
+		return v[0], nil
+	case 0:
+		err = &NotFoundError{configitem.Label}
+	default:
+		err = fmt.Errorf("ent: ConfigItemSelect.Float64s returned %d results when one was expected", len(v))
+	}
+	return
+}
+
+// Float64X is like Float64, but panics if an error occurs.
+func (cis *ConfigItemSelect) Float64X(ctx context.Context) float64 {
+	v, err := cis.Float64(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Bools returns list of bools from a selector. It is only allowed when selecting one field.
+func (cis *ConfigItemSelect) Bools(ctx context.Context) ([]bool, error) {
+	if len(cis.fields) > 1 {
+		return nil, errors.New("ent: ConfigItemSelect.Bools is not achievable when selecting more than 1 field")
+	}
+	var v []bool
+	if err := cis.Scan(ctx, &v); err != nil {
+		return nil, err
+	}
+	return v, nil
+}
+
+// BoolsX is like Bools, but panics if an error occurs.
+func (cis *ConfigItemSelect) BoolsX(ctx context.Context) []bool {
+	v, err := cis.Bools(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Bool returns a single bool from a selector. It is only allowed when selecting one field.
+func (cis *ConfigItemSelect) Bool(ctx context.Context) (_ bool, err error) {
+	var v []bool
+	if v, err = cis.Bools(ctx); err != nil {
+		return
+	}
+	switch len(v) {
+	case 1:
+		return v[0], nil
+	case 0:
+		err = &NotFoundError{configitem.Label}
+	default:
+		err = fmt.Errorf("ent: ConfigItemSelect.Bools returned %d results when one was expected", len(v))
+	}
+	return
+}
+
+// BoolX is like Bool, but panics if an error occurs.
+func (cis *ConfigItemSelect) BoolX(ctx context.Context) bool {
+	v, err := cis.Bool(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+func (cis *ConfigItemSelect) sqlScan(ctx context.Context, v interface{}) error {
+	rows := &sql.Rows{}
+	query, args := cis.sql.Query()
+	if err := cis.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}

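A sketch of the query, group-by and select paths above, assuming the standard entc-generated client; the struct the group-by result is scanned into mirrors the example given in the GroupBy doc comment:

package example

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
)

// lookupValue returns the value of a single named ConfigItem; Only fails with
// a NotFoundError or NotSingularError if zero or several rows match.
func lookupValue(ctx context.Context, client *ent.Client, name string) (string, error) {
	item, err := client.ConfigItem.Query().
		Where(configitem.NameEQ(name)).
		Only(ctx)
	if err != nil {
		return "", err
	}
	return item.Value, nil
}

// countByName groups rows by name and counts them via ConfigItemGroupBy.Scan.
func countByName(ctx context.Context, client *ent.Client) (map[string]int, error) {
	var rows []struct {
		Name  string `json:"name"`
		Count int    `json:"count"`
	}
	if err := client.ConfigItem.Query().
		GroupBy(configitem.FieldName).
		Aggregate(ent.Count()).
		Scan(ctx, &rows); err != nil {
		return nil, err
	}
	out := make(map[string]int, len(rows))
	for _, r := range rows {
		out[r.Name] = r.Count
	}
	return out, nil
}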
+ 418 - 0
pkg/database/ent/configitem_update.go

@@ -0,0 +1,418 @@
+// Code generated by entc, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate"
+)
+
+// ConfigItemUpdate is the builder for updating ConfigItem entities.
+type ConfigItemUpdate struct {
+	config
+	hooks    []Hook
+	mutation *ConfigItemMutation
+}
+
+// Where appends a list of predicates to the ConfigItemUpdate builder.
+func (ciu *ConfigItemUpdate) Where(ps ...predicate.ConfigItem) *ConfigItemUpdate {
+	ciu.mutation.Where(ps...)
+	return ciu
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (ciu *ConfigItemUpdate) SetCreatedAt(t time.Time) *ConfigItemUpdate {
+	ciu.mutation.SetCreatedAt(t)
+	return ciu
+}
+
+// ClearCreatedAt clears the value of the "created_at" field.
+func (ciu *ConfigItemUpdate) ClearCreatedAt() *ConfigItemUpdate {
+	ciu.mutation.ClearCreatedAt()
+	return ciu
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (ciu *ConfigItemUpdate) SetUpdatedAt(t time.Time) *ConfigItemUpdate {
+	ciu.mutation.SetUpdatedAt(t)
+	return ciu
+}
+
+// ClearUpdatedAt clears the value of the "updated_at" field.
+func (ciu *ConfigItemUpdate) ClearUpdatedAt() *ConfigItemUpdate {
+	ciu.mutation.ClearUpdatedAt()
+	return ciu
+}
+
+// SetName sets the "name" field.
+func (ciu *ConfigItemUpdate) SetName(s string) *ConfigItemUpdate {
+	ciu.mutation.SetName(s)
+	return ciu
+}
+
+// SetValue sets the "value" field.
+func (ciu *ConfigItemUpdate) SetValue(s string) *ConfigItemUpdate {
+	ciu.mutation.SetValue(s)
+	return ciu
+}
+
+// Mutation returns the ConfigItemMutation object of the builder.
+func (ciu *ConfigItemUpdate) Mutation() *ConfigItemMutation {
+	return ciu.mutation
+}
+
+// Save executes the query and returns the number of nodes affected by the update operation.
+func (ciu *ConfigItemUpdate) Save(ctx context.Context) (int, error) {
+	var (
+		err      error
+		affected int
+	)
+	ciu.defaults()
+	if len(ciu.hooks) == 0 {
+		affected, err = ciu.sqlSave(ctx)
+	} else {
+		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+			mutation, ok := m.(*ConfigItemMutation)
+			if !ok {
+				return nil, fmt.Errorf("unexpected mutation type %T", m)
+			}
+			ciu.mutation = mutation
+			affected, err = ciu.sqlSave(ctx)
+			mutation.done = true
+			return affected, err
+		})
+		for i := len(ciu.hooks) - 1; i >= 0; i-- {
+			if ciu.hooks[i] == nil {
+				return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+			}
+			mut = ciu.hooks[i](mut)
+		}
+		if _, err := mut.Mutate(ctx, ciu.mutation); err != nil {
+			return 0, err
+		}
+	}
+	return affected, err
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (ciu *ConfigItemUpdate) SaveX(ctx context.Context) int {
+	affected, err := ciu.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return affected
+}
+
+// Exec executes the query.
+func (ciu *ConfigItemUpdate) Exec(ctx context.Context) error {
+	_, err := ciu.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (ciu *ConfigItemUpdate) ExecX(ctx context.Context) {
+	if err := ciu.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// defaults sets the default values of the builder before save.
+func (ciu *ConfigItemUpdate) defaults() {
+	if _, ok := ciu.mutation.CreatedAt(); !ok && !ciu.mutation.CreatedAtCleared() {
+		v := configitem.UpdateDefaultCreatedAt()
+		ciu.mutation.SetCreatedAt(v)
+	}
+	if _, ok := ciu.mutation.UpdatedAt(); !ok && !ciu.mutation.UpdatedAtCleared() {
+		v := configitem.UpdateDefaultUpdatedAt()
+		ciu.mutation.SetUpdatedAt(v)
+	}
+}
+
+func (ciu *ConfigItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
+	_spec := &sqlgraph.UpdateSpec{
+		Node: &sqlgraph.NodeSpec{
+			Table:   configitem.Table,
+			Columns: configitem.Columns,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: configitem.FieldID,
+			},
+		},
+	}
+	if ps := ciu.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if value, ok := ciu.mutation.CreatedAt(); ok {
+		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+			Type:   field.TypeTime,
+			Value:  value,
+			Column: configitem.FieldCreatedAt,
+		})
+	}
+	if ciu.mutation.CreatedAtCleared() {
+		_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
+			Type:   field.TypeTime,
+			Column: configitem.FieldCreatedAt,
+		})
+	}
+	if value, ok := ciu.mutation.UpdatedAt(); ok {
+		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+			Type:   field.TypeTime,
+			Value:  value,
+			Column: configitem.FieldUpdatedAt,
+		})
+	}
+	if ciu.mutation.UpdatedAtCleared() {
+		_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
+			Type:   field.TypeTime,
+			Column: configitem.FieldUpdatedAt,
+		})
+	}
+	if value, ok := ciu.mutation.Name(); ok {
+		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: configitem.FieldName,
+		})
+	}
+	if value, ok := ciu.mutation.Value(); ok {
+		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: configitem.FieldValue,
+		})
+	}
+	if n, err = sqlgraph.UpdateNodes(ctx, ciu.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{configitem.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{err.Error(), err}
+		}
+		return 0, err
+	}
+	return n, nil
+}
+
+// ConfigItemUpdateOne is the builder for updating a single ConfigItem entity.
+type ConfigItemUpdateOne struct {
+	config
+	fields   []string
+	hooks    []Hook
+	mutation *ConfigItemMutation
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (ciuo *ConfigItemUpdateOne) SetCreatedAt(t time.Time) *ConfigItemUpdateOne {
+	ciuo.mutation.SetCreatedAt(t)
+	return ciuo
+}
+
+// ClearCreatedAt clears the value of the "created_at" field.
+func (ciuo *ConfigItemUpdateOne) ClearCreatedAt() *ConfigItemUpdateOne {
+	ciuo.mutation.ClearCreatedAt()
+	return ciuo
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (ciuo *ConfigItemUpdateOne) SetUpdatedAt(t time.Time) *ConfigItemUpdateOne {
+	ciuo.mutation.SetUpdatedAt(t)
+	return ciuo
+}
+
+// ClearUpdatedAt clears the value of the "updated_at" field.
+func (ciuo *ConfigItemUpdateOne) ClearUpdatedAt() *ConfigItemUpdateOne {
+	ciuo.mutation.ClearUpdatedAt()
+	return ciuo
+}
+
+// SetName sets the "name" field.
+func (ciuo *ConfigItemUpdateOne) SetName(s string) *ConfigItemUpdateOne {
+	ciuo.mutation.SetName(s)
+	return ciuo
+}
+
+// SetValue sets the "value" field.
+func (ciuo *ConfigItemUpdateOne) SetValue(s string) *ConfigItemUpdateOne {
+	ciuo.mutation.SetValue(s)
+	return ciuo
+}
+
+// Mutation returns the ConfigItemMutation object of the builder.
+func (ciuo *ConfigItemUpdateOne) Mutation() *ConfigItemMutation {
+	return ciuo.mutation
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (ciuo *ConfigItemUpdateOne) Select(field string, fields ...string) *ConfigItemUpdateOne {
+	ciuo.fields = append([]string{field}, fields...)
+	return ciuo
+}
+
+// Save executes the query and returns the updated ConfigItem entity.
+func (ciuo *ConfigItemUpdateOne) Save(ctx context.Context) (*ConfigItem, error) {
+	var (
+		err  error
+		node *ConfigItem
+	)
+	ciuo.defaults()
+	if len(ciuo.hooks) == 0 {
+		node, err = ciuo.sqlSave(ctx)
+	} else {
+		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+			mutation, ok := m.(*ConfigItemMutation)
+			if !ok {
+				return nil, fmt.Errorf("unexpected mutation type %T", m)
+			}
+			ciuo.mutation = mutation
+			node, err = ciuo.sqlSave(ctx)
+			mutation.done = true
+			return node, err
+		})
+		for i := len(ciuo.hooks) - 1; i >= 0; i-- {
+			if ciuo.hooks[i] == nil {
+				return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+			}
+			mut = ciuo.hooks[i](mut)
+		}
+		if _, err := mut.Mutate(ctx, ciuo.mutation); err != nil {
+			return nil, err
+		}
+	}
+	return node, err
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (ciuo *ConfigItemUpdateOne) SaveX(ctx context.Context) *ConfigItem {
+	node, err := ciuo.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// Exec executes the query on the entity.
+func (ciuo *ConfigItemUpdateOne) Exec(ctx context.Context) error {
+	_, err := ciuo.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (ciuo *ConfigItemUpdateOne) ExecX(ctx context.Context) {
+	if err := ciuo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// defaults sets the default values of the builder before save.
+func (ciuo *ConfigItemUpdateOne) defaults() {
+	if _, ok := ciuo.mutation.CreatedAt(); !ok && !ciuo.mutation.CreatedAtCleared() {
+		v := configitem.UpdateDefaultCreatedAt()
+		ciuo.mutation.SetCreatedAt(v)
+	}
+	if _, ok := ciuo.mutation.UpdatedAt(); !ok && !ciuo.mutation.UpdatedAtCleared() {
+		v := configitem.UpdateDefaultUpdatedAt()
+		ciuo.mutation.SetUpdatedAt(v)
+	}
+}
+
+func (ciuo *ConfigItemUpdateOne) sqlSave(ctx context.Context) (_node *ConfigItem, err error) {
+	_spec := &sqlgraph.UpdateSpec{
+		Node: &sqlgraph.NodeSpec{
+			Table:   configitem.Table,
+			Columns: configitem.Columns,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: configitem.FieldID,
+			},
+		},
+	}
+	id, ok := ciuo.mutation.ID()
+	if !ok {
+		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ConfigItem.id" for update`)}
+	}
+	_spec.Node.ID.Value = id
+	if fields := ciuo.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, configitem.FieldID)
+		for _, f := range fields {
+			if !configitem.ValidColumn(f) {
+				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+			}
+			if f != configitem.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, f)
+			}
+		}
+	}
+	if ps := ciuo.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if value, ok := ciuo.mutation.CreatedAt(); ok {
+		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+			Type:   field.TypeTime,
+			Value:  value,
+			Column: configitem.FieldCreatedAt,
+		})
+	}
+	if ciuo.mutation.CreatedAtCleared() {
+		_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
+			Type:   field.TypeTime,
+			Column: configitem.FieldCreatedAt,
+		})
+	}
+	if value, ok := ciuo.mutation.UpdatedAt(); ok {
+		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+			Type:   field.TypeTime,
+			Value:  value,
+			Column: configitem.FieldUpdatedAt,
+		})
+	}
+	if ciuo.mutation.UpdatedAtCleared() {
+		_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
+			Type:   field.TypeTime,
+			Column: configitem.FieldUpdatedAt,
+		})
+	}
+	if value, ok := ciuo.mutation.Name(); ok {
+		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: configitem.FieldName,
+		})
+	}
+	if value, ok := ciuo.mutation.Value(); ok {
+		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: configitem.FieldValue,
+		})
+	}
+	_node = &ConfigItem{config: ciuo.config}
+	_spec.Assign = _node.assignValues
+	_spec.ScanValues = _node.scanValues
+	if err = sqlgraph.UpdateNode(ctx, ciuo.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{configitem.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{err.Error(), err}
+		}
+		return nil, err
+	}
+	return _node, nil
+}
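
A minimal usage sketch for the update builders above (not part of the diff): it assumes an already-opened *ent.Client and relies only on the generated ConfigItemUpdate builder plus the standard configitem.NameEQ predicate; the key/value pair is illustrative.

package example

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
)

// setConfigItem overwrites the value of every config item carrying the given
// name and returns how many rows were touched, mirroring sqlSave's (n, err).
func setConfigItem(ctx context.Context, client *ent.Client, name, value string) (int, error) {
	return client.ConfigItem.Update().
		Where(configitem.NameEQ(name)).
		SetValue(value).
		Save(ctx)
}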

+ 11 - 1
pkg/database/ent/decision.go

@@ -45,6 +45,8 @@ type Decision struct {
 	Origin string `json:"origin,omitempty"`
 	// Simulated holds the value of the "simulated" field.
 	Simulated bool `json:"simulated,omitempty"`
+	// UUID holds the value of the "uuid" field.
+	UUID string `json:"uuid,omitempty"`
 	// Edges holds the relations/edges for other nodes in the graph.
 	// The values are being populated by the DecisionQuery when eager-loading is set.
 	Edges           DecisionEdges `json:"edges"`
@@ -82,7 +84,7 @@ func (*Decision) scanValues(columns []string) ([]any, error) {
 			values[i] = new(sql.NullBool)
 		case decision.FieldID, decision.FieldStartIP, decision.FieldEndIP, decision.FieldStartSuffix, decision.FieldEndSuffix, decision.FieldIPSize:
 			values[i] = new(sql.NullInt64)
-		case decision.FieldScenario, decision.FieldType, decision.FieldScope, decision.FieldValue, decision.FieldOrigin:
+		case decision.FieldScenario, decision.FieldType, decision.FieldScope, decision.FieldValue, decision.FieldOrigin, decision.FieldUUID:
 			values[i] = new(sql.NullString)
 		case decision.FieldCreatedAt, decision.FieldUpdatedAt, decision.FieldUntil:
 			values[i] = new(sql.NullTime)
@@ -196,6 +198,12 @@ func (d *Decision) assignValues(columns []string, values []any) error {
 			} else if value.Valid {
 				d.Simulated = value.Bool
 			}
+		case decision.FieldUUID:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field uuid", values[i])
+			} else if value.Valid {
+				d.UUID = value.String
+			}
 		case decision.ForeignKeys[0]:
 			if value, ok := values[i].(*sql.NullInt64); !ok {
 				return fmt.Errorf("unexpected type %T for edge-field alert_decisions", value)
@@ -283,6 +291,8 @@ func (d *Decision) String() string {
 	builder.WriteString(", ")
 	builder.WriteString("simulated=")
 	builder.WriteString(fmt.Sprintf("%v", d.Simulated))
+	builder.WriteString(", uuid=")
+	builder.WriteString(d.UUID)
 	builder.WriteByte(')')
 	return builder.String()
 }

+ 3 - 0
pkg/database/ent/decision/decision.go

@@ -39,6 +39,8 @@ const (
 	FieldOrigin = "origin"
 	// FieldSimulated holds the string denoting the simulated field in the database.
 	FieldSimulated = "simulated"
+	// FieldUUID holds the string denoting the uuid field in the database.
+	FieldUUID = "uuid"
 	// EdgeOwner holds the string denoting the owner edge name in mutations.
 	EdgeOwner = "owner"
 	// Table holds the table name of the decision in the database.
@@ -69,6 +71,7 @@ var Columns = []string{
 	FieldValue,
 	FieldOrigin,
 	FieldSimulated,
+	FieldUUID,
 }
 
 // ForeignKeys holds the SQL foreign-keys that are owned by the "decisions"

+ 132 - 0
pkg/database/ent/decision/where.go

@@ -179,6 +179,13 @@ func Simulated(v bool) predicate.Decision {
 	})
 }
 
+// UUID applies equality check predicate on the "uuid" field. It's identical to UUIDEQ.
+func UUID(v string) predicate.Decision {
+	return predicate.Decision(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldUUID), v))
+	})
+}
+
 // CreatedAtEQ applies the EQ predicate on the "created_at" field.
 func CreatedAtEQ(v time.Time) predicate.Decision {
 	return predicate.Decision(func(s *sql.Selector) {
@@ -1312,6 +1319,131 @@ func SimulatedNEQ(v bool) predicate.Decision {
 	})
 }
 
+// UUIDEQ applies the EQ predicate on the "uuid" field.
+func UUIDEQ(v string) predicate.Decision {
+	return predicate.Decision(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDNEQ applies the NEQ predicate on the "uuid" field.
+func UUIDNEQ(v string) predicate.Decision {
+	return predicate.Decision(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDIn applies the In predicate on the "uuid" field.
+func UUIDIn(vs ...string) predicate.Decision {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Decision(func(s *sql.Selector) {
+		// if no arguments were provided, append the FALSE constant,
+		// since we can't apply "IN ()". This will make this predicate falsy.
+		if len(v) == 0 {
+			s.Where(sql.False())
+			return
+		}
+		s.Where(sql.In(s.C(FieldUUID), v...))
+	})
+}
+
+// UUIDNotIn applies the NotIn predicate on the "uuid" field.
+func UUIDNotIn(vs ...string) predicate.Decision {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Decision(func(s *sql.Selector) {
+		// if no arguments were provided, append the FALSE constant,
+		// since we can't apply "IN ()". This will make this predicate falsy.
+		if len(v) == 0 {
+			s.Where(sql.False())
+			return
+		}
+		s.Where(sql.NotIn(s.C(FieldUUID), v...))
+	})
+}
+
+// UUIDGT applies the GT predicate on the "uuid" field.
+func UUIDGT(v string) predicate.Decision {
+	return predicate.Decision(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDGTE applies the GTE predicate on the "uuid" field.
+func UUIDGTE(v string) predicate.Decision {
+	return predicate.Decision(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDLT applies the LT predicate on the "uuid" field.
+func UUIDLT(v string) predicate.Decision {
+	return predicate.Decision(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDLTE applies the LTE predicate on the "uuid" field.
+func UUIDLTE(v string) predicate.Decision {
+	return predicate.Decision(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDContains applies the Contains predicate on the "uuid" field.
+func UUIDContains(v string) predicate.Decision {
+	return predicate.Decision(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDHasPrefix applies the HasPrefix predicate on the "uuid" field.
+func UUIDHasPrefix(v string) predicate.Decision {
+	return predicate.Decision(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDHasSuffix applies the HasSuffix predicate on the "uuid" field.
+func UUIDHasSuffix(v string) predicate.Decision {
+	return predicate.Decision(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDIsNil applies the IsNil predicate on the "uuid" field.
+func UUIDIsNil() predicate.Decision {
+	return predicate.Decision(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldUUID)))
+	})
+}
+
+// UUIDNotNil applies the NotNil predicate on the "uuid" field.
+func UUIDNotNil() predicate.Decision {
+	return predicate.Decision(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldUUID)))
+	})
+}
+
+// UUIDEqualFold applies the EqualFold predicate on the "uuid" field.
+func UUIDEqualFold(v string) predicate.Decision {
+	return predicate.Decision(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldUUID), v))
+	})
+}
+
+// UUIDContainsFold applies the ContainsFold predicate on the "uuid" field.
+func UUIDContainsFold(v string) predicate.Decision {
+	return predicate.Decision(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldUUID), v))
+	})
+}
+
 // HasOwner applies the HasEdge predicate on the "owner" edge.
 func HasOwner() predicate.Decision {
 	return predicate.Decision(func(s *sql.Selector) {
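
The new predicates slot into ordinary ent queries. A short sketch (it assumes an already-opened *ent.Client; only decision.UUID itself comes from the hunk above):

package example

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
)

// findDecisionByUUID fetches the single decision whose uuid column matches id,
// using the equality helper added above.
func findDecisionByUUID(ctx context.Context, client *ent.Client, id string) (*ent.Decision, error) {
	return client.Decision.Query().
		Where(decision.UUID(id)).
		Only(ctx)
}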

+ 22 - 0
pkg/database/ent/decision_create.go

@@ -177,6 +177,20 @@ func (dc *DecisionCreate) SetNillableSimulated(b *bool) *DecisionCreate {
 	return dc
 }
 
+// SetUUID sets the "uuid" field.
+func (dc *DecisionCreate) SetUUID(s string) *DecisionCreate {
+	dc.mutation.SetUUID(s)
+	return dc
+}
+
+// SetNillableUUID sets the "uuid" field if the given value is not nil.
+func (dc *DecisionCreate) SetNillableUUID(s *string) *DecisionCreate {
+	if s != nil {
+		dc.SetUUID(*s)
+	}
+	return dc
+}
+
 // SetOwnerID sets the "owner" edge to the Alert entity by ID.
 func (dc *DecisionCreate) SetOwnerID(id int) *DecisionCreate {
 	dc.mutation.SetOwnerID(id)
@@ -446,6 +460,14 @@ func (dc *DecisionCreate) createSpec() (*Decision, *sqlgraph.CreateSpec) {
 		})
 		_node.Simulated = value
 	}
+	if value, ok := dc.mutation.UUID(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: decision.FieldUUID,
+		})
+		_node.UUID = value
+	}
 	if nodes := dc.mutation.OwnerIDs(); len(nodes) > 0 {
 		edge := &sqlgraph.EdgeSpec{
 			Rel:     sqlgraph.M2O,
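
For context, a sketch of the new setter in use on the create builder (not part of the diff; the field values are placeholders and an already-opened *ent.Client is assumed):

package example

import (
	"context"
	"time"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
)

// createDecisionWithUUID builds a decision and stamps it with the caller's
// uuid via the SetUUID method added above.
func createDecisionWithUUID(ctx context.Context, client *ent.Client, uuid string) (*ent.Decision, error) {
	return client.Decision.Create().
		SetUntil(time.Now().Add(4 * time.Hour)). // placeholder values below
		SetScenario("crowdsecurity/ssh-bf").
		SetType("ban").
		SetScope("Ip").
		SetValue("192.0.2.1").
		SetOrigin("cscli").
		SetUUID(uuid).
		Save(ctx)
}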

+ 66 - 0
pkg/database/ent/decision_update.go

@@ -252,6 +252,26 @@ func (du *DecisionUpdate) SetNillableSimulated(b *bool) *DecisionUpdate {
 	return du
 }
 
+// SetUUID sets the "uuid" field.
+func (du *DecisionUpdate) SetUUID(s string) *DecisionUpdate {
+	du.mutation.SetUUID(s)
+	return du
+}
+
+// SetNillableUUID sets the "uuid" field if the given value is not nil.
+func (du *DecisionUpdate) SetNillableUUID(s *string) *DecisionUpdate {
+	if s != nil {
+		du.SetUUID(*s)
+	}
+	return du
+}
+
+// ClearUUID clears the value of the "uuid" field.
+func (du *DecisionUpdate) ClearUUID() *DecisionUpdate {
+	du.mutation.ClearUUID()
+	return du
+}
+
 // SetOwnerID sets the "owner" edge to the Alert entity by ID.
 func (du *DecisionUpdate) SetOwnerID(id int) *DecisionUpdate {
 	du.mutation.SetOwnerID(id)
@@ -548,6 +568,19 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) {
 			Column: decision.FieldSimulated,
 		})
 	}
+	if value, ok := du.mutation.UUID(); ok {
+		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: decision.FieldUUID,
+		})
+	}
+	if du.mutation.UUIDCleared() {
+		_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Column: decision.FieldUUID,
+		})
+	}
 	if du.mutation.OwnerCleared() {
 		edge := &sqlgraph.EdgeSpec{
 			Rel:     sqlgraph.M2O,
@@ -825,6 +858,26 @@ func (duo *DecisionUpdateOne) SetNillableSimulated(b *bool) *DecisionUpdateOne {
 	return duo
 }
 
+// SetUUID sets the "uuid" field.
+func (duo *DecisionUpdateOne) SetUUID(s string) *DecisionUpdateOne {
+	duo.mutation.SetUUID(s)
+	return duo
+}
+
+// SetNillableUUID sets the "uuid" field if the given value is not nil.
+func (duo *DecisionUpdateOne) SetNillableUUID(s *string) *DecisionUpdateOne {
+	if s != nil {
+		duo.SetUUID(*s)
+	}
+	return duo
+}
+
+// ClearUUID clears the value of the "uuid" field.
+func (duo *DecisionUpdateOne) ClearUUID() *DecisionUpdateOne {
+	duo.mutation.ClearUUID()
+	return duo
+}
+
 // SetOwnerID sets the "owner" edge to the Alert entity by ID.
 func (duo *DecisionUpdateOne) SetOwnerID(id int) *DecisionUpdateOne {
 	duo.mutation.SetOwnerID(id)
@@ -1151,6 +1204,19 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err
 			Column: decision.FieldSimulated,
 		})
 	}
+	if value, ok := duo.mutation.UUID(); ok {
+		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: decision.FieldUUID,
+		})
+	}
+	if duo.mutation.UUIDCleared() {
+		_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Column: decision.FieldUUID,
+		})
+	}
 	if duo.mutation.OwnerCleared() {
 		edge := &sqlgraph.EdgeSpec{
 			Rel:     sqlgraph.M2O,

+ 8 - 6
pkg/database/ent/ent.go

@@ -12,6 +12,7 @@ import (
 	"entgo.io/ent/dialect/sql/sqlgraph"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
@@ -36,12 +37,13 @@ type OrderFunc func(*sql.Selector)
 // columnChecker returns a function indicates if the column exists in the given column.
 func columnChecker(table string) func(string) error {
 	checks := map[string]func(string) bool{
-		alert.Table:    alert.ValidColumn,
-		bouncer.Table:  bouncer.ValidColumn,
-		decision.Table: decision.ValidColumn,
-		event.Table:    event.ValidColumn,
-		machine.Table:  machine.ValidColumn,
-		meta.Table:     meta.ValidColumn,
+		alert.Table:      alert.ValidColumn,
+		bouncer.Table:    bouncer.ValidColumn,
+		configitem.Table: configitem.ValidColumn,
+		decision.Table:   decision.ValidColumn,
+		event.Table:      event.ValidColumn,
+		machine.Table:    machine.ValidColumn,
+		meta.Table:       meta.ValidColumn,
 	}
 	check, ok := checks[table]
 	if !ok {

+ 13 - 0
pkg/database/ent/hook/hook.go

@@ -35,6 +35,19 @@ func (f BouncerFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, err
 	return f(ctx, mv)
 }
 
+// The ConfigItemFunc type is an adapter to allow the use of ordinary
+// function as ConfigItem mutator.
+type ConfigItemFunc func(context.Context, *ent.ConfigItemMutation) (ent.Value, error)
+
+// Mutate calls f(ctx, m).
+func (f ConfigItemFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
+	mv, ok := m.(*ent.ConfigItemMutation)
+	if !ok {
+		return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ConfigItemMutation", m)
+	}
+	return f(ctx, mv)
+}
+
 // The DecisionFunc type is an adapter to allow the use of ordinary
 // function as Decision mutator.
 type DecisionFunc func(context.Context, *ent.DecisionMutation) (ent.Value, error)

+ 19 - 2
pkg/database/ent/migrate/schema.go

@@ -33,6 +33,7 @@ var (
 		{Name: "scenario_version", Type: field.TypeString, Nullable: true},
 		{Name: "scenario_hash", Type: field.TypeString, Nullable: true},
 		{Name: "simulated", Type: field.TypeBool, Default: false},
+		{Name: "uuid", Type: field.TypeString, Nullable: true},
 		{Name: "machine_alerts", Type: field.TypeInt, Nullable: true},
 	}
 	// AlertsTable holds the schema information for the "alerts" table.
@@ -43,7 +44,7 @@ var (
 		ForeignKeys: []*schema.ForeignKey{
 			{
 				Symbol:     "alerts_machines_alerts",
-				Columns:    []*schema.Column{AlertsColumns[23]},
+				Columns:    []*schema.Column{AlertsColumns[24]},
 				RefColumns: []*schema.Column{MachinesColumns[0]},
 				OnDelete:   schema.SetNull,
 			},
@@ -77,6 +78,20 @@ var (
 		Columns:    BouncersColumns,
 		PrimaryKey: []*schema.Column{BouncersColumns[0]},
 	}
+	// ConfigItemsColumns holds the columns for the "config_items" table.
+	ConfigItemsColumns = []*schema.Column{
+		{Name: "id", Type: field.TypeInt, Increment: true},
+		{Name: "created_at", Type: field.TypeTime, Nullable: true},
+		{Name: "updated_at", Type: field.TypeTime, Nullable: true},
+		{Name: "name", Type: field.TypeString, Unique: true},
+		{Name: "value", Type: field.TypeString},
+	}
+	// ConfigItemsTable holds the schema information for the "config_items" table.
+	ConfigItemsTable = &schema.Table{
+		Name:       "config_items",
+		Columns:    ConfigItemsColumns,
+		PrimaryKey: []*schema.Column{ConfigItemsColumns[0]},
+	}
 	// DecisionsColumns holds the columns for the "decisions" table.
 	DecisionsColumns = []*schema.Column{
 		{Name: "id", Type: field.TypeInt, Increment: true},
@@ -94,6 +109,7 @@ var (
 		{Name: "value", Type: field.TypeString},
 		{Name: "origin", Type: field.TypeString},
 		{Name: "simulated", Type: field.TypeBool, Default: false},
+		{Name: "uuid", Type: field.TypeString, Nullable: true},
 		{Name: "alert_decisions", Type: field.TypeInt, Nullable: true},
 	}
 	// DecisionsTable holds the schema information for the "decisions" table.
@@ -104,7 +120,7 @@ var (
 		ForeignKeys: []*schema.ForeignKey{
 			{
 				Symbol:     "decisions_alerts_decisions",
-				Columns:    []*schema.Column{DecisionsColumns[15]},
+				Columns:    []*schema.Column{DecisionsColumns[16]},
 				RefColumns: []*schema.Column{AlertsColumns[0]},
 				OnDelete:   schema.Cascade,
 			},
@@ -199,6 +215,7 @@ var (
 	Tables = []*schema.Table{
 		AlertsTable,
 		BouncersTable,
+		ConfigItemsTable,
 		DecisionsTable,
 		EventsTable,
 		MachinesTable,

+ 670 - 8
pkg/database/ent/mutation.go

@@ -11,6 +11,7 @@ import (
 
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
@@ -29,12 +30,13 @@ const (
 	OpUpdateOne = ent.OpUpdateOne
 
 	// Node types.
-	TypeAlert    = "Alert"
-	TypeBouncer  = "Bouncer"
-	TypeDecision = "Decision"
-	TypeEvent    = "Event"
-	TypeMachine  = "Machine"
-	TypeMeta     = "Meta"
+	TypeAlert      = "Alert"
+	TypeBouncer    = "Bouncer"
+	TypeConfigItem = "ConfigItem"
+	TypeDecision   = "Decision"
+	TypeEvent      = "Event"
+	TypeMachine    = "Machine"
+	TypeMeta       = "Meta"
 )
 
 // AlertMutation represents an operation that mutates the Alert nodes in the graph.
@@ -69,6 +71,7 @@ type AlertMutation struct {
 	scenarioVersion    *string
 	scenarioHash       *string
 	simulated          *bool
+	uuid               *string
 	clearedFields      map[string]struct{}
 	owner              *int
 	clearedowner       bool
@@ -1320,6 +1323,55 @@ func (m *AlertMutation) ResetSimulated() {
 	m.simulated = nil
 }
 
+// SetUUID sets the "uuid" field.
+func (m *AlertMutation) SetUUID(s string) {
+	m.uuid = &s
+}
+
+// UUID returns the value of the "uuid" field in the mutation.
+func (m *AlertMutation) UUID() (r string, exists bool) {
+	v := m.uuid
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldUUID returns the old "uuid" field's value of the Alert entity.
+// If the Alert object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *AlertMutation) OldUUID(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldUUID is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldUUID requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldUUID: %w", err)
+	}
+	return oldValue.UUID, nil
+}
+
+// ClearUUID clears the value of the "uuid" field.
+func (m *AlertMutation) ClearUUID() {
+	m.uuid = nil
+	m.clearedFields[alert.FieldUUID] = struct{}{}
+}
+
+// UUIDCleared returns if the "uuid" field was cleared in this mutation.
+func (m *AlertMutation) UUIDCleared() bool {
+	_, ok := m.clearedFields[alert.FieldUUID]
+	return ok
+}
+
+// ResetUUID resets all changes to the "uuid" field.
+func (m *AlertMutation) ResetUUID() {
+	m.uuid = nil
+	delete(m.clearedFields, alert.FieldUUID)
+}
+
 // SetOwnerID sets the "owner" edge to the Machine entity by id.
 // SetOwnerID sets the "owner" edge to the Machine entity by id.
 func (m *AlertMutation) SetOwnerID(id int) {
 	m.owner = &id
 // order to get all numeric fields that were incremented/decremented, call
 // order to get all numeric fields that were incremented/decremented, call
 // AddedFields().
 func (m *AlertMutation) Fields() []string {
-	fields := make([]string, 0, 22)
+	fields := make([]string, 0, 23)
 	if m.created_at != nil {
 		fields = append(fields, alert.FieldCreatedAt)
 	}
@@ -1607,6 +1659,9 @@ func (m *AlertMutation) Fields() []string {
 	if m.simulated != nil {
 		fields = append(fields, alert.FieldSimulated)
 	}
+	if m.uuid != nil {
+		fields = append(fields, alert.FieldUUID)
+	}
 	return fields
 }
 
@@ -1659,6 +1714,8 @@ func (m *AlertMutation) Field(name string) (ent.Value, bool) {
 		return m.ScenarioHash()
 	case alert.FieldSimulated:
 		return m.Simulated()
+	case alert.FieldUUID:
+		return m.UUID()
 	}
 	return nil, false
 }
@@ -1712,6 +1769,8 @@ func (m *AlertMutation) OldField(ctx context.Context, name string) (ent.Value, e
 		return m.OldScenarioHash(ctx)
 	case alert.FieldSimulated:
 		return m.OldSimulated(ctx)
+	case alert.FieldUUID:
+		return m.OldUUID(ctx)
 	}
 	return nil, fmt.Errorf("unknown Alert field %s", name)
 }
@@ -1875,6 +1934,13 @@ func (m *AlertMutation) SetField(name string, value ent.Value) error {
 		}
 		m.SetSimulated(v)
 		return nil
+	case alert.FieldUUID:
+		v, ok := value.(string)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetUUID(v)
+		return nil
 	}
 	return fmt.Errorf("unknown Alert field %s", name)
 }
@@ -2016,6 +2082,9 @@ func (m *AlertMutation) ClearedFields() []string {
 	if m.FieldCleared(alert.FieldScenarioHash) {
 		fields = append(fields, alert.FieldScenarioHash)
 	}
+	if m.FieldCleared(alert.FieldUUID) {
+		fields = append(fields, alert.FieldUUID)
+	}
 	return fields
 }
 
@@ -2090,6 +2159,9 @@ func (m *AlertMutation) ClearField(name string) error {
 	case alert.FieldScenarioHash:
 		m.ClearScenarioHash()
 		return nil
+	case alert.FieldUUID:
+		m.ClearUUID()
+		return nil
 	}
 	return fmt.Errorf("unknown Alert nullable field %s", name)
 }
@@ -2164,6 +2236,9 @@ func (m *AlertMutation) ResetField(name string) error {
 	case alert.FieldSimulated:
 		m.ResetSimulated()
 		return nil
+	case alert.FieldUUID:
+		m.ResetUUID()
+		return nil
 	}
 	return fmt.Errorf("unknown Alert field %s", name)
 }
@@ -3290,6 +3365,520 @@ func (m *BouncerMutation) ResetEdge(name string) error {
 	return fmt.Errorf("unknown Bouncer edge %s", name)
 }
 
+// ConfigItemMutation represents an operation that mutates the ConfigItem nodes in the graph.
+type ConfigItemMutation struct {
+	config
+	op            Op
+	typ           string
+	id            *int
+	created_at    *time.Time
+	updated_at    *time.Time
+	name          *string
+	value         *string
+	clearedFields map[string]struct{}
+	done          bool
+	oldValue      func(context.Context) (*ConfigItem, error)
+	predicates    []predicate.ConfigItem
+}
+
+var _ ent.Mutation = (*ConfigItemMutation)(nil)
+
+// configitemOption allows management of the mutation configuration using functional options.
+type configitemOption func(*ConfigItemMutation)
+
+// newConfigItemMutation creates new mutation for the ConfigItem entity.
+func newConfigItemMutation(c config, op Op, opts ...configitemOption) *ConfigItemMutation {
+	m := &ConfigItemMutation{
+		config:        c,
+		op:            op,
+		typ:           TypeConfigItem,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withConfigItemID sets the ID field of the mutation.
+func withConfigItemID(id int) configitemOption {
+	return func(m *ConfigItemMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *ConfigItem
+		)
+		m.oldValue = func(ctx context.Context) (*ConfigItem, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().ConfigItem.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withConfigItem sets the old ConfigItem of the mutation.
+func withConfigItem(node *ConfigItem) configitemOption {
+	return func(m *ConfigItemMutation) {
+		m.oldValue = func(context.Context) (*ConfigItem, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m ConfigItemMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m ConfigItemMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *ConfigItemMutation) ID() (id int, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *ConfigItemMutation) IDs(ctx context.Context) ([]int, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().ConfigItem.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (m *ConfigItemMutation) SetCreatedAt(t time.Time) {
+	m.created_at = &t
+}
+
+// CreatedAt returns the value of the "created_at" field in the mutation.
+func (m *ConfigItemMutation) CreatedAt() (r time.Time, exists bool) {
+	v := m.created_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreatedAt returns the old "created_at" field's value of the ConfigItem entity.
+// If the ConfigItem object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *ConfigItemMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
+	}
+	return oldValue.CreatedAt, nil
+}
+
+// ClearCreatedAt clears the value of the "created_at" field.
+func (m *ConfigItemMutation) ClearCreatedAt() {
+	m.created_at = nil
+	m.clearedFields[configitem.FieldCreatedAt] = struct{}{}
+}
+
+// CreatedAtCleared returns if the "created_at" field was cleared in this mutation.
+func (m *ConfigItemMutation) CreatedAtCleared() bool {
+	_, ok := m.clearedFields[configitem.FieldCreatedAt]
+	return ok
+}
+
+// ResetCreatedAt resets all changes to the "created_at" field.
+func (m *ConfigItemMutation) ResetCreatedAt() {
+	m.created_at = nil
+	delete(m.clearedFields, configitem.FieldCreatedAt)
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (m *ConfigItemMutation) SetUpdatedAt(t time.Time) {
+	m.updated_at = &t
+}
+
+// UpdatedAt returns the value of the "updated_at" field in the mutation.
+func (m *ConfigItemMutation) UpdatedAt() (r time.Time, exists bool) {
+	v := m.updated_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldUpdatedAt returns the old "updated_at" field's value of the ConfigItem entity.
+// If the ConfigItem object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *ConfigItemMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldUpdatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err)
+	}
+	return oldValue.UpdatedAt, nil
+}
+
+// ClearUpdatedAt clears the value of the "updated_at" field.
+func (m *ConfigItemMutation) ClearUpdatedAt() {
+	m.updated_at = nil
+	m.clearedFields[configitem.FieldUpdatedAt] = struct{}{}
+}
+
+// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation.
+func (m *ConfigItemMutation) UpdatedAtCleared() bool {
+	_, ok := m.clearedFields[configitem.FieldUpdatedAt]
+	return ok
+}
+
+// ResetUpdatedAt resets all changes to the "updated_at" field.
+func (m *ConfigItemMutation) ResetUpdatedAt() {
+	m.updated_at = nil
+	delete(m.clearedFields, configitem.FieldUpdatedAt)
+}
+
+// SetName sets the "name" field.
+func (m *ConfigItemMutation) SetName(s string) {
+	m.name = &s
+}
+
+// Name returns the value of the "name" field in the mutation.
+func (m *ConfigItemMutation) Name() (r string, exists bool) {
+	v := m.name
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldName returns the old "name" field's value of the ConfigItem entity.
+// If the ConfigItem object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *ConfigItemMutation) OldName(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldName is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldName requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldName: %w", err)
+	}
+	return oldValue.Name, nil
+}
+
+// ResetName resets all changes to the "name" field.
+func (m *ConfigItemMutation) ResetName() {
+	m.name = nil
+}
+
+// SetValue sets the "value" field.
+func (m *ConfigItemMutation) SetValue(s string) {
+	m.value = &s
+}
+
+// Value returns the value of the "value" field in the mutation.
+func (m *ConfigItemMutation) Value() (r string, exists bool) {
+	v := m.value
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldValue returns the old "value" field's value of the ConfigItem entity.
+// If the ConfigItem object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *ConfigItemMutation) OldValue(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldValue is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldValue requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldValue: %w", err)
+	}
+	return oldValue.Value, nil
+}
+
+// ResetValue resets all changes to the "value" field.
+func (m *ConfigItemMutation) ResetValue() {
+	m.value = nil
+}
+
+// Where appends a list predicates to the ConfigItemMutation builder.
+func (m *ConfigItemMutation) Where(ps ...predicate.ConfigItem) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// Op returns the operation name.
+func (m *ConfigItemMutation) Op() Op {
+	return m.op
+}
+
+// Type returns the node type of this mutation (ConfigItem).
+func (m *ConfigItemMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *ConfigItemMutation) Fields() []string {
+	fields := make([]string, 0, 4)
+	if m.created_at != nil {
+		fields = append(fields, configitem.FieldCreatedAt)
+	}
+	if m.updated_at != nil {
+		fields = append(fields, configitem.FieldUpdatedAt)
+	}
+	if m.name != nil {
+		fields = append(fields, configitem.FieldName)
+	}
+	if m.value != nil {
+		fields = append(fields, configitem.FieldValue)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *ConfigItemMutation) Field(name string) (ent.Value, bool) {
+	switch name {
+	case configitem.FieldCreatedAt:
+		return m.CreatedAt()
+	case configitem.FieldUpdatedAt:
+		return m.UpdatedAt()
+	case configitem.FieldName:
+		return m.Name()
+	case configitem.FieldValue:
+		return m.Value()
+	}
+	return nil, false
+}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.
+func (m *ConfigItemMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
+	switch name {
+	case configitem.FieldCreatedAt:
+		return m.OldCreatedAt(ctx)
+	case configitem.FieldUpdatedAt:
+		return m.OldUpdatedAt(ctx)
+	case configitem.FieldName:
+		return m.OldName(ctx)
+	case configitem.FieldValue:
+		return m.OldValue(ctx)
+	}
+	return nil, fmt.Errorf("unknown ConfigItem field %s", name)
+}
+
+// SetField sets the value of a field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *ConfigItemMutation) SetField(name string, value ent.Value) error {
+	switch name {
+	case configitem.FieldCreatedAt:
+		v, ok := value.(time.Time)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetCreatedAt(v)
+		return nil
+	case configitem.FieldUpdatedAt:
+		v, ok := value.(time.Time)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetUpdatedAt(v)
+		return nil
+	case configitem.FieldName:
+		v, ok := value.(string)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetName(v)
+		return nil
+	case configitem.FieldValue:
+		v, ok := value.(string)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetValue(v)
+		return nil
+	}
+	return fmt.Errorf("unknown ConfigItem field %s", name)
+}
+
+// AddedFields returns all numeric fields that were incremented/decremented during
+// this mutation.
+func (m *ConfigItemMutation) AddedFields() []string {
+	return nil
+}
+
+// AddedField returns the numeric value that was incremented/decremented on a field
+// with the given name. The second boolean return value indicates that this field
+// was not set, or was not defined in the schema.
+func (m *ConfigItemMutation) AddedField(name string) (ent.Value, bool) {
+	return nil, false
+}
+
+// AddField adds the value to the field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *ConfigItemMutation) AddField(name string, value ent.Value) error {
+	switch name {
+	}
+	return fmt.Errorf("unknown ConfigItem numeric field %s", name)
+}
+
+// ClearedFields returns all nullable fields that were cleared during this
+// mutation.
+func (m *ConfigItemMutation) ClearedFields() []string {
+	var fields []string
+	if m.FieldCleared(configitem.FieldCreatedAt) {
+		fields = append(fields, configitem.FieldCreatedAt)
+	}
+	if m.FieldCleared(configitem.FieldUpdatedAt) {
+		fields = append(fields, configitem.FieldUpdatedAt)
+	}
+	return fields
+}
+
+// FieldCleared returns a boolean indicating if a field with the given name was
+// cleared in this mutation.
+func (m *ConfigItemMutation) FieldCleared(name string) bool {
+	_, ok := m.clearedFields[name]
+	return ok
+}
+
+// ClearField clears the value of the field with the given name. It returns an
+// error if the field is not defined in the schema.
+func (m *ConfigItemMutation) ClearField(name string) error {
+	switch name {
+	case configitem.FieldCreatedAt:
+		m.ClearCreatedAt()
+		return nil
+	case configitem.FieldUpdatedAt:
+		m.ClearUpdatedAt()
+		return nil
+	}
+	return fmt.Errorf("unknown ConfigItem nullable field %s", name)
+}
+
+// ResetField resets all changes in the mutation for the field with the given name.
+// It returns an error if the field is not defined in the schema.
+func (m *ConfigItemMutation) ResetField(name string) error {
+	switch name {
+	case configitem.FieldCreatedAt:
+		m.ResetCreatedAt()
+		return nil
+	case configitem.FieldUpdatedAt:
+		m.ResetUpdatedAt()
+		return nil
+	case configitem.FieldName:
+		m.ResetName()
+		return nil
+	case configitem.FieldValue:
+		m.ResetValue()
+		return nil
+	}
+	return fmt.Errorf("unknown ConfigItem field %s", name)
+}
+
+// AddedEdges returns all edge names that were set/added in this mutation.
+func (m *ConfigItemMutation) AddedEdges() []string {
+	edges := make([]string, 0, 0)
+	return edges
+}
+
+// AddedIDs returns all IDs (to other nodes) that were added for the given edge
+// name in this mutation.
+func (m *ConfigItemMutation) AddedIDs(name string) []ent.Value {
+	return nil
+}
+
+// RemovedEdges returns all edge names that were removed in this mutation.
+func (m *ConfigItemMutation) RemovedEdges() []string {
+	edges := make([]string, 0, 0)
+	return edges
+}
+
+// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
+// the given name in this mutation.
+func (m *ConfigItemMutation) RemovedIDs(name string) []ent.Value {
+	return nil
+}
+
+// ClearedEdges returns all edge names that were cleared in this mutation.
+func (m *ConfigItemMutation) ClearedEdges() []string {
+	edges := make([]string, 0, 0)
+	return edges
+}
+
+// EdgeCleared returns a boolean which indicates if the edge with the given name
+// was cleared in this mutation.
+func (m *ConfigItemMutation) EdgeCleared(name string) bool {
+	return false
+}
+
+// ClearEdge clears the value of the edge with the given name. It returns an error
+// if that edge is not defined in the schema.
+func (m *ConfigItemMutation) ClearEdge(name string) error {
+	return fmt.Errorf("unknown ConfigItem unique edge %s", name)
+}
+
+// ResetEdge resets all changes to the edge with the given name in this mutation.
+// It returns an error if the edge is not defined in the schema.
+func (m *ConfigItemMutation) ResetEdge(name string) error {
+	return fmt.Errorf("unknown ConfigItem edge %s", name)
+}
+
 // DecisionMutation represents an operation that mutates the Decision nodes in the graph.
 type DecisionMutation struct {
 	config
@@ -3315,6 +3904,7 @@ type DecisionMutation struct {
 	value           *string
 	origin          *string
 	simulated       *bool
+	uuid            *string
 	clearedFields   map[string]struct{}
 	owner           *int
 	clearedowner    bool
@@ -4134,6 +4724,55 @@ func (m *DecisionMutation) ResetSimulated() {
 	m.simulated = nil
 }
 
+// SetUUID sets the "uuid" field.
+func (m *DecisionMutation) SetUUID(s string) {
+	m.uuid = &s
+}
+
+// UUID returns the value of the "uuid" field in the mutation.
+func (m *DecisionMutation) UUID() (r string, exists bool) {
+	v := m.uuid
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldUUID returns the old "uuid" field's value of the Decision entity.
+// If the Decision object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *DecisionMutation) OldUUID(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldUUID is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldUUID requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldUUID: %w", err)
+	}
+	return oldValue.UUID, nil
+}
+
+// ClearUUID clears the value of the "uuid" field.
+func (m *DecisionMutation) ClearUUID() {
+	m.uuid = nil
+	m.clearedFields[decision.FieldUUID] = struct{}{}
+}
+
+// UUIDCleared returns if the "uuid" field was cleared in this mutation.
+func (m *DecisionMutation) UUIDCleared() bool {
+	_, ok := m.clearedFields[decision.FieldUUID]
+	return ok
+}
+
+// ResetUUID resets all changes to the "uuid" field.
+func (m *DecisionMutation) ResetUUID() {
+	m.uuid = nil
+	delete(m.clearedFields, decision.FieldUUID)
+}
+
 // SetOwnerID sets the "owner" edge to the Alert entity by id.
 func (m *DecisionMutation) SetOwnerID(id int) {
 	m.owner = &id
@@ -4192,7 +4831,7 @@ func (m *DecisionMutation) Type() string {
 // order to get all numeric fields that were incremented/decremented, call
 // AddedFields().
 func (m *DecisionMutation) Fields() []string {
-	fields := make([]string, 0, 14)
+	fields := make([]string, 0, 15)
 	if m.created_at != nil {
 		fields = append(fields, decision.FieldCreatedAt)
 	}
@@ -4235,6 +4874,9 @@ func (m *DecisionMutation) Fields() []string {
 	if m.simulated != nil {
 		fields = append(fields, decision.FieldSimulated)
 	}
+	if m.uuid != nil {
+		fields = append(fields, decision.FieldUUID)
+	}
 	return fields
 }
 
@@ -4271,6 +4913,8 @@ func (m *DecisionMutation) Field(name string) (ent.Value, bool) {
 		return m.Origin()
 	case decision.FieldSimulated:
 		return m.Simulated()
+	case decision.FieldUUID:
+		return m.UUID()
 	}
 	return nil, false
 }
@@ -4308,6 +4952,8 @@ func (m *DecisionMutation) OldField(ctx context.Context, name string) (ent.Value
 		return m.OldOrigin(ctx)
 	case decision.FieldSimulated:
 		return m.OldSimulated(ctx)
+	case decision.FieldUUID:
+		return m.OldUUID(ctx)
 	}
 	return nil, fmt.Errorf("unknown Decision field %s", name)
 }
@@ -4415,6 +5061,13 @@ func (m *DecisionMutation) SetField(name string, value ent.Value) error {
 		}
 		m.SetSimulated(v)
 		return nil
+	case decision.FieldUUID:
+		v, ok := value.(string)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetUUID(v)
+		return nil
 	}
 	return fmt.Errorf("unknown Decision field %s", name)
 }
@@ -4532,6 +5185,9 @@ func (m *DecisionMutation) ClearedFields() []string {
 	if m.FieldCleared(decision.FieldIPSize) {
 		fields = append(fields, decision.FieldIPSize)
 	}
+	if m.FieldCleared(decision.FieldUUID) {
+		fields = append(fields, decision.FieldUUID)
+	}
 	return fields
 }
 
@@ -4570,6 +5226,9 @@ func (m *DecisionMutation) ClearField(name string) error {
 	case decision.FieldIPSize:
 		m.ClearIPSize()
 		return nil
+	case decision.FieldUUID:
+		m.ClearUUID()
+		return nil
 	}
 	return fmt.Errorf("unknown Decision nullable field %s", name)
 }
@@ -4620,6 +5279,9 @@ func (m *DecisionMutation) ResetField(name string) error {
 	case decision.FieldSimulated:
 		m.ResetSimulated()
 		return nil
+	case decision.FieldUUID:
+		m.ResetUUID()
+		return nil
 	}
 	return fmt.Errorf("unknown Decision field %s", name)
 }

+ 3 - 0
pkg/database/ent/predicate/predicate.go

@@ -12,6 +12,9 @@ type Alert func(*sql.Selector)
 // Bouncer is the predicate function for bouncer builders.
 type Bouncer func(*sql.Selector)
 
+// ConfigItem is the predicate function for configitem builders.
+type ConfigItem func(*sql.Selector)
+
 // Decision is the predicate function for decision builders.
 type Decision func(*sql.Selector)
 

+ 15 - 0
pkg/database/ent/runtime.go

@@ -7,6 +7,7 @@ import (
 
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
@@ -86,6 +87,20 @@ func init() {
 	bouncerDescAuthType := bouncerFields[10].Descriptor()
 	// bouncer.DefaultAuthType holds the default value on creation for the auth_type field.
 	bouncer.DefaultAuthType = bouncerDescAuthType.Default.(string)
+	configitemFields := schema.ConfigItem{}.Fields()
+	_ = configitemFields
+	// configitemDescCreatedAt is the schema descriptor for created_at field.
+	configitemDescCreatedAt := configitemFields[0].Descriptor()
+	// configitem.DefaultCreatedAt holds the default value on creation for the created_at field.
+	configitem.DefaultCreatedAt = configitemDescCreatedAt.Default.(func() time.Time)
+	// configitem.UpdateDefaultCreatedAt holds the default value on update for the created_at field.
+	configitem.UpdateDefaultCreatedAt = configitemDescCreatedAt.UpdateDefault.(func() time.Time)
+	// configitemDescUpdatedAt is the schema descriptor for updated_at field.
+	configitemDescUpdatedAt := configitemFields[1].Descriptor()
+	// configitem.DefaultUpdatedAt holds the default value on creation for the updated_at field.
+	configitem.DefaultUpdatedAt = configitemDescUpdatedAt.Default.(func() time.Time)
+	// configitem.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
+	configitem.UpdateDefaultUpdatedAt = configitemDescUpdatedAt.UpdateDefault.(func() time.Time)
 	decisionFields := schema.Decision{}.Fields()
 	_ = decisionFields
 	// decisionDescCreatedAt is the schema descriptor for created_at field.

+ 1 - 0
pkg/database/ent/schema/alert.go

@@ -50,6 +50,7 @@ func (Alert) Fields() []ent.Field {
 		field.String("scenarioVersion").Optional(),
 		field.String("scenarioHash").Optional(),
 		field.Bool("simulated").Default(false),
+		field.String("uuid").Optional(), //this uuid is mostly here to ensure that CAPI/PAPI has a unique id for each alert
 	}
 }
 

+ 31 - 0
pkg/database/ent/schema/config.go

@@ -0,0 +1,31 @@
+package schema
+
+import (
+	"entgo.io/ent"
+	"entgo.io/ent/schema/field"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+)
+
+// ConfigItem holds the schema definition for the ConfigItem entity.
+type ConfigItem struct {
+	ent.Schema
+}
+
+// Fields of the ConfigItem.
+func (ConfigItem) Fields() []ent.Field {
+	return []ent.Field{
+		field.Time("created_at").
+			Default(types.UtcNow).
+			UpdateDefault(types.UtcNow).Nillable().Optional().StructTag(`json:"created_at"`),
+		field.Time("updated_at").
+			Default(types.UtcNow).
+			UpdateDefault(types.UtcNow).Nillable().Optional().StructTag(`json:"updated_at"`),
+		field.String("name").Unique().StructTag(`json:"name"`),
+		field.String("value").StructTag(`json:"value"`), // a json object
+	}
+}
+
+// Edges of the ConfigItem.
+func (ConfigItem) Edges() []ent.Edge {
+	return nil
+}

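The ConfigItem entity above is a minimal name/value store, with the value holding a JSON string. Assuming ent's standard code generation from this schema, a caller could persist and read back an item roughly as follows; the helper names are illustrative, only the generated builders (Create/SetName/SetValue, Query/Where/Only, configitem.NameEQ) are implied by the schema:

package main

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
)

// saveConfigItem and loadConfigItem are illustrative helpers, not part of the commit.
func saveConfigItem(ctx context.Context, client *ent.Client, name, jsonValue string) error {
	// "name" is unique in the schema; "value" is an opaque JSON payload stored as a string.
	_, err := client.ConfigItem.Create().
		SetName(name).
		SetValue(jsonValue).
		Save(ctx)
	return err
}

func loadConfigItem(ctx context.Context, client *ent.Client, name string) (string, error) {
	item, err := client.ConfigItem.Query().
		Where(configitem.NameEQ(name)).
		Only(ctx)
	if err != nil {
		return "", err
	}
	return item.Value, nil
}

func main() {}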
+ 1 - 0
pkg/database/ent/schema/decision.go

@@ -37,6 +37,7 @@ func (Decision) Fields() []ent.Field {
 		field.String("value"),
 		field.String("origin"),
 		field.Bool("simulated").Default(false),
+		field.String("uuid").Optional(), //this uuid is mostly here to ensure that CAPI/PAPI has a unique id for each decision
 	}
 }
 

+ 3 - 0
pkg/database/ent/tx.go

@@ -16,6 +16,8 @@ type Tx struct {
 	Alert *AlertClient
 	// Bouncer is the client for interacting with the Bouncer builders.
 	Bouncer *BouncerClient
+	// ConfigItem is the client for interacting with the ConfigItem builders.
+	ConfigItem *ConfigItemClient
 	// Decision is the client for interacting with the Decision builders.
 	Decision *DecisionClient
 	// Event is the client for interacting with the Event builders.
@@ -161,6 +163,7 @@ func (tx *Tx) Client() *Client {
 func (tx *Tx) init() {
 	tx.Alert = NewAlertClient(tx.config)
 	tx.Bouncer = NewBouncerClient(tx.config)
+	tx.ConfigItem = NewConfigItemClient(tx.config)
 	tx.Decision = NewDecisionClient(tx.config)
 	tx.Event = NewEventClient(tx.config)
 	tx.Machine = NewMachineClient(tx.config)

+ 3 - 2
pkg/database/machines.go

@@ -8,12 +8,13 @@ import (
 
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
 	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
 	"github.com/pkg/errors"
 	"golang.org/x/crypto/bcrypt"
 )
 
-const CapiMachineID = "CAPI"
-const CapiListsMachineID = "lists"
+const CapiMachineID = types.CAPIOrigin
+const CapiListsMachineID = types.ListOrigin
 
 func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipAddress string, isValidated bool, force bool, authType string) (*ent.Machine, error) {
 	hashPassword, err := bcrypt.GenerateFromPassword([]byte(*password), bcrypt.DefaultCost)

+ 5 - 1
pkg/fflag/crowdsec.go

@@ -4,6 +4,7 @@ var Crowdsec = FeatureRegister{EnvPrefix: "CROWDSEC_FEATURE_"}
 
 var CscliSetup = &Feature{Name: "cscli_setup", Description: "Enable cscli setup command (service detection)"}
 var DisableHttpRetryBackoff = &Feature{Name: "disable_http_retry_backoff", Description: "Disable http retry backoff"}
+var PapiClient = &Feature{Name: "papi_client", Description: "Enable Polling API client"}
 
 func RegisterAllFeatures() error {
 	err := Crowdsec.RegisterFeature(CscliSetup)
@@ -14,6 +15,9 @@ func RegisterAllFeatures() error {
 	if err != nil {
 		return err
 	}
-
+	err = Crowdsec.RegisterFeature(PapiClient)
+	if err != nil {
+		return err
+	}
 	return nil
 }

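The new papi_client flag follows the existing feature-flag pattern: given the registry's CROWDSEC_FEATURE_ prefix, it would presumably be toggled through the CROWDSEC_FEATURE_PAPI_CLIENT environment variable. A minimal sketch of gating code on it, assuming the Feature type exposes an IsEnabled() helper (not shown in this diff):

package main

import (
	log "github.com/sirupsen/logrus"

	"github.com/crowdsecurity/crowdsec/pkg/fflag"
)

// maybeStartPAPI is an illustrative helper: it gates PAPI start-up on the new flag.
// Assumption: Feature exposes IsEnabled(); adjust to the real fflag API if it differs.
func maybeStartPAPI() {
	if !fflag.PapiClient.IsEnabled() {
		log.Debug("papi_client feature is disabled, not starting Polling API client")
		return
	}
	log.Info("papi_client feature is enabled, starting Polling API client")
	// ... wire up the long-poll client here ...
}

func main() {
	maybeStartPAPI()
}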
+ 191 - 0
pkg/longpollclient/client.go

@@ -0,0 +1,191 @@
+package longpollclient
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/gofrs/uuid"
+	log "github.com/sirupsen/logrus"
+	"gopkg.in/tomb.v2"
+)
+
+type LongPollClient struct {
+	t          tomb.Tomb
+	c          chan Event
+	url        url.URL
+	logger     *log.Entry
+	since      int64
+	httpClient *http.Client
+}
+
+type LongPollClientConfig struct {
+	Url        url.URL
+	Logger     *log.Logger
+	HttpClient *http.Client
+}
+
+type Event struct {
+	Timestamp int64     `json:"timestamp"`
+	Category  string    `json:"category"`
+	Data      string    `json:"data"`
+	ID        uuid.UUID `json:"id"`
+	RequestId string
+}
+
+type pollResponse struct {
+	Events []Event `json:"events"`
+	// Set for timeout responses
+	Timestamp int64 `json:"timestamp"`
+	// API error responses could have an informative error here. Empty on success.
+	ErrorMessage string `json:"error"`
+}
+
+var errUnauthorized = fmt.Errorf("user is not authorized to use PAPI")
+
+const timeoutMessage = "no events before timeout"
+
+func (c *LongPollClient) doQuery() error {
+
+	logger := c.logger.WithField("method", "doQuery")
+
+	query := c.url.Query()
+	query.Set("since_time", fmt.Sprintf("%d", c.since))
+	query.Set("timeout", "45")
+	c.url.RawQuery = query.Encode()
+
+	logger.Debugf("Query parameters: %s", c.url.RawQuery)
+
+	req, err := http.NewRequest(http.MethodGet, c.url.String(), nil)
+	if err != nil {
+		logger.Errorf("failed to create request: %s", err)
+		return err
+	}
+	req.Header.Set("Accept", "application/json")
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		logger.Errorf("failed to execute request: %s", err)
+		return err
+	}
+	defer resp.Body.Close()
+	requestId := resp.Header.Get("X-Amzn-Trace-Id")
+	logger = logger.WithField("request-id", requestId)
+	if resp.StatusCode != http.StatusOK {
+		c.logger.Errorf("unexpected status code: %d", resp.StatusCode)
+		if resp.StatusCode == http.StatusPaymentRequired {
+			bodyContent, err := io.ReadAll(resp.Body)
+			if err != nil {
+				logger.Errorf("failed to read response body: %s", err)
+				return err
+			}
+			logger.Errorf(string(bodyContent))
+			return errUnauthorized
+		}
+		return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
+	}
+
+	decoder := json.NewDecoder(resp.Body)
+
+	for {
+		select {
+		case <-c.t.Dying():
+			logger.Debugf("dying")
+			close(c.c)
+			return nil
+		default:
+			var pollResp pollResponse
+			err = decoder.Decode(&pollResp)
+			if err != nil {
+				if err == io.EOF {
+					logger.Debugf("server closed connection")
+					return nil
+				}
+				return fmt.Errorf("error decoding poll response: %v", err)
+			}
+
+			logger.Tracef("got response: %+v", pollResp)
+
+			if len(pollResp.ErrorMessage) > 0 {
+				if pollResp.ErrorMessage == timeoutMessage {
+					logger.Debugf("got timeout message")
+					return nil
+				}
+				return fmt.Errorf("longpoll API error message: %s", pollResp.ErrorMessage)
+			}
+
+			if len(pollResp.Events) > 0 {
+				logger.Debugf("got %d events", len(pollResp.Events))
+				for _, event := range pollResp.Events {
+					event.RequestId = requestId
+					c.c <- event
+					if event.Timestamp > c.since {
+						c.since = event.Timestamp
+					}
+				}
+			}
+			if pollResp.Timestamp > 0 {
+				c.since = pollResp.Timestamp
+			}
+			logger.Debugf("Since is now %d", c.since)
+		}
+	}
+}
+
+func (c *LongPollClient) pollEvents() error {
+	for {
+		select {
+		case <-c.t.Dying():
+			c.logger.Debug("dying")
+			return nil
+		default:
+			c.logger.Debug("Polling PAPI")
+			err := c.doQuery()
+			if err != nil {
+				c.logger.Errorf("failed to poll: %s", err)
+				if err == errUnauthorized {
+					c.t.Kill(err)
+					close(c.c)
+					return err
+				}
+				continue
+			}
+		}
+	}
+}
+
+func (c *LongPollClient) Start(since time.Time) chan Event {
+	c.logger.Infof("starting polling client")
+	c.c = make(chan Event)
+	c.since = since.Unix() * 1000
+	c.t.Go(c.pollEvents)
+	return c.c
+}
+
+func (c *LongPollClient) Stop() error {
+	c.t.Kill(nil)
+	return nil
+}
+
+func NewLongPollClient(config LongPollClientConfig) (*LongPollClient, error) {
+	var logger *log.Entry
+	if config.Url == (url.URL{}) {
+		return nil, fmt.Errorf("url is required")
+	}
+	if config.Logger == nil {
+		logger = log.WithField("component", "longpollclient")
+	} else {
+		logger = config.Logger.WithFields(log.Fields{
+			"component": "longpollclient",
+			"url":       config.Url.String(),
+		})
+	}
+
+	return &LongPollClient{
+		url:        config.Url,
+		logger:     logger,
+		httpClient: config.HttpClient,
+	}, nil
+}

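Usage-wise the client above boils down to three calls: NewLongPollClient with a URL, Start to obtain a channel of events, and Stop to signal shutdown. A minimal sketch based only on the code shown; the endpoint and the use of http.DefaultClient are illustrative (crowdsec itself passes an authenticated client):

package main

import (
	"net/http"
	"net/url"
	"time"

	log "github.com/sirupsen/logrus"

	"github.com/crowdsecurity/crowdsec/pkg/longpollclient"
)

func main() {
	// Endpoint taken from types.PAPIBaseURL further down in this change.
	u, err := url.Parse("https://papi.crowdsec.net/v1/decisions/stream/poll")
	if err != nil {
		log.Fatal(err)
	}

	client, err := longpollclient.NewLongPollClient(longpollclient.LongPollClientConfig{
		Url:        *u,
		HttpClient: http.DefaultClient,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Start polling from "now"; events are delivered on the returned channel.
	// client.Stop() would signal the poller to shut down.
	for evt := range client.Start(time.Now()) {
		log.Infof("PAPI event %s (request %s): %s", evt.ID, evt.RequestId, evt.Data)
	}
}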
+ 26 - 2
pkg/models/add_signals_request_item.go

@@ -23,12 +23,16 @@ type AddSignalsRequestItem struct {
 	// alert id
 	AlertID int64 `json:"alert_id,omitempty"`
 
+
 	// context
 	Context []*AddSignalsRequestItemContextItems0 `json:"context"`
 
-	// created at
+  // created at
 	CreatedAt string `json:"created_at,omitempty"`
 
+	// decisions
+	Decisions AddSignalsRequestItemDecisions `json:"decisions,omitempty"`
+
 	// machine id
 	MachineID string `json:"machine_id,omitempty"`
 
@@ -53,7 +57,7 @@ type AddSignalsRequestItem struct {
 
 	// source
 	// Required: true
-	Source *Source `json:"source"`
+	Source *AddSignalsRequestItemSource `json:"source"`
 
 	// start at
 	// Required: true
@@ -62,6 +66,10 @@ type AddSignalsRequestItem struct {
 	// stop at
 	// Required: true
 	StopAt *string `json:"stop_at"`
+
+	// uuid
+	// Read Only: true
+	UUID string `json:"uuid,omitempty"`
 }
 
 // Validate validates this add signals request item
@@ -106,6 +114,7 @@ func (m *AddSignalsRequestItem) Validate(formats strfmt.Registry) error {
 	return nil
 }
 
+
 func (m *AddSignalsRequestItem) validateContext(formats strfmt.Registry) error {
 	if swag.IsZero(m.Context) { // not required
 		return nil
@@ -210,6 +219,7 @@ func (m *AddSignalsRequestItem) validateStopAt(formats strfmt.Registry) error {
 func (m *AddSignalsRequestItem) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
 	var res []error
 
+
 	if err := m.contextValidateContext(ctx, formats); err != nil {
 		res = append(res, err)
 	}
@@ -218,12 +228,17 @@ func (m *AddSignalsRequestItem) ContextValidate(ctx context.Context, formats str
 		res = append(res, err)
 	}
 
+	if err := m.contextValidateUUID(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
 	if len(res) > 0 {
 		return errors.CompositeValidationError(res...)
 	}
 	return nil
 }
 
+
 func (m *AddSignalsRequestItem) contextValidateContext(ctx context.Context, formats strfmt.Registry) error {
 
 	for i := 0; i < len(m.Context); i++ {
@@ -260,6 +275,15 @@ func (m *AddSignalsRequestItem) contextValidateSource(ctx context.Context, forma
 	return nil
 }
 
+func (m *AddSignalsRequestItem) contextValidateUUID(ctx context.Context, formats strfmt.Registry) error {
+
+	if err := validate.ReadOnly(ctx, "uuid", "body", string(m.UUID)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
 // MarshalBinary interface implementation
 func (m *AddSignalsRequestItem) MarshalBinary() ([]byte, error) {
 	if m == nil {

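The signal item now embeds its own source type, an optional list of decisions and a uuid. A hedged sketch of assembling one by hand with the go-openapi swag pointer helpers; the values are placeholders, and the scenario/message metadata required by the schema (unchanged by this diff) is omitted for brevity, so this item would not pass Validate as-is:

package main

import (
	"fmt"

	"github.com/go-openapi/swag"

	"github.com/crowdsecurity/crowdsec/pkg/models"
)

func main() {
	// Placeholder values; only fields visible in the hunks above are set.
	item := &models.AddSignalsRequestItem{
		CreatedAt: "2023-01-02T03:04:05Z",
		StartAt:   swag.String("2023-01-02T03:04:00Z"),
		StopAt:    swag.String("2023-01-02T03:04:05Z"),
		Source: &models.AddSignalsRequestItemSource{
			Scope: swag.String("ip"),
			Value: swag.String("192.0.2.1"),
		},
		Decisions: models.AddSignalsRequestItemDecisions{
			{
				ID:       swag.Int64(42),
				Origin:   swag.String("crowdsec"),
				Scenario: swag.String("crowdsecurity/ssh-bf"),
				Scope:    swag.String("ip"),
				Value:    swag.String("192.0.2.1"),
				Type:     swag.String("ban"),
				Duration: swag.String("4h"),
			},
		},
	}
	fmt.Printf("%+v\n", item)
}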
+ 73 - 0
pkg/models/add_signals_request_item_decisions.go

@@ -0,0 +1,73 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"strconv"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/strfmt"
+	"github.com/go-openapi/swag"
+)
+
+// AddSignalsRequestItemDecisions Decisions list
+//
+// swagger:model AddSignalsRequestItemDecisions
+type AddSignalsRequestItemDecisions []*AddSignalsRequestItemDecisionsItem
+
+// Validate validates this add signals request item decisions
+func (m AddSignalsRequestItemDecisions) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	for i := 0; i < len(m); i++ {
+		if swag.IsZero(m[i]) { // not required
+			continue
+		}
+
+		if m[i] != nil {
+			if err := m[i].Validate(formats); err != nil {
+				if ve, ok := err.(*errors.Validation); ok {
+					return ve.ValidateName(strconv.Itoa(i))
+				} else if ce, ok := err.(*errors.CompositeError); ok {
+					return ce.ValidateName(strconv.Itoa(i))
+				}
+				return err
+			}
+		}
+
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+// ContextValidate validate this add signals request item decisions based on the context it is used
+func (m AddSignalsRequestItemDecisions) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	var res []error
+
+	for i := 0; i < len(m); i++ {
+
+		if m[i] != nil {
+			if err := m[i].ContextValidate(ctx, formats); err != nil {
+				if ve, ok := err.(*errors.Validation); ok {
+					return ve.ValidateName(strconv.Itoa(i))
+				} else if ce, ok := err.(*errors.CompositeError); ok {
+					return ce.ValidateName(strconv.Itoa(i))
+				}
+				return err
+			}
+		}
+
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}

+ 201 - 0
pkg/models/add_signals_request_item_decisions_item.go

@@ -0,0 +1,201 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/strfmt"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+)
+
+// AddSignalsRequestItemDecisionsItem Decision
+//
+// swagger:model AddSignalsRequestItemDecisionsItem
+type AddSignalsRequestItemDecisionsItem struct {
+
+	// duration
+	// Required: true
+	Duration *string `json:"duration"`
+
+	// (only relevant for GET ops) the unique id
+	// Required: true
+	ID *int64 `json:"id"`
+
+	// the origin of the decision : cscli, crowdsec
+	// Required: true
+	Origin *string `json:"origin"`
+
+	// scenario
+	// Required: true
+	Scenario *string `json:"scenario"`
+
+	// the scope of decision : does it apply to an IP, a range, a username, etc
+	// Required: true
+	Scope *string `json:"scope"`
+
+	// simulated
+	Simulated bool `json:"simulated,omitempty"`
+
+	// the type of decision, might be 'ban', 'captcha' or something custom. Ignored when watcher (cscli/crowdsec) is pushing to APIL.
+	// Required: true
+	Type *string `json:"type"`
+
+	// until
+	Until string `json:"until,omitempty"`
+
+	// only relevant for LAPI->CAPI, ignored for cscli->LAPI and crowdsec->LAPI
+	// Read Only: true
+	UUID string `json:"uuid,omitempty"`
+
+	// the value of the decision scope : an IP, a range, a username, etc
+	// Required: true
+	Value *string `json:"value"`
+}
+
+// Validate validates this add signals request item decisions item
+func (m *AddSignalsRequestItemDecisionsItem) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateDuration(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateID(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateOrigin(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateScenario(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateScope(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateType(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateValue(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *AddSignalsRequestItemDecisionsItem) validateDuration(formats strfmt.Registry) error {
+
+	if err := validate.Required("duration", "body", m.Duration); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *AddSignalsRequestItemDecisionsItem) validateID(formats strfmt.Registry) error {
+
+	if err := validate.Required("id", "body", m.ID); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *AddSignalsRequestItemDecisionsItem) validateOrigin(formats strfmt.Registry) error {
+
+	if err := validate.Required("origin", "body", m.Origin); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *AddSignalsRequestItemDecisionsItem) validateScenario(formats strfmt.Registry) error {
+
+	if err := validate.Required("scenario", "body", m.Scenario); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *AddSignalsRequestItemDecisionsItem) validateScope(formats strfmt.Registry) error {
+
+	if err := validate.Required("scope", "body", m.Scope); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *AddSignalsRequestItemDecisionsItem) validateType(formats strfmt.Registry) error {
+
+	if err := validate.Required("type", "body", m.Type); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *AddSignalsRequestItemDecisionsItem) validateValue(formats strfmt.Registry) error {
+
+	if err := validate.Required("value", "body", m.Value); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// ContextValidate validate this add signals request item decisions item based on the context it is used
+func (m *AddSignalsRequestItemDecisionsItem) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.contextValidateUUID(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *AddSignalsRequestItemDecisionsItem) contextValidateUUID(ctx context.Context, formats strfmt.Registry) error {
+
+	if err := validate.ReadOnly(ctx, "uuid", "body", string(m.UUID)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *AddSignalsRequestItemDecisionsItem) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *AddSignalsRequestItemDecisionsItem) UnmarshalBinary(b []byte) error {
+	var res AddSignalsRequestItemDecisionsItem
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}

+ 109 - 0
pkg/models/add_signals_request_item_source.go

@@ -0,0 +1,109 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/strfmt"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+)
+
+// AddSignalsRequestItemSource Source
+//
+// swagger:model AddSignalsRequestItemSource
+type AddSignalsRequestItemSource struct {
+
+	// provided as a convenience when the source is an IP
+	AsName string `json:"as_name,omitempty"`
+
+	// provided as a convenience when the source is an IP
+	AsNumber string `json:"as_number,omitempty"`
+
+	// cn
+	Cn string `json:"cn,omitempty"`
+
+	// provided as a convenience when the source is an IP
+	IP string `json:"ip,omitempty"`
+
+	// latitude
+	Latitude float32 `json:"latitude,omitempty"`
+
+	// longitude
+	Longitude float32 `json:"longitude,omitempty"`
+
+	// provided as a convenience when the source is an IP
+	Range string `json:"range,omitempty"`
+
+	// the scope of a source : ip,range,username,etc
+	// Required: true
+	Scope *string `json:"scope"`
+
+	// the value of a source : the ip, the range, the username,etc
+	// Required: true
+	Value *string `json:"value"`
+}
+
+// Validate validates this add signals request item source
+func (m *AddSignalsRequestItemSource) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateScope(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateValue(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *AddSignalsRequestItemSource) validateScope(formats strfmt.Registry) error {
+
+	if err := validate.Required("scope", "body", m.Scope); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *AddSignalsRequestItemSource) validateValue(formats strfmt.Registry) error {
+
+	if err := validate.Required("value", "body", m.Value); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// ContextValidate validates this add signals request item source based on context it is used
+func (m *AddSignalsRequestItemSource) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *AddSignalsRequestItemSource) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *AddSignalsRequestItemSource) UnmarshalBinary(b []byte) error {
+	var res AddSignalsRequestItemSource
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}

+ 18 - 1
pkg/models/alert.go

@@ -50,7 +50,7 @@ type Alert struct {
 	// Required: true
 	Leakspeed *string `json:"leakspeed"`
 
-	// only relevant for APIL->APIC, ignored for cscli->APIL and crowdsec->APIL
+	// only relevant for LAPI->CAPI, ignored for cscli->LAPI and crowdsec->LAPI
 	// Read Only: true
 	MachineID string `json:"machine_id,omitempty"`
 
@@ -91,6 +91,10 @@ type Alert struct {
 	// stop at
 	// Required: true
 	StopAt *string `json:"stop_at"`
+
+	// only relevant for LAPI->CAPI, ignored for cscli->LAPI and crowdsec->LAPI
+	// Read Only: true
+	UUID string `json:"uuid,omitempty"`
 }
 
 // Validate validates this alert
@@ -371,6 +375,10 @@ func (m *Alert) ContextValidate(ctx context.Context, formats strfmt.Registry) er
 		res = append(res, err)
 	}
 
+	if err := m.contextValidateUUID(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
 	if len(res) > 0 {
 		return errors.CompositeValidationError(res...)
 	}
@@ -474,6 +482,15 @@ func (m *Alert) contextValidateSource(ctx context.Context, formats strfmt.Regist
 	return nil
 }
 
+func (m *Alert) contextValidateUUID(ctx context.Context, formats strfmt.Registry) error {
+
+	if err := validate.ReadOnly(ctx, "uuid", "body", string(m.UUID)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
 // MarshalBinary interface implementation
 func (m *Alert) MarshalBinary() ([]byte, error) {
 	if m == nil {

+ 17 - 0
pkg/models/decision.go

@@ -50,6 +50,10 @@ type Decision struct {
 	// the date until the decisions must be active
 	Until string `json:"until,omitempty"`
 
+	// only relevant for LAPI->CAPI, ignored for cscli->LAPI and crowdsec->LAPI
+	// Read Only: true
+	UUID string `json:"uuid,omitempty"`
+
 	// the value of the decision scope : an IP, a range, a username, etc
 	// Required: true
 	Value *string `json:"value"`
@@ -155,6 +159,10 @@ func (m *Decision) ContextValidate(ctx context.Context, formats strfmt.Registry)
 		res = append(res, err)
 	}
 
+	if err := m.contextValidateUUID(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
 	if len(res) > 0 {
 		return errors.CompositeValidationError(res...)
 	}
@@ -179,6 +187,15 @@ func (m *Decision) contextValidateSimulated(ctx context.Context, formats strfmt.
 	return nil
 }
 
+func (m *Decision) contextValidateUUID(ctx context.Context, formats strfmt.Registry) error {
+
+	if err := validate.ReadOnly(ctx, "uuid", "body", string(m.UUID)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
 // MarshalBinary interface implementation
 func (m *Decision) MarshalBinary() ([]byte, error) {
 	if m == nil {

+ 67 - 0
pkg/models/decisions_delete_request.go

@@ -0,0 +1,67 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"strconv"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/strfmt"
+)
+
+// DecisionsDeleteRequest delete decisions
+//
+// delete decision model
+//
+// swagger:model DecisionsDeleteRequest
+type DecisionsDeleteRequest []DecisionsDeleteRequestItem
+
+// Validate validates this decisions delete request
+func (m DecisionsDeleteRequest) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	for i := 0; i < len(m); i++ {
+
+		if err := m[i].Validate(formats); err != nil {
+			if ve, ok := err.(*errors.Validation); ok {
+				return ve.ValidateName(strconv.Itoa(i))
+			} else if ce, ok := err.(*errors.CompositeError); ok {
+				return ce.ValidateName(strconv.Itoa(i))
+			}
+			return err
+		}
+
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+// ContextValidate validate this decisions delete request based on the context it is used
+func (m DecisionsDeleteRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	var res []error
+
+	for i := 0; i < len(m); i++ {
+
+		if err := m[i].ContextValidate(ctx, formats); err != nil {
+			if ve, ok := err.(*errors.Validation); ok {
+				return ve.ValidateName(strconv.Itoa(i))
+			} else if ce, ok := err.(*errors.CompositeError); ok {
+				return ce.ValidateName(strconv.Itoa(i))
+			}
+			return err
+		}
+
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}

+ 27 - 0
pkg/models/decisions_delete_request_item.go

@@ -0,0 +1,27 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+
+	"github.com/go-openapi/strfmt"
+)
+
+// DecisionsDeleteRequestItem decisionsIDs
+//
+// swagger:model DecisionsDeleteRequestItem
+type DecisionsDeleteRequestItem string
+
+// Validate validates this decisions delete request item
+func (m DecisionsDeleteRequestItem) Validate(formats strfmt.Registry) error {
+	return nil
+}
+
+// ContextValidate validates this decisions delete request item based on context it is used
+func (m DecisionsDeleteRequestItem) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	return nil
+}

+ 9 - 41
pkg/models/localapi_swagger.yaml

@@ -733,8 +733,12 @@ definitions:
         description: 'only relevant for GET, ignored in POST requests'
         type: integer
         readOnly: true
+      uuid:
+        description: 'only relevant for LAPI->CAPI, ignored for cscli->LAPI and crowdsec->LAPI'
+        type: string
+        readOnly: true
       machine_id:
-        description: 'only relevant for APIL->APIC, ignored for cscli->APIL and crowdsec->APIL'
+        description: 'only relevant for LAPI->CAPI, ignored for cscli->LAPI and crowdsec->LAPI'
         type: string
         readOnly: true
       created_at:
@@ -890,6 +894,10 @@ definitions:
         description: (only relevant for GET ops) the unique id
         type: integer
         readOnly: true
+      uuid:
+        description: 'only relevant for LAPI->CAPI, ignored for cscli->LAPI and crowdsec->LAPI'
+        type: string
+        readOnly: true
       origin:
         description: 'the origin of the decision : cscli, crowdsec'
         type: string
@@ -999,46 +1007,6 @@ definitions:
         description: "more detail on individual errors"
     title: "error response"
     description: "error response return by the API"
-  AddSignalsRequest:
-    title: "add signals request"
-    type: "array"
-    description: "All signals request model"
-    items:
-      $ref: "#/definitions/AddSignalsRequestItem"
-  AddSignalsRequestItem:
-    type: "object"
-    required:
-    - "message"
-    - "scenario"
-    - "scenario_hash"
-    - "scenario_version"
-    - "source"
-    - "start_at"
-    - "stop_at"
-    - "scenario_trust"
-    properties:
-      scenario_hash:
-        type: "string"
-      scenario:
-        type: "string"
-      created_at:
-        type: "string"
-      machine_id:
-        type: "string"
-      source:
-        $ref: "#/definitions/Source"
-      scenario_version:
-        type: "string"
-      scenario_trust:
-        type: "string"
-      message:
-        type: "string"
-        description: "a human readable message"
-      start_at:
-        type: "string"
-      stop_at:
-        type: "string"
-    title: "Signal"
 tags:
   - name: bouncers
     description: 'Operations about decisions : bans, captcha, rate-limit etc.'

+ 21 - 0
pkg/types/constants.go

@@ -3,3 +3,24 @@ package types
 const ApiKeyAuthType = "api-key"
 const TlsAuthType = "tls"
 const PasswordAuthType = "password"
+
+const PAPIBaseURL = "https://papi.crowdsec.net/v1/decisions/stream/poll"
+const CAPIBaseURL = "https://api.crowdsec.net/"
+
+const CscliOrigin = "cscli"
+const CrowdSecOrigin = "crowdsec"
+const ConsoleOrigin = "console"
+const CscliImportOrigin = "cscli-import"
+const ListOrigin = "lists"
+const CAPIOrigin = "CAPI"
+
+func GetOrigins() []string {
+	return []string{
+		CscliOrigin,
+		CrowdSecOrigin,
+		ConsoleOrigin,
+		CscliImportOrigin,
+		ListOrigin,
+		CAPIOrigin,
+	}
+}