Compare commits
43 commits — master...lapi_to_ca

Commits (SHA1): 42a57e0a5f, d50ea5f994, 40103275df, ff4f653fc8, 7691fbef62, 774a8a681a, 0917d82340, 42f1802a9b, 52cb64e9f5, 708e7b5d8a, 8a485b09f2, 816edfa007, 0b4d812b90, 03a058dff6, 3e8ce3ce19, 23f7499836, f0cee89211, 21d279c66b, ed010fe7d6, e41a334af4, 9acb0e7e7e, e96a267144, 21f83b6a0f, 3046181316, 45df3c9526, 50a065dc5d, 5d6422b8fa, 7432c54254, 8994e57bdc, 64123b7249, 7af17baa8a, 28f13e4b82, 11b1843070, ab141f7f3a, 553e833d75, 0c32f2574b, dd9dca624b, 04f7dbd1f5, 908b04028e, 1372c49505, 03dbce1e50, 2c6a279b9b, 52edfba2b3
37 changed files with 1408 additions and 156 deletions
@@ -2,17 +2,59 @@ package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/fs"
"net/url"
"os"
"strings"
"time"

"github.com/crowdsecurity/crowdsec/pkg/apiclient"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
"github.com/crowdsecurity/crowdsec/pkg/cwversion"
"github.com/crowdsecurity/crowdsec/pkg/database"
"github.com/crowdsecurity/crowdsec/pkg/models"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/enescakir/emoji"
"github.com/go-openapi/strfmt"
"github.com/olekukonko/tablewriter"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
)

func IsInSlice(a string, b []string) bool {
for _, v := range b {
if a == v {
return true
}
}
return false
}

func FetchScenariosListFromDB() ([]string, error) {
scenarios := make([]string, 0)
machines, err := dbClient.ListMachines()
if err != nil {
return nil, fmt.Errorf("while listing machines: %s", err)
}
//merge all scenarios together
for _, v := range machines {
machineScenarios := strings.Split(v.Scenarios, ",")
log.Debugf("%d scenarios for machine %d", len(machineScenarios), v.ID)
for _, sv := range machineScenarios {
if !IsInSlice(sv, scenarios) && sv != "" {
scenarios = append(scenarios, sv)
}
}
}
log.Debugf("Returning list of scenarios : %+v", scenarios)
return scenarios, nil
}

func NewConsoleCmd() *cobra.Command {
var cmdConsole = &cobra.Command{
Use: "console [action]",

@@ -20,12 +62,24 @@ func NewConsoleCmd() *cobra.Command {
Args: cobra.MinimumNArgs(1),
DisableAutoGenTag: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI {
if err := csConfig.LoadAPIServer(); err != nil {
var fdErr *fs.PathError
if errors.As(err, &fdErr) {
log.Fatalf("Unable to load Local API : %s", fdErr)
} else {
log.Fatalf("Unable to load required Local API Configuration : %s", err)
}
}

if csConfig.DisableAPI {
log.Fatal("Local API is disabled, please run this command on the local API machine")
}
if csConfig.API.Server.OnlineClient == nil {
log.Fatalf("no configuration for Central API (CAPI) in '%s'", *csConfig.FilePath)
}
if csConfig.API.Server.OnlineClient.Credentials == nil {
log.Fatal("You must configure Central API (CAPI) with `cscli capi register` before enrolling your instance")
}

return nil
},

@@ -39,21 +93,10 @@ Enroll this instance to https://app.crowdsec.net

You can get your enrollment key by creating an account on https://app.crowdsec.net.
After running this command your will need to validate the enrollment in the webapp.`,
Example: "cscli console enroll YOUR-ENROLL-KEY",
Example: `cscli console enroll YOUR-ENROLL-KEY
`,
Args: cobra.ExactArgs(1),
DisableAutoGenTag: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI {
log.Fatal("Local API is disabled, please run this command on the local API machine")
}
if csConfig.API.Server.OnlineClient == nil {
log.Fatalf("no configuration for Central API (CAPI) in '%s'", *csConfig.FilePath)
}
if csConfig.API.Server.OnlineClient.Credentials == nil {
log.Fatal("You must configure Central API (CAPI) with `cscli capi register` before enrolling your instance")
}
return nil
},
Run: func(cmd *cobra.Command, args []string) {
password := strfmt.Password(csConfig.API.Server.OnlineClient.Credentials.Password)
apiURL, err := url.Parse(csConfig.API.Server.OnlineClient.Credentials.URL)

@@ -94,7 +137,314 @@ After running this command your will need to validate the enrollment in the weba
log.Infof("Watcher successfully enrolled. Visit https://app.crowdsec.net to accept it.")
},
}

cmdConsole.AddCommand(cmdEnroll)

var enableAll, disableAll bool

cmdEnable := &cobra.Command{
Use: "enable [feature-flag]",
Short: "Enable a feature flag",
Example: "enable alerts-tainted",
Long: `
Enable given information push to the central API. Allows to empower the console`,
ValidArgs: csconfig.CONSOLE_CONFIGS,
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
if enableAll {
SetConsoleOpts(csconfig.CONSOLE_CONFIGS, true)
} else {
SetConsoleOpts(args, true)
}

if err := csConfig.API.Server.DumpConsoleConfig(); err != nil {
log.Fatalf("failed writing console config : %s", err)
}
if enableAll {
log.Infof("All features have been enabled successfully")
} else {
log.Infof("%v have been enabled", args)
}
log.Infof(ReloadMessage())
},
}
cmdEnable.Flags().BoolVarP(&enableAll, "all", "a", false, "Enable all feature flags")
cmdConsole.AddCommand(cmdEnable)

cmdDisable := &cobra.Command{
Use: "disable [feature-flag]",
Short: "Disable a feature flag",
Example: "disable alerts-tainted",
Long: `
Disable given information push to the central API.`,
ValidArgs: csconfig.CONSOLE_CONFIGS,
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
if disableAll {
SetConsoleOpts(csconfig.CONSOLE_CONFIGS, false)
} else {
SetConsoleOpts(args, false)
}

if err := csConfig.API.Server.DumpConsoleConfig(); err != nil {
log.Fatalf("failed writing console config : %s", err)
}
if disableAll {
log.Infof("All features have been disabled")
} else {
log.Infof("%v have been disabled", args)
}
log.Infof(ReloadMessage())
},
}
cmdDisable.Flags().BoolVarP(&disableAll, "all", "a", false, "Enable all feature flags")
cmdConsole.AddCommand(cmdDisable)

cmdConsoleStatus := &cobra.Command{
Use: "status [feature-flag]",
Short: "Shows status of one or all feature flags",
Example: "status alerts-tainted",
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
switch csConfig.Cscli.Output {
case "human":
table := tablewriter.NewWriter(os.Stdout)

table.SetHeaderAlignment(tablewriter.ALIGN_LEFT)
table.SetAlignment(tablewriter.ALIGN_LEFT)
table.SetHeader([]string{"Option Name", "Activated", "Description"})
for _, option := range csconfig.CONSOLE_CONFIGS {
switch option {
case csconfig.SEND_CUSTOM_SCENARIOS:
activated := fmt.Sprintf("%s", emoji.CrossMark)
if *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios == true {
activated = fmt.Sprintf("%s", emoji.CheckMarkButton)
}
table.Append([]string{option, activated, "Send alerts from custom scenarios to the console"})
case csconfig.SEND_MANUAL_SCENARIOS:
activated := fmt.Sprintf("%s", emoji.CrossMark)
if *csConfig.API.Server.ConsoleConfig.ShareManualDecisions == true {
activated = fmt.Sprintf("%s", emoji.CheckMarkButton)
}
table.Append([]string{option, activated, "Send manual decisions to the console"})
case csconfig.SEND_TAINTED_SCENARIOS:
activated := fmt.Sprintf("%s", emoji.CrossMark)
if *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios == true {
activated = fmt.Sprintf("%s", emoji.CheckMarkButton)
}
table.Append([]string{option, activated, "Send alerts from tainted scenarios to the console"})
case csconfig.SEND_SIMULATED_DECISIONS:
activated := fmt.Sprintf("%s", emoji.CrossMark)
if *csConfig.API.Server.ConsoleConfig.ShareSimulatedDecisions == true {
activated = fmt.Sprintf("%s", emoji.CheckMarkButton)
}
table.Append([]string{option, activated, "Send alerts from scenarios in simulation mode to the console"})
}
}
table.Render()
case "json":
data, err := json.MarshalIndent(csConfig.API.Server.ConsoleConfig, "", " ")
if err != nil {
log.Fatalf("failed to marshal configuration: %s", err)
}
fmt.Printf("%s\n", string(data))
case "raw":
data, err := yaml.Marshal(csConfig.API.Server.ConsoleConfig)
if err != nil {
log.Fatalf("failed to marshal configuration: %s", err)
}
fmt.Printf("%s\n", string(data))
}
},
}

cmdConsole.AddCommand(cmdConsoleStatus)

cmdConsoleSync := &cobra.Command{
Use: "sync",
Short: "Sync current decisions to console",
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
var err error
if err := csConfig.LoadDBConfig(); err != nil {
log.Errorf("This command requires direct database access (must be run on the local API machine)")
log.Fatalf(err.Error())
}
dbClient, err = database.NewClient(csConfig.DbConfig)
if err != nil {
log.Fatalf("unable to create new database client: %s", err)
}

password := strfmt.Password(csConfig.API.Server.OnlineClient.Credentials.Password)
apiurl, err := url.Parse(csConfig.API.Server.OnlineClient.Credentials.URL)
if err != nil {
log.Fatalf("parsing api url ('%s'): %s", csConfig.API.Server.OnlineClient.Credentials.URL, err)
}

if err := csConfig.LoadHub(); err != nil {
log.Fatalf(err.Error())
}

if err := cwhub.GetHubIdx(csConfig.Hub); err != nil {
log.Fatalf("Failed to load hub index : %s", err)
log.Infoln("Run 'sudo cscli hub update' to get the hub index")
}
scenarios, err := cwhub.GetUpstreamInstalledScenariosAsString()
if err != nil {
log.Fatalf("failed to get scenarios : %s", err.Error())
}
if len(scenarios) == 0 {
log.Fatalf("no scenarios installed, abort")
}

Client, err = apiclient.NewClient(&apiclient.Config{
MachineID: csConfig.API.Server.OnlineClient.Credentials.Login,
Password: password,
UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()),
URL: apiurl,
VersionPrefix: "v2",
Scenarios: scenarios,
UpdateScenario: FetchScenariosListFromDB,
})
if err != nil {
log.Fatalf("init default client: %s", err)
}

filter := make(map[string][]string)
filter["has_active_decision"] = []string{"true"}
alertsWithDecisions, err := dbClient.QueryAlertWithFilter(filter)
if err != nil {
log.Fatalf(err.Error())
}

alertList := make([]*models.Alert, 0)
for _, alert := range alertsWithDecisions {
startAt := alert.StartedAt.String()
StopAt := alert.StoppedAt.String()
formatedAlert := models.Alert{
ID: int64(alert.ID),
MachineID: machineID,
CreatedAt: alert.CreatedAt.Format(time.RFC3339),
Scenario: &alert.Scenario,
ScenarioVersion: &alert.ScenarioVersion,
ScenarioHash: &alert.ScenarioHash,
Message: &alert.Message,
EventsCount: &alert.EventsCount,
StartAt: &startAt,
StopAt: &StopAt,
Capacity: &alert.Capacity,
Leakspeed: &alert.LeakSpeed,
Simulated: &alert.Simulated,
Source: &models.Source{
Scope: &alert.SourceScope,
Value: &alert.SourceValue,
IP: alert.SourceIp,
Range: alert.SourceRange,
AsNumber: alert.SourceAsNumber,
AsName: alert.SourceAsName,
Cn: alert.SourceCountry,
Latitude: alert.SourceLatitude,
Longitude: alert.SourceLongitude,
},
}
for _, decisionItem := range alert.Edges.Decisions {
duration := decisionItem.Until.Sub(time.Now()).String()
formatedAlert.Decisions = append(formatedAlert.Decisions, &models.Decision{
Duration: &duration, // transform into time.Time ?
Scenario: &decisionItem.Scenario,
Type: &decisionItem.Type,
Scope: &decisionItem.Scope,
Value: &decisionItem.Value,
Origin: &decisionItem.Origin,
Simulated: formatedAlert.Simulated,
ID: int64(decisionItem.ID),
})
}
alertList = append(alertList, &formatedAlert)
}
_, _, err = Client.Decisions.SyncDecisions(context.Background(), alertList)
if err != nil {
log.Fatalf("unable to sync decisions with console: %s", err.Error())
}
log.Infof("Decisions have been synchronized successfully")
},
}

cmdConsole.AddCommand(cmdConsoleSync)

return cmdConsole
}

func SetConsoleOpts(args []string, wanted bool) {
for _, arg := range args {
switch arg {
case csconfig.SEND_CUSTOM_SCENARIOS:
/*for each flag check if it's already set before setting it*/
if csConfig.API.Server.ConsoleConfig.ShareCustomScenarios != nil {
if *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios == wanted {
log.Infof("%s already set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted)
} else {
log.Infof("%s set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted)
*csConfig.API.Server.ConsoleConfig.ShareCustomScenarios = wanted
}
} else {
log.Infof("%s set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted)
csConfig.API.Server.ConsoleConfig.ShareCustomScenarios = types.BoolPtr(wanted)
}
case csconfig.SEND_TAINTED_SCENARIOS:
/*for each flag check if it's already set before setting it*/
if csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios != nil {
if *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios == wanted {
log.Infof("%s already set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted)
} else {
log.Infof("%s set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted)
*csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios = wanted
}
} else {
log.Infof("%s set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted)
csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios = types.BoolPtr(wanted)
}
case csconfig.SEND_MANUAL_SCENARIOS:
/*for each flag check if it's already set before setting it*/
if csConfig.API.Server.ConsoleConfig.ShareManualDecisions != nil {
if *csConfig.API.Server.ConsoleConfig.ShareManualDecisions == wanted {
log.Infof("%s already set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted)
} else {
log.Infof("%s set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted)
*csConfig.API.Server.ConsoleConfig.ShareManualDecisions = wanted
}
} else {
log.Infof("%s set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted)
csConfig.API.Server.ConsoleConfig.ShareManualDecisions = types.BoolPtr(wanted)
}
case csconfig.SEND_LIVE_DECISIONS:
/*for each flag check if it's already set before setting it*/
if csConfig.API.Server.ConsoleConfig.ShareDecisions != nil {
if *csConfig.API.Server.ConsoleConfig.ShareDecisions == wanted {
log.Infof("%s already set to %t", csconfig.SEND_LIVE_DECISIONS, wanted)
} else {
log.Infof("%s set to %t", csconfig.SEND_LIVE_DECISIONS, wanted)
*csConfig.API.Server.ConsoleConfig.ShareDecisions = wanted
}
} else {
log.Infof("%s set to %t", csconfig.SEND_LIVE_DECISIONS, wanted)
csConfig.API.Server.ConsoleConfig.ShareDecisions = types.BoolPtr(wanted)
}
case csconfig.SEND_SIMULATED_DECISIONS:
/*for each flag check if it's already set before setting it*/
if csConfig.API.Server.ConsoleConfig.ShareSimulatedDecisions != nil {
if *csConfig.API.Server.ConsoleConfig.ShareSimulatedDecisions == wanted {
log.Infof("%s already set to %t", csconfig.SEND_SIMULATED_DECISIONS, wanted)
} else {
log.Infof("%s set to %t", csconfig.SEND_SIMULATED_DECISIONS, wanted)
*csConfig.API.Server.ConsoleConfig.ShareSimulatedDecisions = wanted
}
} else {
log.Infof("%s set to %t", csconfig.SEND_SIMULATED_DECISIONS, wanted)
csConfig.API.Server.ConsoleConfig.ShareSimulatedDecisions = types.BoolPtr(wanted)
}
default:
log.Fatalf("unknown flag %s", arg)
}
}

}
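Side note on the *bool pattern used by SetConsoleOpts above: pointer-to-bool fields let the code tell "option never set" (nil) apart from "explicitly false", which is why every branch checks for nil before comparing or assigning. A minimal, self-contained sketch of the same idea follows; the BoolPtr helper is written out locally so the snippet stands alone (crowdsec has its own helper in pkg/types), and the option names are illustrative only.

package main

import "fmt"

// BoolPtr returns a pointer to the given bool; handy for tri-state options.
func BoolPtr(b bool) *bool { return &b }

// setOpt mirrors the nil-check-then-assign pattern of SetConsoleOpts.
func setOpt(current *bool, wanted bool) *bool {
	if current == nil {
		fmt.Printf("set to %t (was unset)\n", wanted)
		return BoolPtr(wanted)
	}
	if *current == wanted {
		fmt.Printf("already set to %t\n", wanted)
		return current
	}
	fmt.Printf("set to %t\n", wanted)
	*current = wanted
	return current
}

func main() {
	var shareTainted *bool                    // nil: never configured
	shareTainted = setOpt(shareTainted, true) // set to true (was unset)
	shareTainted = setOpt(shareTainted, true) // already set to true
}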
@@ -26,8 +26,7 @@ func DecisionsToTable(alerts *models.GetAlertsResponse) error {
/*here we cheat a bit : to make it more readable for the user, we dedup some entries*/
var spamLimit map[string]bool = make(map[string]bool)

/*process in reverse order to keep the latest item only*/
for aIdx := len(*alerts) - 1; aIdx >= 0; aIdx-- {
for aIdx := 0; aIdx < len(*alerts); aIdx++ {
alertItem := (*alerts)[aIdx]
newDecisions := make([]*models.Decision, 0)
for _, decisionItem := range alertItem.Decisions {

@@ -227,7 +226,6 @@ cscli decisions list -t ban
if err != nil {
log.Fatalf("Unable to list decisions : %v", err.Error())
}

err = DecisionsToTable(alerts)
if err != nil {
log.Fatalf("unable to list decisions : %v", err.Error())

@@ -273,7 +271,7 @@ cscli decisions add --scope username --value foobar
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
var err error
var ip, ipRange string
var ipRange string
alerts := models.AddAlertsRequest{}
origin := "cscli"
capacity := int32(0)

@@ -329,7 +327,7 @@ cscli decisions add --scope username --value foobar
AsName: empty,
AsNumber: empty,
Cn: empty,
IP: ip,
IP: addValue,
Range: ipRange,
Scope: &addScope,
Value: &addValue,
@@ -41,6 +41,7 @@ api:
log_level: info
listen_uri: 127.0.0.1:8080
profiles_path: /etc/crowdsec/profiles.yaml
console_path: /etc/crowdsec/console_config.yaml
online_client: # Central API credentials (to push signals and receive bad IPs)
credentials_path: /etc/crowdsec/online_api_credentials.yaml
# tls:
config/console_config.yaml (new file, +4)
@@ -0,0 +1,4 @@
share_manual_decisions: false
share_custom: false
share_tainted: false
share_decisions: false
debian/rules (vendored, +1)
@@ -47,4 +47,5 @@ override_dh_auto_install:
cp config/config.yaml debian/crowdsec/etc/crowdsec/config.yaml
cp config/simulation.yaml debian/crowdsec/etc/crowdsec/simulation.yaml
cp config/profiles.yaml debian/crowdsec/etc/crowdsec/profiles.yaml
cp config/console_config.yaml debian/crowdsec/etc/crowdsec/console_config.yaml
cp -a config/patterns debian/crowdsec/etc/crowdsec
@@ -230,7 +230,7 @@ func TestWatcherEnroll(t *testing.T) {

_, err = client.Auth.EnrollWatcher(context.Background(), "goodkey")
if err != nil {
t.Fatalf("unexpect auth err: %s", err)
t.Fatalf("unexpect enroll err: %s", err)
}

_, err = client.Auth.EnrollWatcher(context.Background(), "badkey")
@@ -32,6 +32,10 @@ type DecisionsDeleteOpts struct {
ListOpts
}

type SuccessReponse struct {
Message string `json:"message"`
}

//to demo query arguments
func (s *DecisionsService) List(ctx context.Context, opts DecisionsListOpts) (*models.GetDecisionsResponse, *Response, error) {
var decisions models.GetDecisionsResponse

@@ -122,3 +126,35 @@ func (s *DecisionsService) DeleteOne(ctx context.Context, decision_id string) (*
}
return &deleteDecisionResponse, resp, nil
}

// send to CAPI manually deleted decisions
func (s *DecisionsService) DeleteManualDecisions(ctx context.Context, decisionsID []string) (*SuccessReponse, *Response, error) {
var successReponse SuccessReponse
u := fmt.Sprintf("%s/decisions/delete", s.client.URLPrefix)

req, err := s.client.NewRequest("POST", u, &decisionsID)
if err != nil {
return nil, nil, err
}
resp, err := s.client.Do(ctx, req, &successReponse)
if err != nil {
return nil, resp, err
}
return &successReponse, resp, nil
}

func (s *DecisionsService) SyncDecisions(ctx context.Context, decisions []*models.Alert) (*SuccessReponse, *Response, error) {
var successReponse SuccessReponse

u := fmt.Sprintf("%s/decisions/sync", s.client.URLPrefix)
req, err := s.client.NewRequest("POST", u, &decisions)
if err != nil {
return nil, nil, err
}
resp, err := s.client.Do(ctx, req, &successReponse)
if err != nil {
return nil, resp, err
}

return &successReponse, resp, nil
}
@@ -41,9 +41,12 @@ type apic struct {
pushTomb tomb.Tomb
pullTomb tomb.Tomb
metricsTomb tomb.Tomb
deleteDecisionsTomb tomb.Tomb
startup bool
credentials *csconfig.ApiCredentialsCfg
scenarioList []string
consoleConfig *csconfig.ConsoleConfig
decisionsToDelete chan []string
}

func IsInSlice(a string, b []string) bool {

@@ -75,8 +78,8 @@ func (a *apic) FetchScenariosListFromDB() ([]string, error) {
return scenarios, nil
}

func AlertToSignal(alert *models.Alert) *models.AddSignalsRequestItem {
return &models.AddSignalsRequestItem{
func AlertToSignal(alert *models.Alert, scenarioTrust string, keepDecisions bool) *models.AddSignalsRequestItem {
signal := &models.AddSignalsRequestItem{
Message: alert.Message,
Scenario: alert.Scenario,
ScenarioHash: alert.ScenarioHash,

@@ -86,10 +89,17 @@ func AlertToSignal(alert *models.Alert) *models.AddSignalsRequestItem {
StopAt: alert.StopAt,
CreatedAt: alert.CreatedAt,
MachineID: alert.MachineID,
ScenarioTrust: &scenarioTrust,
AlertID: &alert.ID,
}
if keepDecisions {
log.Debugf("Keeping decisions to send to CAPI")
signal.Decisions = alert.Decisions
}
return signal
}

func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client) (*apic, error) {
func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, consoleConfig *csconfig.ConsoleConfig) (*apic, error) {
var err error
ret := &apic{
alertToPush: make(chan []*models.Alert),

@@ -101,6 +111,8 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client) (*a
pushTomb: tomb.Tomb{},
metricsTomb: tomb.Tomb{},
scenarioList: make([]string, 0),
decisionsToDelete: make(chan []string),
consoleConfig: consoleConfig,
}

ret.pullInterval, err = time.ParseDuration(PullInterval)

@@ -149,6 +161,7 @@ func (a *apic) Push() error {
case <-a.pushTomb.Dying(): // if one apic routine is dying, do we kill the others?
a.pullTomb.Kill(nil)
a.metricsTomb.Kill(nil)
a.deleteDecisionsTomb.Kill(nil)
log.Infof("push tomb is dying, sending cache (%d elements) before exiting", len(cache))
if len(cache) == 0 {
return nil

@@ -167,20 +180,42 @@ func (a *apic) Push() error {
case alerts := <-a.alertToPush:
var signals []*models.AddSignalsRequestItem
for _, alert := range alerts {
/*we're only interested into decisions coming from scenarios of the hub*/
if alert.ScenarioHash == nil || *alert.ScenarioHash == "" {
continue
}
/*and we're not interested into tainted scenarios neither*/
if alert.ScenarioVersion == nil || *alert.ScenarioVersion == "" || *alert.ScenarioVersion == "?" {
continue
}
/*we also ignore alerts in simulated mode*/
if *alert.Simulated {
log.Debugf("simulation enabled for alert (id:%d), will not be sent to CAPI", alert.ID)
continue
}
signals = append(signals, AlertToSignal(alert))
scenarioTrust := "certified"
if alert.ScenarioHash == nil || *alert.ScenarioHash == "" {
scenarioTrust = "custom"
}
if alert.ScenarioVersion == nil || *alert.ScenarioVersion == "" || *alert.ScenarioVersion == "?" {
scenarioTrust = "tainted"
}
if len(alert.Decisions) > 0 {
if *alert.Decisions[0].Origin == "cscli" {
scenarioTrust = "manual"
}
}
switch scenarioTrust {
case "manual":
if !*a.consoleConfig.ShareManualDecisions {
log.Debugf("manual decision generated an alert, doesn't send it to CAPI because options is disabled")
continue
}
case "tainted":
if !*a.consoleConfig.ShareTaintedScenarios {
log.Debugf("tainted scenario generated an alert, doesn't send it to CAPI because options is disabled")
continue
}
case "custom":
if !*a.consoleConfig.ShareCustomScenarios {
log.Debugf("custom scenario generated an alert, doesn't send it to CAPI because options is disabled")
continue
}
}

log.Infof("Add signals for '%s' alert", scenarioTrust)
signals = append(signals, AlertToSignal(alert, scenarioTrust, *a.consoleConfig.ShareDecisions))
}
a.mu.Lock()
cache = append(cache, signals...)

@@ -189,6 +224,57 @@ func (a *apic) Push() error {
}
}

func (a *apic) DeleteDecisions() error {
defer types.CatchPanic("lapi/deleteDecisionsCAPI")

log.Infof("start goroutine for manual deleted decisions")

for {
select {
case <-a.pushTomb.Dying(): // if one apic routine is dying, do we kill the others?
a.pullTomb.Kill(nil)
a.metricsTomb.Kill(nil)
a.pushTomb.Kill(nil)
return nil
case decisions := <-a.decisionsToDelete:
go a.SendDeletedDecisions(decisions)
}
}
}

func (a *apic) SendDeletedDecisions(deletedDecisions []string) {
var send []string

bulkSize := 50
pageStart := 0
pageEnd := bulkSize

for {

if pageEnd >= len(deletedDecisions) {
send = deletedDecisions[pageStart:]
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_, _, err := a.apiClient.Decisions.DeleteManualDecisions(ctx, send)
if err != nil {
log.Errorf("Error while sending final chunk to central API : %s", err)
return
}
break
}
send = deletedDecisions[pageStart:pageEnd]
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_, _, err := a.apiClient.Decisions.DeleteManualDecisions(ctx, send)
if err != nil {
//we log it here as well, because the return value of func might be discarded
log.Errorf("Error while sending chunk to central API : %s", err)
}
pageStart += bulkSize
pageEnd += bulkSize
}
}
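SendDeletedDecisions above walks the ID list in fixed-size pages of bulkSize (50), sending a shorter final page before breaking out. A generic sketch of the same slicing logic, using only the standard library (the chunk size and the printed output are illustrative, not crowdsec API):

package main

import "fmt"

// chunk splits items into slices of at most size elements, preserving order.
func chunk(items []string, size int) [][]string {
	var pages [][]string
	for start := 0; start < len(items); start += size {
		end := start + size
		if end > len(items) {
			end = len(items)
		}
		pages = append(pages, items[start:end])
	}
	return pages
}

func main() {
	ids := []string{"1", "2", "3", "4", "5"}
	for _, page := range chunk(ids, 2) {
		fmt.Println(page) // [1 2], then [3 4], then [5]
	}
}

One caveat worth flagging in the original loop: defer cancel() inside a for loop only fires when the whole function returns, so the timeout contexts of earlier pages stay alive until the end; calling cancel() immediately after each request would release them sooner.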
func (a *apic) Send(cacheOrig *models.AddSignalsRequest) {
/*we do have a problem with this :
The apic.Push background routine reads from alertToPush chan.

@@ -361,6 +447,7 @@ func (a *apic) Pull() error {
case <-a.pullTomb.Dying(): // if one apic routine is dying, do we kill the others?
a.metricsTomb.Kill(nil)
a.pushTomb.Kill(nil)
a.deleteDecisionsTomb.Kill(nil)
return nil
}
}

@@ -377,8 +464,8 @@ func (a *apic) SendMetrics() error {
version := cwversion.VersionStr()
metric := &models.Metrics{
ApilVersion: &version,
Machines: make([]*models.MetricsSoftInfo, 0),
Bouncers: make([]*models.MetricsSoftInfo, 0),
Machines: make([]*models.MetricsAgentInfo, 0),
Bouncers: make([]*models.MetricsBouncerInfo, 0),
}
machines, err := a.dbClient.ListMachines()
if err != nil {

@@ -389,17 +476,21 @@ func (a *apic) SendMetrics() error {
return err
}
for _, machine := range machines {
m := &models.MetricsSoftInfo{
m := &models.MetricsAgentInfo{
Version: machine.Version,
Name: machine.MachineId,
LastUpdate: machine.UpdatedAt.String(),
LastPush: machine.LastPush.String(),
}
metric.Machines = append(metric.Machines, m)
}

for _, bouncer := range bouncers {
m := &models.MetricsSoftInfo{
m := &models.MetricsBouncerInfo{
Version: bouncer.Version,
CustomName: bouncer.Name,
Name: bouncer.Type,
LastPull: bouncer.LastPull.String(),
}
metric.Bouncers = append(metric.Bouncers, m)
}

@@ -411,6 +502,7 @@ func (a *apic) SendMetrics() error {
case <-a.metricsTomb.Dying(): // if one apic routine is dying, do we kill the others?
a.pullTomb.Kill(nil)
a.pushTomb.Kill(nil)
a.deleteDecisionsTomb.Kill(nil)
return nil
}
}
@@ -38,6 +38,7 @@ type APIServer struct {
httpServer *http.Server
apic *apic
httpServerTomb tomb.Tomb
consoleConfig *csconfig.ConsoleConfig
}

// RecoveryWithWriter returns a middleware for a given writer that recovers from any panics and writes a 500 if there was one.

@@ -165,26 +166,34 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) {
return
})
router.Use(CustomRecoveryWithWriter())

controller := &controllers.Controller{
DBClient: dbClient,
Ectx: context.Background(),
Router: router,
Profiles: config.Profiles,
Log: clog,
ConsoleConfig: config.ConsoleConfig,
}

var apiClient *apic

if config.OnlineClient != nil && config.OnlineClient.Credentials != nil {
log.Printf("Loading CAPI pusher")
apiClient, err = NewAPIC(config.OnlineClient, dbClient)
apiClient, err = NewAPIC(config.OnlineClient, dbClient, config.ConsoleConfig)
if err != nil {
return &APIServer{}, err
}
controller.CAPIChan = apiClient.alertToPush
if *config.ConsoleConfig.ShareDecisions {
controller.DeleteDecisionChannel = apiClient.decisionsToDelete
} else {
controller.DeleteDecisionChannel = nil
}
} else {
apiClient = nil
controller.CAPIChan = nil
controller.DeleteDecisionChannel = nil
}

return &APIServer{

@@ -197,6 +206,7 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) {
router: router,
apic: apiClient,
httpServerTomb: tomb.Tomb{},
consoleConfig: config.ConsoleConfig,
}, nil

}

@@ -235,6 +245,15 @@ func (s *APIServer) Run() error {
}
return nil
})
if *s.apic.consoleConfig.ShareDecisions {
s.apic.deleteDecisionsTomb.Go(func() error {
if err := s.apic.DeleteDecisions(); err != nil {
log.Errorf("capi send deleted decisions: %s", err)
return err
}
return nil
})
}
}

s.httpServerTomb.Go(func() error {
@@ -49,6 +49,13 @@ func LoadTestConfig() csconfig.Config {
ListenURI: "http://127.0.0.1:8080",
DbConfig: &dbconfig,
ProfilesPath: "./tests/profiles.yaml",
ConsoleConfig: &csconfig.ConsoleConfig{
ShareManualDecisions: new(bool),
ShareTaintedScenarios: new(bool),
ShareCustomScenarios: new(bool),
ShareDecisions: new(bool),
ShareSimulatedDecisions: new(bool),
},
}
apiConfig := csconfig.APICfg{
Server: &apiServerConfig,

@@ -76,6 +83,13 @@ func LoadTestConfigForwardedFor() csconfig.Config {
DbConfig: &dbconfig,
ProfilesPath: "./tests/profiles.yaml",
UseForwardedForHeaders: true,
ConsoleConfig: &csconfig.ConsoleConfig{
ShareManualDecisions: new(bool),
ShareTaintedScenarios: new(bool),
ShareCustomScenarios: new(bool),
ShareDecisions: new(bool),
ShareSimulatedDecisions: new(bool),
},
}
apiConfig := csconfig.APICfg{
Server: &apiServerConfig,
@@ -2,6 +2,8 @@ package controllers

import (
"context"
"net/http"

"github.com/alexliesenfeld/health"
v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"

@@ -10,7 +12,6 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/models"
"github.com/gin-gonic/gin"
log "github.com/sirupsen/logrus"
"net/http"
)

type Controller struct {

@@ -21,6 +22,8 @@ type Controller struct {
CAPIChan chan []*models.Alert
PluginChannel chan csplugin.ProfileAlert
Log *log.Logger
ConsoleConfig *csconfig.ConsoleConfig
DeleteDecisionChannel chan []string
}

func (c *Controller) Init() error {

@@ -51,7 +54,7 @@ func serveHealth() http.HandlerFunc {
}

func (c *Controller) NewV1() error {
handlerV1, err := v1.New(c.DBClient, c.Ectx, c.Profiles, c.CAPIChan, c.PluginChannel)
handlerV1, err := v1.New(c.DBClient, c.Ectx, c.Profiles, c.CAPIChan, c.PluginChannel, *c.ConsoleConfig, c.DeleteDecisionChannel)
if err != nil {
return err
}
@@ -126,36 +126,27 @@ func (c *Controller) CreateAlert(gctx *gin.Context) {
return
}

for _, alert := range input {
alert.MachineID = machineID
if len(alert.Decisions) != 0 {
for pIdx, profile := range c.Profiles {
_, matched, err := csprofiles.EvaluateProfile(profile, alert)
if err != nil {
gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
if err := c.DBClient.UpdateMachineLastPush(machineID); err != nil {
c.HandleDBErrors(gctx, err)
return
}
if !matched {
continue
}
c.sendAlertToPluginChannel(alert, uint(pIdx))
if profile.OnSuccess == "break" {
break
}
}
continue
}

for _, alert := range input {
alert.MachineID = machineID
for pIdx, profile := range c.Profiles {
profileDecisions, matched, err := csprofiles.EvaluateProfile(profile, alert)
if err != nil {
gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
return
}

if !matched {
continue
}

if len(alert.Decisions) == 0 { // non manual decision
alert.Decisions = append(alert.Decisions, profileDecisions...)
}
profileAlert := *alert
c.sendAlertToPluginChannel(&profileAlert, uint(pIdx))
if profile.OnSuccess == "break" {

@@ -164,7 +155,7 @@ func (c *Controller) CreateAlert(gctx *gin.Context) {
}
}

alerts, err := c.DBClient.CreateAlert(machineID, input)
alertsID, alertsToSend, err := c.DBClient.CreateAlert(machineID, input)
if err != nil {
c.HandleDBErrors(gctx, err)
return

@@ -172,14 +163,14 @@ func (c *Controller) CreateAlert(gctx *gin.Context) {

if c.CAPIChan != nil {
select {
case c.CAPIChan <- input:
case c.CAPIChan <- alertsToSend:
log.Debug("alert sent to CAPI channel")
default:
log.Warning("Cannot send alert to Central API channel")
}
}

gctx.JSON(http.StatusCreated, alerts)
gctx.JSON(http.StatusCreated, alertsID)
return
}
@@ -18,9 +18,11 @@ type Controller struct {
Profiles []*csconfig.ProfileCfg
CAPIChan chan []*models.Alert
PluginChannel chan csplugin.ProfileAlert
DeleteDecisionsChannel chan []string
ConsoleConfig csconfig.ConsoleConfig
}

func New(dbClient *database.Client, ctx context.Context, profiles []*csconfig.ProfileCfg, capiChan chan []*models.Alert, pluginChannel chan csplugin.ProfileAlert) (*Controller, error) {
func New(dbClient *database.Client, ctx context.Context, profiles []*csconfig.ProfileCfg, capiChan chan []*models.Alert, pluginChannel chan csplugin.ProfileAlert, consoleConfig csconfig.ConsoleConfig, deleteDecisionsChannel chan []string) (*Controller, error) {
var err error
v1 := &Controller{
Ectx: ctx,

@@ -29,6 +31,8 @@ func New(dbClient *database.Client, ctx context.Context, profiles []*csconfig.Pr
Profiles: profiles,
CAPIChan: capiChan,
PluginChannel: pluginChannel,
DeleteDecisionsChannel: deleteDecisionsChannel,
ConsoleConfig: consoleConfig,
}
v1.Middlewares, err = middlewares.NewMiddlewares(dbClient)
if err != nil {
@@ -83,12 +83,34 @@ func (c *Controller) DeleteDecisionById(gctx *gin.Context) {
NbDeleted: "1",
}

if *c.ConsoleConfig.ShareDecisions {
if c.DeleteDecisionsChannel != nil {
select {
case c.DeleteDecisionsChannel <- []string{decisionIDStr}:
log.Debug("alert sent to delete decisions channel")
default:
log.Warning("Cannot send alert to delete decisions channel")
}
}
}

gctx.JSON(http.StatusOK, deleteDecisionResp)
return
}

func (c *Controller) DeleteDecisions(gctx *gin.Context) {
var err error
decisionsIDToDelete := make([]string, 0)

if *c.ConsoleConfig.ShareDecisions {
decisionsToDelete, err := c.DBClient.QueryDecisionWithFilter(gctx.Request.URL.Query())
if err != nil {
log.Errorf("unable to list decisions to delete to send to console: %s", err)
}
for _, decision := range decisionsToDelete {
decisionsIDToDelete = append(decisionsIDToDelete, strconv.Itoa(decision.ID))
}
}

nbDeleted, err := c.DBClient.SoftDeleteDecisionsWithFilter(gctx.Request.URL.Query())
if err != nil {

@@ -99,6 +121,17 @@ func (c *Controller) DeleteDecisions(gctx *gin.Context) {
NbDeleted: nbDeleted,
}

if len(decisionsIDToDelete) > 0 {
if c.DeleteDecisionsChannel != nil {
select {
case c.DeleteDecisionsChannel <- decisionsIDToDelete:
log.Debug("alert sent to delete decisions channel")
default:
log.Warning("Cannot send alert to delete decisions channel")
}
}
}

gctx.JSON(http.StatusOK, deleteDecisionResp)
return
}
@@ -85,9 +85,11 @@ type LocalApiServerCfg struct {
LogMedia string `yaml:"-"`
OnlineClient *OnlineApiClientCfg `yaml:"online_client"`
ProfilesPath string `yaml:"profiles_path,omitempty"`
ConsoleConfigPath string `yaml:"console_path,omitempty"`
Profiles []*ProfileCfg `yaml:"-"`
LogLevel *log.Level `yaml:"log_level"`
UseForwardedForHeaders bool `yaml:"use_forwarded_for_headers,omitempty"`
ConsoleConfig *ConsoleConfig `yaml:"-"`
}

type TLSCfg struct {

@@ -105,6 +107,13 @@ func (c *Config) LoadAPIServer() error {
if err := c.API.Server.LoadProfiles(); err != nil {
return errors.Wrap(err, "while loading profiles for LAPI")
}
if c.API.Server.ConsoleConfigPath == "" {
c.API.Server.ConsoleConfigPath = DefaultConsoleConfgFilePath
}
if err := c.API.Server.LoadConsoleConfig(); err != nil {
return errors.Wrap(err, "while loading console options")
}

if c.API.Server.OnlineClient != nil && c.API.Server.OnlineClient.CredentialsFilePath != "" {
if err := c.API.Server.OnlineClient.Load(); err != nil {
return errors.Wrap(err, "loading online client credentials")
@@ -206,6 +206,14 @@ func TestLoadAPIServer(t *testing.T) {
DbPath: "./tests/test.db",
Type: "sqlite",
},
ConsoleConfigPath: "/etc/crowdsec/console_config.yaml",
ConsoleConfig: &ConsoleConfig{
ShareManualDecisions: new(bool),
ShareTaintedScenarios: new(bool),
ShareCustomScenarios: new(bool),
ShareDecisions: new(bool),
ShareSimulatedDecisions: new(bool),
},
LogDir: LogDirFullPath,
LogMedia: "stdout",
OnlineClient: &OnlineApiClientCfg{
pkg/csconfig/console.go (new file, +96)
@@ -0,0 +1,96 @@
package csconfig

import (
"fmt"
"io/ioutil"
"os"

"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)

const (
SEND_CUSTOM_SCENARIOS = "custom"
SEND_TAINTED_SCENARIOS = "tainted"
SEND_MANUAL_SCENARIOS = "manual"
SEND_LIVE_DECISIONS = "live_decisions"
SEND_SIMULATED_DECISIONS = "simulated_decisions"
)

var DefaultConsoleConfgFilePath = "/etc/crowdsec/console_config.yaml"

var CONSOLE_CONFIGS = []string{SEND_CUSTOM_SCENARIOS, SEND_LIVE_DECISIONS, SEND_MANUAL_SCENARIOS, SEND_TAINTED_SCENARIOS, SEND_SIMULATED_DECISIONS}

type ConsoleConfig struct {
ShareManualDecisions *bool `yaml:"share_manual_decisions"`
ShareTaintedScenarios *bool `yaml:"share_custom"`
ShareCustomScenarios *bool `yaml:"share_tainted"`
ShareDecisions *bool `yaml:"share_decisions"`
ShareSimulatedDecisions *bool `yaml:"share_simulated_decisions"`
}

func (c *LocalApiServerCfg) LoadConsoleConfig() error {
c.ConsoleConfig = &ConsoleConfig{}
if _, err := os.Stat(c.ConsoleConfigPath); err != nil && os.IsNotExist(err) {
log.Debugf("no console configuration to load")
c.ConsoleConfig.ShareCustomScenarios = new(bool)
c.ConsoleConfig.ShareTaintedScenarios = new(bool)
c.ConsoleConfig.ShareManualDecisions = new(bool)
c.ConsoleConfig.ShareDecisions = new(bool)
c.ConsoleConfig.ShareSimulatedDecisions = new(bool)
return nil
}

yamlFile, err := ioutil.ReadFile(c.ConsoleConfigPath)
if err != nil {
return fmt.Errorf("reading console config file '%s': %s", c.ConsoleConfigPath, err)
}
err = yaml.Unmarshal(yamlFile, c.ConsoleConfig)
if err != nil {
return fmt.Errorf("unmarshaling console config file '%s': %s", c.ConsoleConfigPath, err)
}

if c.ConsoleConfig.ShareCustomScenarios == nil {
log.Debugf("no share_custom scenarios found, setting to false")
c.ConsoleConfig.ShareCustomScenarios = new(bool)
}
if c.ConsoleConfig.ShareTaintedScenarios == nil {
log.Debugf("no share_tainted scenarios found, setting to false")
c.ConsoleConfig.ShareTaintedScenarios = new(bool)
}
if c.ConsoleConfig.ShareManualDecisions == nil {
log.Debugf("no share_manual scenarios found, setting to false")
c.ConsoleConfig.ShareManualDecisions = new(bool)
}
if c.ConsoleConfig.ShareDecisions == nil {
log.Debugf("no share_decisions scenarios found, setting to false")
c.ConsoleConfig.ShareDecisions = new(bool)
}
if c.ConsoleConfig.ShareSimulatedDecisions == nil {
log.Debugf("no share_simulated_decisions scenarios found, setting to false")
c.ConsoleConfig.ShareSimulatedDecisions = new(bool)
}
log.Debugf("Console configuration '%s' loaded successfully", c.ConsoleConfigPath)

return nil
}

func (c *LocalApiServerCfg) DumpConsoleConfig() error {
var out []byte
var err error

if out, err = yaml.Marshal(c.ConsoleConfig); err != nil {
return errors.Wrapf(err, "while marshaling ConsoleConfig (for %s)", c.ConsoleConfigPath)
}
if c.ConsoleConfigPath == "" {
log.Debugf("Empty console_path, defaulting to %s", DefaultConsoleConfgFilePath)
c.ConsoleConfigPath = DefaultConsoleConfgFilePath
}

if err := os.WriteFile(c.ConsoleConfigPath, out, 0600); err != nil {
return errors.Wrapf(err, "while dumping console config to %s", c.ConsoleConfigPath)
}

return nil
}
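A hedged usage sketch of the YAML mapping that LoadConsoleConfig relies on: each *bool field unmarshals from its yaml tag, and a key missing from the file comes back as nil, which LoadConsoleConfig then backfills with new(bool), i.e. false. The snippet below only exercises that mechanism with gopkg.in/yaml.v2; the consoleOpts type and its two fields are a stand-in, not the crowdsec struct itself.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// consoleOpts mirrors the shape of ConsoleConfig: *bool keeps "absent" distinguishable from "false".
type consoleOpts struct {
	ShareManualDecisions *bool `yaml:"share_manual_decisions"`
	ShareDecisions       *bool `yaml:"share_decisions"`
}

func main() {
	raw := []byte("share_manual_decisions: true\n") // share_decisions intentionally omitted
	var opts consoleOpts
	if err := yaml.Unmarshal(raw, &opts); err != nil {
		panic(err)
	}
	fmt.Println(*opts.ShareManualDecisions) // true
	fmt.Println(opts.ShareDecisions == nil) // true: missing key stays nil until backfilled
}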
@@ -90,28 +90,31 @@ func formatAlertAsString(machineId string, alert *models.Alert) []string {
return retStr
}

func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]string, error) {
func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]string, []*models.Alert, error) {
pageStart := 0
pageEnd := bulkSize
ret := []string{}
alertListRet := make([]*models.Alert, 0)
for {
if pageEnd >= len(alertList) {
results, err := c.CreateAlertBulk(machineID, alertList[pageStart:])
results, alerts, err := c.CreateAlertBulk(machineID, alertList[pageStart:])
if err != nil {
return []string{}, fmt.Errorf("unable to create alerts: %s", err)
return []string{}, alertListRet, fmt.Errorf("unable to create alerts: %s", err)
}
ret = append(ret, results...)
alertListRet = append(alertListRet, alerts...)
break
}
results, err := c.CreateAlertBulk(machineID, alertList[pageStart:pageEnd])
results, alerts, err := c.CreateAlertBulk(machineID, alertList[pageStart:pageEnd])
if err != nil {
return []string{}, fmt.Errorf("unable to create alerts: %s", err)
return []string{}, alertListRet, fmt.Errorf("unable to create alerts: %s", err)
}
ret = append(ret, results...)
alertListRet = append(alertListRet, alerts...)
pageStart += bulkSize
pageEnd += bulkSize
}
return ret, nil
return ret, alertListRet, nil
}

/*We can't bulk both the alert and the decision at the same time. With new consensus, we want to bulk a single alert with a lot of decisions.*/

@@ -278,7 +281,7 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in
return alertRef.ID, inserted, deleted, nil
}

func (c *Client) CreateAlertBulk(machineId string, alertList []*models.Alert) ([]string, error) {
func (c *Client) CreateAlertBulk(machineId string, alertList []*models.Alert) ([]string, []*models.Alert, error) {

ret := []string{}
bulkSize := 20

@@ -293,19 +296,19 @@ func (c *Client) CreateAlertBulk(machineId string, alertList []*models.Alert) ([
owner, err := c.QueryMachineByID(machineId)
if err != nil {
if errors.Cause(err) != UserNotExists {
return []string{}, errors.Wrapf(QueryFail, "machine '%s': %s", alertItem.MachineID, err)
return []string{}, alertList, errors.Wrapf(QueryFail, "machine '%s': %s", alertItem.MachineID, err)
}
c.Log.Debugf("CreateAlertBulk: Machine Id %s doesn't exist", machineId)
owner = nil
}
startAtTime, err := time.Parse(time.RFC3339, *alertItem.StartAt)
if err != nil {
return []string{}, errors.Wrapf(ParseTimeFail, "start_at field time '%s': %s", *alertItem.StartAt, err)
return []string{}, alertList, errors.Wrapf(ParseTimeFail, "start_at field time '%s': %s", *alertItem.StartAt, err)
}

stopAtTime, err := time.Parse(time.RFC3339, *alertItem.StopAt)
if err != nil {
return []string{}, errors.Wrapf(ParseTimeFail, "stop_at field time '%s': %s", *alertItem.StopAt, err)
return []string{}, alertList, errors.Wrapf(ParseTimeFail, "stop_at field time '%s': %s", *alertItem.StopAt, err)
}
/*display proper alert in logs*/
for _, disp := range formatAlertAsString(machineId, alertItem) {

@@ -321,11 +324,11 @@ func (c *Client) CreateAlertBulk(machineId string, alertList []*models.Alert) ([
for i, eventItem := range alertItem.Events {
ts, err := time.Parse(time.RFC3339, *eventItem.Timestamp)
if err != nil {
return []string{}, errors.Wrapf(ParseTimeFail, "event timestamp '%s' : %s", *eventItem.Timestamp, err)
return []string{}, alertList, errors.Wrapf(ParseTimeFail, "event timestamp '%s' : %s", *eventItem.Timestamp, err)
}
marshallMetas, err := json.Marshal(eventItem.Meta)
if err != nil {
return []string{}, errors.Wrapf(MarshalFail, "event meta '%v' : %s", eventItem.Meta, err)
return []string{}, alertList, errors.Wrapf(MarshalFail, "event meta '%v' : %s", eventItem.Meta, err)
}

//the serialized field is too big, let's try to progressively strip it

@@ -343,7 +346,7 @@ func (c *Client) CreateAlertBulk(machineId string, alertList []*models.Alert) ([

marshallMetas, err = json.Marshal(eventItem.Meta)
if err != nil {
return []string{}, errors.Wrapf(MarshalFail, "event meta '%v' : %s", eventItem.Meta, err)
return []string{}, alertList, errors.Wrapf(MarshalFail, "event meta '%v' : %s", eventItem.Meta, err)
}
if event.SerializedValidator(string(marshallMetas)) == nil {
valid = true

@@ -372,7 +375,7 @@ func (c *Client) CreateAlertBulk(machineId string, alertList []*models.Alert) ([
}
events, err = c.Ent.Event.CreateBulk(eventBulk...).Save(c.CTX)
if err != nil {
return []string{}, errors.Wrapf(BulkError, "creating alert events: %s", err)
return []string{}, alertList, errors.Wrapf(BulkError, "creating alert events: %s", err)
}
}

@@ -385,7 +388,7 @@ func (c *Client) CreateAlertBulk(machineId string, alertList []*models.Alert) ([
}
metas, err = c.Ent.Meta.CreateBulk(metaBulk...).Save(c.CTX)
if err != nil {
return []string{}, errors.Wrapf(BulkError, "creating alert meta: %s", err)
return []string{}, alertList, errors.Wrapf(BulkError, "creating alert meta: %s", err)
}
}

@@ -402,14 +405,14 @@ func (c *Client) CreateAlertBulk(machineId string, alertList []*models.Alert) ([

duration, err := time.ParseDuration(*decisionItem.Duration)
if err != nil {
return []string{}, errors.Wrapf(ParseDurationFail, "decision duration '%v' : %s", decisionItem.Duration, err)
return []string{}, alertList, errors.Wrapf(ParseDurationFail, "decision duration '%v' : %s", decisionItem.Duration, err)
}

/*if the scope is IP or Range, convert the value to integers */
if strings.ToLower(*decisionItem.Scope) == "ip" || strings.ToLower(*decisionItem.Scope) == "range" {
sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(*decisionItem.Value)
if err != nil {
return []string{}, errors.Wrapf(ParseDurationFail, "invalid addr/range %s : %s", *decisionItem.Value, err)
return []string{}, alertList, errors.Wrapf(ParseDurationFail, "invalid addr/range %s : %s", *decisionItem.Value, err)
}
}
decisionBulk[i] = c.Ent.Decision.Create().

@@ -428,9 +431,13 @@ func (c *Client) CreateAlertBulk(machineId string, alertList []*models.Alert) ([
}
decisions, err = c.Ent.Decision.CreateBulk(decisionBulk...).Save(c.CTX)
if err != nil {
return []string{}, errors.Wrapf(BulkError, "creating alert decisions: %s", err)
return []string{}, alertList, errors.Wrapf(BulkError, "creating alert decisions: %s", err)

}
for i, decisionItem := range alertItem.Decisions {
decisionItem.ID = int64(decisions[i].ID)
decisionItem.Until = decisions[i].Until.String()
}
}

alertB := c.Ent.Alert.

@@ -466,7 +473,7 @@ func (c *Client) CreateAlertBulk(machineId string, alertList []*models.Alert) ([
if len(bulk) == bulkSize {
alerts, err := c.Ent.Alert.CreateBulk(bulk...).Save(c.CTX)
if err != nil {
return []string{}, errors.Wrapf(BulkError, "bulk creating alert : %s", err)
return []string{}, alertList, errors.Wrapf(BulkError, "bulk creating alert : %s", err)
}
for _, alert := range alerts {
ret = append(ret, strconv.Itoa(alert.ID))

@@ -482,14 +489,22 @@ func (c *Client) CreateAlertBulk(machineId string, alertList []*models.Alert) ([

alerts, err := c.Ent.Alert.CreateBulk(bulk...).Save(c.CTX)
if err != nil {
return []string{}, errors.Wrapf(BulkError, "leftovers creating alert : %s", err)
return []string{}, alertList, errors.Wrapf(BulkError, "leftovers creating alert : %s", err)
}

for _, alert := range alerts {
ret = append(ret, strconv.Itoa(alert.ID))
}

return ret, nil
for i, alertID := range ret {
alertIDInt, err := strconv.Atoi(alertID)
if err != nil {
log.Errorf("unable to convert alert ID '%s' to int: %s", alertID, err)
}
alertList[i].ID = int64(alertIDInt)
}

return ret, alertList, nil
}

func BuildAlertRequestFromFilter(alerts *ent.AlertQuery, filter map[string][]string) (*ent.AlertQuery, error) {
@@ -20,6 +20,8 @@ type Machine struct {
CreatedAt time.Time `json:"created_at,omitempty"`
// UpdatedAt holds the value of the "updated_at" field.
UpdatedAt time.Time `json:"updated_at,omitempty"`
// LastPush holds the value of the "last_push" field.
LastPush time.Time `json:"last_push,omitempty"`
// MachineId holds the value of the "machineId" field.
MachineId string `json:"machineId,omitempty"`
// Password holds the value of the "password" field.

@@ -68,7 +70,7 @@ func (*Machine) scanValues(columns []string) ([]interface{}, error) {
values[i] = new(sql.NullInt64)
case machine.FieldMachineId, machine.FieldPassword, machine.FieldIpAddress, machine.FieldScenarios, machine.FieldVersion, machine.FieldStatus:
values[i] = new(sql.NullString)
case machine.FieldCreatedAt, machine.FieldUpdatedAt:
case machine.FieldCreatedAt, machine.FieldUpdatedAt, machine.FieldLastPush:
values[i] = new(sql.NullTime)
default:
return nil, fmt.Errorf("unexpected column %q for type Machine", columns[i])

@@ -103,6 +105,12 @@ func (m *Machine) assignValues(columns []string, values []interface{}) error {
} else if value.Valid {
m.UpdatedAt = value.Time
}
case machine.FieldLastPush:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field last_push", values[i])
} else if value.Valid {
m.LastPush = value.Time
}
case machine.FieldMachineId:
if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field machineId", values[i])

@@ -182,6 +190,8 @@ func (m *Machine) String() string {
builder.WriteString(m.CreatedAt.Format(time.ANSIC))
builder.WriteString(", updated_at=")
builder.WriteString(m.UpdatedAt.Format(time.ANSIC))
builder.WriteString(", last_push=")
builder.WriteString(m.LastPush.Format(time.ANSIC))
builder.WriteString(", machineId=")
builder.WriteString(m.MachineId)
builder.WriteString(", password=<sensitive>")

@@ -15,6 +15,8 @@ const (
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
+	// FieldLastPush holds the string denoting the last_push field in the database.
+	FieldLastPush = "last_push"
	// FieldMachineId holds the string denoting the machineid field in the database.
	FieldMachineId = "machine_id"
	// FieldPassword holds the string denoting the password field in the database.

@@ -47,6 +49,7 @@ var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
+	FieldLastPush,
	FieldMachineId,
	FieldPassword,
	FieldIpAddress,

@@ -71,6 +74,8 @@ var (
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
+	// DefaultLastPush holds the default value on creation for the "last_push" field.
+	DefaultLastPush func() time.Time
	// ScenariosValidator is a validator for the "scenarios" field. It is called by the builders before save.
	ScenariosValidator func(string) error
	// DefaultIsValidated holds the default value on creation for the "isValidated" field.

@@ -107,6 +107,13 @@ func UpdatedAt(v time.Time) predicate.Machine {
	})
}

+// LastPush applies equality check predicate on the "last_push" field. It's identical to LastPushEQ.
+func LastPush(v time.Time) predicate.Machine {
+	return predicate.Machine(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldLastPush), v))
+	})
+}
+
// MachineId applies equality check predicate on the "machineId" field. It's identical to MachineIdEQ.
func MachineId(v string) predicate.Machine {
	return predicate.Machine(func(s *sql.Selector) {

@@ -308,6 +315,96 @@ func UpdatedAtLTE(v time.Time) predicate.Machine {
	})
}

+// LastPushEQ applies the EQ predicate on the "last_push" field.
+func LastPushEQ(v time.Time) predicate.Machine {
+	return predicate.Machine(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldLastPush), v))
+	})
+}
+
+// LastPushNEQ applies the NEQ predicate on the "last_push" field.
+func LastPushNEQ(v time.Time) predicate.Machine {
+	return predicate.Machine(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldLastPush), v))
+	})
+}
+
+// LastPushIn applies the In predicate on the "last_push" field.
+func LastPushIn(vs ...time.Time) predicate.Machine {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Machine(func(s *sql.Selector) {
+		// if not arguments were provided, append the FALSE constants,
+		// since we can't apply "IN ()". This will make this predicate falsy.
+		if len(v) == 0 {
+			s.Where(sql.False())
+			return
+		}
+		s.Where(sql.In(s.C(FieldLastPush), v...))
+	})
+}
+
+// LastPushNotIn applies the NotIn predicate on the "last_push" field.
+func LastPushNotIn(vs ...time.Time) predicate.Machine {
+	v := make([]interface{}, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Machine(func(s *sql.Selector) {
+		// if not arguments were provided, append the FALSE constants,
+		// since we can't apply "IN ()". This will make this predicate falsy.
+		if len(v) == 0 {
+			s.Where(sql.False())
+			return
+		}
+		s.Where(sql.NotIn(s.C(FieldLastPush), v...))
+	})
+}
+
+// LastPushGT applies the GT predicate on the "last_push" field.
+func LastPushGT(v time.Time) predicate.Machine {
+	return predicate.Machine(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldLastPush), v))
+	})
+}
+
+// LastPushGTE applies the GTE predicate on the "last_push" field.
+func LastPushGTE(v time.Time) predicate.Machine {
+	return predicate.Machine(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldLastPush), v))
+	})
+}
+
+// LastPushLT applies the LT predicate on the "last_push" field.
+func LastPushLT(v time.Time) predicate.Machine {
+	return predicate.Machine(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldLastPush), v))
+	})
+}
+
+// LastPushLTE applies the LTE predicate on the "last_push" field.
+func LastPushLTE(v time.Time) predicate.Machine {
+	return predicate.Machine(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldLastPush), v))
+	})
+}
+
+// LastPushIsNil applies the IsNil predicate on the "last_push" field.
+func LastPushIsNil() predicate.Machine {
+	return predicate.Machine(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldLastPush)))
+	})
+}
+
+// LastPushNotNil applies the NotNil predicate on the "last_push" field.
+func LastPushNotNil() predicate.Machine {
+	return predicate.Machine(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldLastPush)))
+	})
+}
+
// MachineIdEQ applies the EQ predicate on the "machineId" field.
func MachineIdEQ(v string) predicate.Machine {
	return predicate.Machine(func(s *sql.Selector) {

@@ -49,6 +49,20 @@ func (mc *MachineCreate) SetNillableUpdatedAt(t *time.Time) *MachineCreate {
	return mc
}

+// SetLastPush sets the "last_push" field.
+func (mc *MachineCreate) SetLastPush(t time.Time) *MachineCreate {
+	mc.mutation.SetLastPush(t)
+	return mc
+}
+
+// SetNillableLastPush sets the "last_push" field if the given value is not nil.
+func (mc *MachineCreate) SetNillableLastPush(t *time.Time) *MachineCreate {
+	if t != nil {
+		mc.SetLastPush(*t)
+	}
+	return mc
+}
+
// SetMachineId sets the "machineId" field.
func (mc *MachineCreate) SetMachineId(s string) *MachineCreate {
	mc.mutation.SetMachineId(s)

@@ -217,6 +231,10 @@ func (mc *MachineCreate) defaults() {
		v := machine.DefaultUpdatedAt()
		mc.mutation.SetUpdatedAt(v)
	}
+	if _, ok := mc.mutation.LastPush(); !ok {
+		v := machine.DefaultLastPush()
+		mc.mutation.SetLastPush(v)
+	}
	if _, ok := mc.mutation.IsValidated(); !ok {
		v := machine.DefaultIsValidated
		mc.mutation.SetIsValidated(v)

@@ -291,6 +309,14 @@ func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) {
		})
		_node.UpdatedAt = value
	}
+	if value, ok := mc.mutation.LastPush(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type: field.TypeTime,
+			Value: value,
+			Column: machine.FieldLastPush,
+		})
+		_node.LastPush = value
+	}
	if value, ok := mc.mutation.MachineId(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type: field.TypeString,

@@ -56,6 +56,26 @@ func (mu *MachineUpdate) SetNillableUpdatedAt(t *time.Time) *MachineUpdate {
	return mu
}

+// SetLastPush sets the "last_push" field.
+func (mu *MachineUpdate) SetLastPush(t time.Time) *MachineUpdate {
+	mu.mutation.SetLastPush(t)
+	return mu
+}
+
+// SetNillableLastPush sets the "last_push" field if the given value is not nil.
+func (mu *MachineUpdate) SetNillableLastPush(t *time.Time) *MachineUpdate {
+	if t != nil {
+		mu.SetLastPush(*t)
+	}
+	return mu
+}
+
+// ClearLastPush clears the value of the "last_push" field.
+func (mu *MachineUpdate) ClearLastPush() *MachineUpdate {
+	mu.mutation.ClearLastPush()
+	return mu
+}
+
// SetMachineId sets the "machineId" field.
func (mu *MachineUpdate) SetMachineId(s string) *MachineUpdate {
	mu.mutation.SetMachineId(s)

@@ -291,6 +311,19 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) {
			Column: machine.FieldUpdatedAt,
		})
	}
+	if value, ok := mu.mutation.LastPush(); ok {
+		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+			Type: field.TypeTime,
+			Value: value,
+			Column: machine.FieldLastPush,
+		})
+	}
+	if mu.mutation.LastPushCleared() {
+		_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
+			Type: field.TypeTime,
+			Column: machine.FieldLastPush,
+		})
+	}
	if value, ok := mu.mutation.MachineId(); ok {
		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
			Type: field.TypeString,

@@ -459,6 +492,26 @@ func (muo *MachineUpdateOne) SetNillableUpdatedAt(t *time.Time) *MachineUpdateOn
	return muo
}

+// SetLastPush sets the "last_push" field.
+func (muo *MachineUpdateOne) SetLastPush(t time.Time) *MachineUpdateOne {
+	muo.mutation.SetLastPush(t)
+	return muo
+}
+
+// SetNillableLastPush sets the "last_push" field if the given value is not nil.
+func (muo *MachineUpdateOne) SetNillableLastPush(t *time.Time) *MachineUpdateOne {
+	if t != nil {
+		muo.SetLastPush(*t)
+	}
+	return muo
+}
+
+// ClearLastPush clears the value of the "last_push" field.
+func (muo *MachineUpdateOne) ClearLastPush() *MachineUpdateOne {
+	muo.mutation.ClearLastPush()
+	return muo
+}
+
// SetMachineId sets the "machineId" field.
func (muo *MachineUpdateOne) SetMachineId(s string) *MachineUpdateOne {
	muo.mutation.SetMachineId(s)

@@ -718,6 +771,19 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e
			Column: machine.FieldUpdatedAt,
		})
	}
+	if value, ok := muo.mutation.LastPush(); ok {
+		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+			Type: field.TypeTime,
+			Value: value,
+			Column: machine.FieldLastPush,
+		})
+	}
+	if muo.mutation.LastPushCleared() {
+		_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
+			Type: field.TypeTime,
+			Column: machine.FieldLastPush,
+		})
+	}
	if value, ok := muo.mutation.MachineId(); ok {
		_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
			Type: field.TypeString,

@@ -137,6 +137,7 @@ var (
		{Name: "id", Type: field.TypeInt, Increment: true},
		{Name: "created_at", Type: field.TypeTime},
		{Name: "updated_at", Type: field.TypeTime},
+		{Name: "last_push", Type: field.TypeTime, Nullable: true},
		{Name: "machine_id", Type: field.TypeString, Unique: true},
		{Name: "password", Type: field.TypeString},
		{Name: "ip_address", Type: field.TypeString},

@@ -4986,6 +4986,7 @@ type MachineMutation struct {
	id         *int
	created_at *time.Time
	updated_at *time.Time
+	last_push  *time.Time
	machineId  *string
	password   *string
	ipAddress  *string

@@ -5153,6 +5154,55 @@ func (m *MachineMutation) ResetUpdatedAt() {
	m.updated_at = nil
}

+// SetLastPush sets the "last_push" field.
+func (m *MachineMutation) SetLastPush(t time.Time) {
+	m.last_push = &t
+}
+
+// LastPush returns the value of the "last_push" field in the mutation.
+func (m *MachineMutation) LastPush() (r time.Time, exists bool) {
+	v := m.last_push
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldLastPush returns the old "last_push" field's value of the Machine entity.
+// If the Machine object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *MachineMutation) OldLastPush(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, fmt.Errorf("OldLastPush is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, fmt.Errorf("OldLastPush requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldLastPush: %w", err)
+	}
+	return oldValue.LastPush, nil
+}
+
+// ClearLastPush clears the value of the "last_push" field.
+func (m *MachineMutation) ClearLastPush() {
+	m.last_push = nil
+	m.clearedFields[machine.FieldLastPush] = struct{}{}
+}
+
+// LastPushCleared returns if the "last_push" field was cleared in this mutation.
+func (m *MachineMutation) LastPushCleared() bool {
+	_, ok := m.clearedFields[machine.FieldLastPush]
+	return ok
+}
+
+// ResetLastPush resets all changes to the "last_push" field.
+func (m *MachineMutation) ResetLastPush() {
+	m.last_push = nil
+	delete(m.clearedFields, machine.FieldLastPush)
+}
+
// SetMachineId sets the "machineId" field.
func (m *MachineMutation) SetMachineId(s string) {
	m.machineId = &s

@@ -5517,13 +5567,16 @@ func (m *MachineMutation) Type() string {
// order to get all numeric fields that were incremented/decremented, call
// AddedFields().
func (m *MachineMutation) Fields() []string {
-	fields := make([]string, 0, 9)
+	fields := make([]string, 0, 10)
	if m.created_at != nil {
		fields = append(fields, machine.FieldCreatedAt)
	}
	if m.updated_at != nil {
		fields = append(fields, machine.FieldUpdatedAt)
	}
+	if m.last_push != nil {
+		fields = append(fields, machine.FieldLastPush)
+	}
	if m.machineId != nil {
		fields = append(fields, machine.FieldMachineId)
	}

@@ -5557,6 +5610,8 @@ func (m *MachineMutation) Field(name string) (ent.Value, bool) {
		return m.CreatedAt()
	case machine.FieldUpdatedAt:
		return m.UpdatedAt()
+	case machine.FieldLastPush:
+		return m.LastPush()
	case machine.FieldMachineId:
		return m.MachineId()
	case machine.FieldPassword:

@@ -5584,6 +5639,8 @@ func (m *MachineMutation) OldField(ctx context.Context, name string) (ent.Value,
		return m.OldCreatedAt(ctx)
	case machine.FieldUpdatedAt:
		return m.OldUpdatedAt(ctx)
+	case machine.FieldLastPush:
+		return m.OldLastPush(ctx)
	case machine.FieldMachineId:
		return m.OldMachineId(ctx)
	case machine.FieldPassword:

@@ -5621,6 +5678,13 @@ func (m *MachineMutation) SetField(name string, value ent.Value) error {
		}
		m.SetUpdatedAt(v)
		return nil
+	case machine.FieldLastPush:
+		v, ok := value.(time.Time)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetLastPush(v)
+		return nil
	case machine.FieldMachineId:
		v, ok := value.(string)
		if !ok {

@@ -5700,6 +5764,9 @@ func (m *MachineMutation) AddField(name string, value ent.Value) error {
// mutation.
func (m *MachineMutation) ClearedFields() []string {
	var fields []string
+	if m.FieldCleared(machine.FieldLastPush) {
+		fields = append(fields, machine.FieldLastPush)
+	}
	if m.FieldCleared(machine.FieldScenarios) {
		fields = append(fields, machine.FieldScenarios)
	}

@@ -5723,6 +5790,9 @@ func (m *MachineMutation) FieldCleared(name string) bool {
// error if the field is not defined in the schema.
func (m *MachineMutation) ClearField(name string) error {
	switch name {
+	case machine.FieldLastPush:
+		m.ClearLastPush()
+		return nil
	case machine.FieldScenarios:
		m.ClearScenarios()
		return nil

@@ -5746,6 +5816,9 @@ func (m *MachineMutation) ResetField(name string) error {
	case machine.FieldUpdatedAt:
		m.ResetUpdatedAt()
		return nil
+	case machine.FieldLastPush:
+		m.ResetLastPush()
+		return nil
	case machine.FieldMachineId:
		m.ResetMachineId()
		return nil

@@ -112,12 +112,16 @@ func init() {
	machineDescUpdatedAt := machineFields[1].Descriptor()
	// machine.DefaultUpdatedAt holds the default value on creation for the updated_at field.
	machine.DefaultUpdatedAt = machineDescUpdatedAt.Default.(func() time.Time)
+	// machineDescLastPush is the schema descriptor for last_push field.
+	machineDescLastPush := machineFields[2].Descriptor()
+	// machine.DefaultLastPush holds the default value on creation for the last_push field.
+	machine.DefaultLastPush = machineDescLastPush.Default.(func() time.Time)
	// machineDescScenarios is the schema descriptor for scenarios field.
-	machineDescScenarios := machineFields[5].Descriptor()
+	machineDescScenarios := machineFields[6].Descriptor()
	// machine.ScenariosValidator is a validator for the "scenarios" field. It is called by the builders before save.
	machine.ScenariosValidator = machineDescScenarios.Validators[0].(func(string) error)
	// machineDescIsValidated is the schema descriptor for isValidated field.
-	machineDescIsValidated := machineFields[7].Descriptor()
+	machineDescIsValidated := machineFields[8].Descriptor()
	// machine.DefaultIsValidated holds the default value on creation for the isValidated field.
	machine.DefaultIsValidated = machineDescIsValidated.Default.(bool)
	metaFields := schema.Meta{}.Fields()

@@ -20,6 +20,8 @@ func (Machine) Fields() []ent.Field {
			Default(time.Now),
		field.Time("updated_at").
			Default(time.Now),
+		field.Time("last_push").
+			Default(time.Now).Optional(),
		field.String("machineId").Unique(),
		field.String("password").Sensitive(),
		field.String("ipAddress"),

@@ -107,6 +107,14 @@ func (c *Client) DeleteWatcher(name string) error {
	return nil
}

+func (c *Client) UpdateMachineLastPush(machineID string) error {
+	_, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetLastPush(time.Now()).Save(c.CTX)
+	if err != nil {
+		return errors.Wrapf(UpdateFail, "updating machine last_push: %s", err)
+	}
+	return nil
+}
+
func (c *Client) UpdateMachineScenarios(scenarios string, ID int) error {
	_, err := c.Ent.Machine.UpdateOneID(ID).
		SetUpdatedAt(time.Now()).
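UpdateMachineLastPush is what the Local API can call whenever a machine pushes signals. The call site below is a hypothetical sketch, not shown in this diff:

package sketch

import (
	log "github.com/sirupsen/logrus"

	"github.com/crowdsecurity/crowdsec/pkg/database"
)

// recordPush marks the time of the latest signal push for a machine so that it
// can later be reported in the metrics sent to the Central API.
func recordPush(dbClient *database.Client, machineID string) {
	if err := dbClient.UpdateMachineLastPush(machineID); err != nil {
		log.Errorf("unable to update last push for machine '%s': %s", machineID, err)
	}
}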

@@ -6,6 +6,8 @@ package models
// Editing this file might prove futile when you re-run the swagger generate command

import (
+	"strconv"
+
	"github.com/go-openapi/errors"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"

@@ -17,9 +19,16 @@
// swagger:model AddSignalsRequestItem
type AddSignalsRequestItem struct {

+	// alert id
+	// Required: true
+	AlertID *int64 `json:"alert_id"`
+
	// created at
	CreatedAt string `json:"created_at,omitempty"`

+	// decisions
+	Decisions []*Decision `json:"decisions"`
+
	// machine id
	MachineID string `json:"machine_id,omitempty"`

@@ -35,6 +44,10 @@ type AddSignalsRequestItem struct {
	// Required: true
	ScenarioHash *string `json:"scenario_hash"`

+	// scenario trust
+	// Required: true
+	ScenarioTrust *string `json:"scenario_trust"`
+
	// scenario version
	// Required: true
	ScenarioVersion *string `json:"scenario_version"`

@@ -56,6 +69,14 @@
func (m *AddSignalsRequestItem) Validate(formats strfmt.Registry) error {
	var res []error

+	if err := m.validateAlertID(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateDecisions(formats); err != nil {
+		res = append(res, err)
+	}
+
	if err := m.validateMessage(formats); err != nil {
		res = append(res, err)
	}

@@ -68,6 +89,10 @@
		res = append(res, err)
	}

+	if err := m.validateScenarioTrust(formats); err != nil {
+		res = append(res, err)
+	}
+
	if err := m.validateScenarioVersion(formats); err != nil {
		res = append(res, err)
	}

@@ -90,6 +115,39 @@
	return nil
}

+func (m *AddSignalsRequestItem) validateAlertID(formats strfmt.Registry) error {
+
+	if err := validate.Required("alert_id", "body", m.AlertID); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *AddSignalsRequestItem) validateDecisions(formats strfmt.Registry) error {
+	if swag.IsZero(m.Decisions) { // not required
+		return nil
+	}
+
+	for i := 0; i < len(m.Decisions); i++ {
+		if swag.IsZero(m.Decisions[i]) { // not required
+			continue
+		}
+
+		if m.Decisions[i] != nil {
+			if err := m.Decisions[i].Validate(formats); err != nil {
+				if ve, ok := err.(*errors.Validation); ok {
+					return ve.ValidateName("decisions" + "." + strconv.Itoa(i))
+				}
+				return err
+			}
+		}
+
+	}
+
+	return nil
+}
+
func (m *AddSignalsRequestItem) validateMessage(formats strfmt.Registry) error {

	if err := validate.Required("message", "body", m.Message); err != nil {

@@ -117,6 +175,15 @@ func (m *AddSignalsRequestItem) validateScenarioHash(formats strfmt.Registry) er
	return nil
}

+func (m *AddSignalsRequestItem) validateScenarioTrust(formats strfmt.Registry) error {
+
+	if err := validate.Required("scenario_trust", "body", m.ScenarioTrust); err != nil {
+		return err
+	}
+
+	return nil
+}
+
func (m *AddSignalsRequestItem) validateScenarioVersion(formats strfmt.Registry) error {

	if err := validate.Required("scenario_version", "body", m.ScenarioVersion); err != nil {
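For reference, a sketch of building an AddSignalsRequestItem with the two new required fields; the values are invented, and the remaining required fields still have to be filled in before validation passes:

package sketch

import (
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
	log "github.com/sirupsen/logrus"

	"github.com/crowdsecurity/crowdsec/pkg/models"
)

// buildSignal shows the new alert_id and scenario_trust fields; scenario,
// scenario_hash, scenario_version, message, source, start_at and stop_at are
// also required by the spec and are left out here only for brevity.
func buildSignal() *models.AddSignalsRequestItem {
	signal := &models.AddSignalsRequestItem{
		AlertID:       swag.Int64(42),
		ScenarioTrust: swag.String("certified"),
	}
	if err := signal.Validate(strfmt.Default); err != nil {
		log.Warningf("signal is not complete yet: %s", err)
	}
	return signal
}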

@@ -6,6 +6,8 @@ package models
// Editing this file might prove futile when you re-run the swagger generate command

import (
+	"context"
+
	"github.com/go-openapi/errors"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"

@@ -17,7 +19,7 @@
// swagger:model Decision
type Decision struct {

-	// duration
+	// the duration of the decisions
	// Required: true
	Duration *string `json:"duration"`

@@ -45,6 +47,9 @@ type Decision struct {
	// Required: true
	Type *string `json:"type"`

+	// the date until the decisions must be active
+	Until string `json:"until,omitempty"`
+
	// the value of the decision scope : an IP, a range, a username, etc
	// Required: true
	Value *string `json:"value"`

@@ -138,6 +143,42 @@ func (m *Decision) validateValue(formats strfmt.Registry) error {
	return nil
}

+// ContextValidate validate this decision based on the context it is used
+func (m *Decision) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.contextValidateID(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.contextValidateSimulated(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *Decision) contextValidateID(ctx context.Context, formats strfmt.Registry) error {
+
+	if err := validate.ReadOnly(ctx, "id", "body", int64(m.ID)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *Decision) contextValidateSimulated(ctx context.Context, formats strfmt.Registry) error {
+
+	if err := validate.ReadOnly(ctx, "simulated", "body", m.Simulated); err != nil {
+		return err
+	}
+
+	return nil
+}
+
// MarshalBinary interface implementation
func (m *Decision) MarshalBinary() ([]byte, error) {
	if m == nil {
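A short sketch of the new two-step validation on Decision (all values invented, and the field set assumes the usual required decision fields); ContextValidate treats id and simulated as read-only, server-side fields:

package sketch

import (
	"context"

	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
	log "github.com/sirupsen/logrus"

	"github.com/crowdsecurity/crowdsec/pkg/models"
)

// checkDecision validates a hand-built decision both statically and against
// an empty request context.
func checkDecision() {
	d := &models.Decision{
		Duration: swag.String("4h"),
		Origin:   swag.String("crowdsec"),
		Scenario: swag.String("crowdsecurity/ssh-bf"),
		Scope:    swag.String("Ip"),
		Type:     swag.String("ban"),
		Value:    swag.String("192.0.2.1"),
	}
	if err := d.Validate(strfmt.Default); err != nil {
		log.Errorf("decision is invalid: %s", err)
	}
	if err := d.ContextValidate(context.Background(), strfmt.Default); err != nil {
		log.Errorf("decision failed context validation: %s", err)
	}
}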

@@ -45,7 +45,6 @@ paths:
          required: false
          type: string
          description: 'Comma separated scopes of decisions to fetch'
          example: ip,range,country
      responses:
        '200':
          description: successful operation

@@ -768,17 +767,34 @@ definitions:
      bouncers:
        type: array
        items:
-          $ref: '#/definitions/MetricsSoftInfo'
+          $ref: '#/definitions/MetricsBouncerInfo'
      machines:
        type: array
        items:
-          $ref: '#/definitions/MetricsSoftInfo'
+          $ref: '#/definitions/MetricsAgentInfo'
    required:
      - apil_version
      - bouncers
      - machines
-  MetricsSoftInfo:
-    title: MetricsSoftInfo
+  MetricsBouncerInfo:
+    title: MetricsBouncerInfo
    description: Software version info (so we can warn users about out-of-date software). The software name and the version are "guessed" from the user-agent
    type: object
    properties:
+      custom_name:
+        type: string
+        description: name of the component
+      name:
+        type: string
+        description: bouncer type (firewall, php ...)
+      version:
+        type: string
+        description: software version
+      last_pull:
+        type: string
+        description: last bouncer pull date
+  MetricsAgentInfo:
+    title: MetricsAgentInfo
+    description: Software version info (so we can warn users about out-of-date software). The software name and the version are "guessed" from the user-agent
+    type: object
+    properties:

@@ -788,6 +804,12 @@ definitions:
      version:
        type: string
        description: software version
+      last_update:
+        type: string
+        description: last agent update date
+      last_push:
+        type: string
+        description: last agent push date
  Decision:
    title: Decision
    type: object

@@ -809,7 +831,11 @@ definitions:
        description: 'the value of the decision scope : an IP, a range, a username, etc'
        type: string
      duration:
+        description: 'the duration of the decisions'
        type: string
+      until:
+        type: string
+        description: 'the date until the decisions must be active'
      scenario:
        type: string
      simulated:

@@ -917,9 +943,13 @@ definitions:
      - "source"
      - "start_at"
      - "stop_at"
+      - "scenario_trust"
+      - "alert_id"
    properties:
      scenario_hash:
        type: "string"
+      alert_id:
+        type: "integer"
      scenario:
        type: "string"
      created_at:

@@ -930,6 +960,8 @@ definitions:
        $ref: "#/definitions/Source"
      scenario_version:
        type: "string"
+      scenario_trust:
+        type: "string"
      message:
        type: "string"
        description: "a human readable message"

@@ -937,6 +969,10 @@ definitions:
        type: "string"
      stop_at:
        type: "string"
+      decisions:
+        type: array
+        items:
+          $ref: '#/definitions/Decision'
    title: "Signal"
tags:
  - name: bouncers

@@ -6,6 +6,7 @@ package models
// Editing this file might prove futile when you re-run the swagger generate command

import (
+	"context"
	"strconv"

	"github.com/go-openapi/errors"

@@ -25,11 +26,11 @@ type Metrics struct {

	// bouncers
	// Required: true
-	Bouncers []*MetricsSoftInfo `json:"bouncers"`
+	Bouncers []*MetricsBouncerInfo `json:"bouncers"`

	// machines
	// Required: true
-	Machines []*MetricsSoftInfo `json:"machines"`
+	Machines []*MetricsAgentInfo `json:"machines"`
}

// Validate validates this metrics

@@ -113,6 +114,60 @@ func (m *Metrics) validateMachines(formats strfmt.Registry) error {
	return nil
}

+// ContextValidate validate this metrics based on the context it is used
+func (m *Metrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.contextValidateBouncers(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.contextValidateMachines(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *Metrics) contextValidateBouncers(ctx context.Context, formats strfmt.Registry) error {
+
+	for i := 0; i < len(m.Bouncers); i++ {
+
+		if m.Bouncers[i] != nil {
+			if err := m.Bouncers[i].ContextValidate(ctx, formats); err != nil {
+				if ve, ok := err.(*errors.Validation); ok {
+					return ve.ValidateName("bouncers" + "." + strconv.Itoa(i))
+				}
+				return err
+			}
+		}
+
+	}
+
+	return nil
+}
+
+func (m *Metrics) contextValidateMachines(ctx context.Context, formats strfmt.Registry) error {
+
+	for i := 0; i < len(m.Machines); i++ {
+
+		if m.Machines[i] != nil {
+			if err := m.Machines[i].ContextValidate(ctx, formats); err != nil {
+				if ve, ok := err.(*errors.Validation); ok {
+					return ve.ValidateName("machines" + "." + strconv.Itoa(i))
+				}
+				return err
+			}
+		}
+
+	}
+
+	return nil
+}
+
// MarshalBinary interface implementation
func (m *Metrics) MarshalBinary() ([]byte, error) {
	if m == nil {

@@ -6,16 +6,24 @@ package models
// Editing this file might prove futile when you re-run the swagger generate command

import (
+	"context"
+
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
)

-// MetricsSoftInfo MetricsSoftInfo
+// MetricsAgentInfo MetricsAgentInfo
//
// Software version info (so we can warn users about out-of-date software). The software name and the version are "guessed" from the user-agent
//
-// swagger:model MetricsSoftInfo
-type MetricsSoftInfo struct {
+// swagger:model MetricsAgentInfo
+type MetricsAgentInfo struct {

+	// last agent push date
+	LastPush string `json:"last_push,omitempty"`
+
+	// last agent update date
+	LastUpdate string `json:"last_update,omitempty"`
+
	// name of the component
	Name string `json:"name,omitempty"`

@@ -24,13 +32,18 @@ type MetricsSoftInfo struct {
	Version string `json:"version,omitempty"`
}

-// Validate validates this metrics soft info
-func (m *MetricsSoftInfo) Validate(formats strfmt.Registry) error {
+// Validate validates this metrics agent info
+func (m *MetricsAgentInfo) Validate(formats strfmt.Registry) error {
	return nil
}

+// ContextValidate validates this metrics agent info based on context it is used
+func (m *MetricsAgentInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	return nil
+}
+
// MarshalBinary interface implementation
-func (m *MetricsSoftInfo) MarshalBinary() ([]byte, error) {
+func (m *MetricsAgentInfo) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}

@@ -38,8 +51,8 @@ func (m *MetricsSoftInfo) MarshalBinary() ([]byte, error) {
}

// UnmarshalBinary interface implementation
-func (m *MetricsSoftInfo) UnmarshalBinary(b []byte) error {
-	var res MetricsSoftInfo
+func (m *MetricsAgentInfo) UnmarshalBinary(b []byte) error {
+	var res MetricsAgentInfo
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}

pkg/models/metrics_bouncer_info.go (new file, 61 lines)

@@ -0,0 +1,61 @@
// Code generated by go-swagger; DO NOT EDIT.

package models

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

import (
	"context"

	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
)

// MetricsBouncerInfo MetricsBouncerInfo
//
// Software version info (so we can warn users about out-of-date software). The software name and the version are "guessed" from the user-agent
//
// swagger:model MetricsBouncerInfo
type MetricsBouncerInfo struct {

	// name of the component
	CustomName string `json:"custom_name,omitempty"`

	// last bouncer pull date
	LastPull string `json:"last_pull,omitempty"`

	// bouncer type (firewall, php ...)
	Name string `json:"name,omitempty"`

	// software version
	Version string `json:"version,omitempty"`
}

// Validate validates this metrics bouncer info
func (m *MetricsBouncerInfo) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate validates this metrics bouncer info based on context it is used
func (m *MetricsBouncerInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary interface implementation
func (m *MetricsBouncerInfo) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *MetricsBouncerInfo) UnmarshalBinary(b []byte) error {
	var res MetricsBouncerInfo
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
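Putting the two new models together, a sketch of the metrics payload after the MetricsSoftInfo split (all values invented; the apil_version field name follows the usual go-swagger naming and is assumed here):

package sketch

import (
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
	log "github.com/sirupsen/logrus"

	"github.com/crowdsecurity/crowdsec/pkg/models"
)

// buildMetrics builds a Metrics payload where bouncers carry last_pull and
// machines carry last_update/last_push, as defined above.
func buildMetrics() *models.Metrics {
	m := &models.Metrics{
		ApilVersion: swag.String("v1.2.0"), // assumed Go name for the apil_version field
		Bouncers: []*models.MetricsBouncerInfo{
			{CustomName: "fw-gateway", Name: "crowdsec-firewall-bouncer", Version: "v0.0.13", LastPull: "2021-09-01T10:00:00Z"},
		},
		Machines: []*models.MetricsAgentInfo{
			{Name: "machine-1", Version: "v1.2.0", LastUpdate: "2021-09-01T10:00:00Z", LastPush: "2021-09-01T10:05:00Z"},
		},
	}
	if err := m.Validate(strfmt.Default); err != nil {
		log.Errorf("invalid metrics payload: %s", err)
	}
	return m
}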

@@ -199,3 +199,12 @@ func Int32Ptr(i int32) *int32 {
func BoolPtr(b bool) *bool {
	return &b
}
+
+func InSlice(str string, slice []string) bool {
+	for _, item := range slice {
+		if str == item {
+			return true
+		}
+	}
+	return false
+}
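Quick usage note for the new helper in pkg/types (the scenario names are just examples):

package main

import (
	"fmt"

	"github.com/crowdsecurity/crowdsec/pkg/types"
)

func main() {
	scenarios := []string{"crowdsecurity/ssh-bf", "crowdsecurity/http-probing"}
	fmt.Println(types.InSlice("crowdsecurity/ssh-bf", scenarios)) // true
	fmt.Println(types.InSlice("crowdsecurity/smb-bf", scenarios)) // false
}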

@@ -60,6 +60,7 @@ install -m 644 -D config/patterns/* -t %{buildroot}%{_sysconfdir}/crowdsec/patte
install -m 644 -D config/config.yaml %{buildroot}%{_sysconfdir}/crowdsec
install -m 644 -D config/simulation.yaml %{buildroot}%{_sysconfdir}/crowdsec
install -m 644 -D config/profiles.yaml %{buildroot}%{_sysconfdir}/crowdsec
+install -m 644 -D config/console_config.yaml %{buildroot}%{_sysconfdir}/crowdsec
install -m 644 -D %{SOURCE1} %{buildroot}%{_presetdir}

install -m 551 plugins/notifications/slack/notification-slack %{buildroot}%{_libdir}/%{name}/plugins/

@@ -31,6 +31,8 @@ CSCLI_BIN="./cmd/crowdsec-cli/cscli"
CLIENT_SECRETS="local_api_credentials.yaml"
LAPI_SECRETS="online_api_credentials.yaml"

+CONSOLE_FILE="console_config.yaml"
+
BIN_INSTALL_PATH="/usr/local/bin"
CROWDSEC_BIN_INSTALLED="${BIN_INSTALL_PATH}/crowdsec"

@@ -405,6 +407,7 @@ install_crowdsec() {
    install -v -m 644 -D ./config/acquis.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit
    install -v -m 644 -D ./config/profiles.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit
    install -v -m 644 -D ./config/simulation.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit
+    install -v -m 644 -D ./config/"${CONSOLE_FILE}" "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit

    mkdir -p ${PID_DIR} || exit
    PID=${PID_DIR} DATA=${CROWDSEC_DATA_DIR} CFG=${CROWDSEC_CONFIG_PATH} envsubst '$CFG $PID $DATA' < ./config/user.yaml > ${CROWDSEC_CONFIG_PATH}"/user.yaml" || log_fatal "unable to generate user configuration file"