noop code removal, typos and lint fixes (#1329)
parent 59ad91a8ca
commit 7c0593c659
17 changed files with 136 additions and 123 deletions
@@ -50,16 +50,13 @@ func AlertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error {
 	if csConfig.Cscli.Output == "raw" {
 		csvwriter := csv.NewWriter(os.Stdout)
+		header := []string{"id", "scope", "value", "reason", "country", "as", "decisions", "created_at"}
 		if printMachine {
-			err := csvwriter.Write([]string{"id", "scope", "value", "reason", "country", "as", "decisions", "created_at", "machine"})
-			if err != nil {
-				return err
-			}
-		} else {
-			err := csvwriter.Write([]string{"id", "scope", "value", "reason", "country", "as", "decisions", "created_at"})
-			if err != nil {
-				return err
-			}
+			header = append(header, "machine")
 		}
+		err := csvwriter.Write(header)
+		if err != nil {
+			return err
+		}
 		for _, alertItem := range *alerts {
 			row := []string{
@@ -87,11 +84,11 @@ func AlertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error {
 	} else if csConfig.Cscli.Output == "human" {

 		table := tablewriter.NewWriter(os.Stdout)
+		header := []string{"ID", "value", "reason", "country", "as", "decisions", "created_at"}
 		if printMachine {
-			table.SetHeader([]string{"ID", "value", "reason", "country", "as", "decisions", "created_at", "machine"})
-		} else {
-			table.SetHeader([]string{"ID", "value", "reason", "country", "as", "decisions", "created_at"})
+			header = append(header, "machine")
 		}
+		table.SetHeader(header)

 		if len(*alerts) == 0 {
 			fmt.Println("No active alerts")
@@ -103,28 +100,19 @@ func AlertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error {
 			if *alertItem.Source.Value != "" {
 				displayVal += ":" + *alertItem.Source.Value
 			}
-			if printMachine {
-				table.Append([]string{
-					strconv.Itoa(int(alertItem.ID)),
-					displayVal,
-					*alertItem.Scenario,
-					alertItem.Source.Cn,
-					alertItem.Source.AsNumber + " " + alertItem.Source.AsName,
-					DecisionsFromAlert(alertItem),
-					*alertItem.StartAt,
-					alertItem.MachineID,
-				})
-			} else {
-				table.Append([]string{
-					strconv.Itoa(int(alertItem.ID)),
-					displayVal,
-					*alertItem.Scenario,
-					alertItem.Source.Cn,
-					alertItem.Source.AsNumber + " " + alertItem.Source.AsName,
-					DecisionsFromAlert(alertItem),
-					*alertItem.StartAt,
-				})
-			}
+			row := []string{
+				strconv.Itoa(int(alertItem.ID)),
+				displayVal,
+				*alertItem.Scenario,
+				alertItem.Source.Cn,
+				alertItem.Source.AsNumber + " " + alertItem.Source.AsName,
+				DecisionsFromAlert(alertItem),
+				*alertItem.StartAt,
+			}
+			if printMachine {
+				row = append(row, alertItem.MachineID)
+			}
+			table.Append(row)
 		}
 		table.Render() // Send output
 	}
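The three AlertsToTable hunks above replace duplicated Write/SetHeader/Append branches with a single slice that is built once and only grows a "machine" column when machine output is requested. A standalone sketch of the same pattern, assuming illustrative column names and a made-up writeHeader helper (not taken from cscli):

package main

import (
	"encoding/csv"
	"os"
)

// writeHeader builds the header once and appends the optional column,
// instead of duplicating the Write call in each branch.
func writeHeader(w *csv.Writer, printMachine bool) error {
	header := []string{"id", "scope", "value", "reason", "created_at"}
	if printMachine {
		header = append(header, "machine")
	}
	return w.Write(header)
}

func main() {
	w := csv.NewWriter(os.Stdout)
	defer w.Flush()
	if err := writeHeader(w, true); err != nil {
		panic(err)
	}
}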
@@ -68,9 +68,9 @@ Note: This command requires database direct access, so is intended to be run on
 	for _, b := range blockers {
 		var revoked string
 		if !b.Revoked {
-			revoked = fmt.Sprintf("%s", emoji.CheckMark)
+			revoked = emoji.CheckMark.String()
 		} else {
-			revoked = fmt.Sprintf("%s", emoji.Prohibited)
+			revoked = emoji.Prohibited.String()
 		}
 		table.Append([]string{b.Name, b.IPAddress, revoked, b.LastPull.Format(time.RFC3339), b.Type, b.Version})
 	}
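This hunk, and the matching one for machines below, drops fmt.Sprintf("%s", x) in favor of calling String() directly on a value that already implements fmt.Stringer; staticcheck's gosimple pass flags that kind of call as a redundant formatting round-trip (S1025). A hedged, generic illustration; the status type here is invented for the example and is not the emoji package crowdsec uses:

package main

import "fmt"

// status implements fmt.Stringer, so fmt.Sprintf("%s", s) only wraps s.String().
type status string

func (s status) String() string { return string(s) }

func main() {
	ok := status("✓")

	viaSprintf := fmt.Sprintf("%s", ok) // redundant formatting pass
	direct := ok.String()               // equivalent and simpler

	fmt.Println(viaSprintf == direct) // true
}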
@@ -134,7 +134,6 @@ Enable given information push to the central API. Allows to empower the console`
 		if enableAll {
 			SetConsoleOpts(csconfig.CONSOLE_CONFIGS, true)
 			log.Infof("All features have been enabled successfully")
-
 		} else {
 			if len(args) == 0 {
 				log.Fatalf("You must specify at least one feature to enable")
@@ -82,7 +82,6 @@ func DecisionsToTable(alerts *models.GetAlertsResponse) error {
 		x, _ := json.MarshalIndent(alerts, "", " ")
 		fmt.Printf("%s", string(x))
 	} else if csConfig.Cscli.Output == "human" {
-
 		table := tablewriter.NewWriter(os.Stdout)
 		table.SetHeader([]string{"ID", "Source", "Scope:Value", "Reason", "Action", "Country", "AS", "Events", "expiration", "Alert ID"})

@@ -70,7 +70,7 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios
 			}

 			if logType == "" {
-				log.Fatalf("please provid a type (--type) for the test")
+				log.Fatalf("please provide a type (--type) for the test")
 			}

 			if err := os.MkdirAll(testPath, os.ModePerm); err != nil {
@@ -131,9 +131,9 @@ Note: This command requires database direct access, so is intended to be run on
 	for _, w := range machines {
 		var validated string
 		if w.IsValidated {
-			validated = fmt.Sprintf("%s", emoji.CheckMark)
+			validated = emoji.CheckMark.String()
 		} else {
-			validated = fmt.Sprintf("%s", emoji.Prohibited)
+			validated = emoji.Prohibited.String()
 		}
 		table.Append([]string{w.MachineId, w.IpAddress, w.UpdatedAt.Format(time.RFC3339), validated, w.Version})
 	}
@@ -106,12 +106,11 @@ func ListItems(itemTypes []string, args []string, showType bool, showHeader bool
 	var hubStatusByItemType = make(map[string][]cwhub.ItemHubStatus)
-
 	for _, itemType := range itemTypes {
+		itemName := ""
 		if len(args) == 1 {
 			// This means that user requested a specific item by name
-			hubStatusByItemType[itemType] = cwhub.GetHubStatusForItemType(itemType, args[0], all)
-		} else {
-			hubStatusByItemType[itemType] = cwhub.GetHubStatusForItemType(itemType, "", all)
+			itemName = args[0]
 		}
+		hubStatusByItemType[itemType] = cwhub.GetHubStatusForItemType(itemType, itemName, all)
 	}

 	if csConfig.Cscli.Output == "human" {
@@ -137,16 +136,13 @@ func ListItems(itemTypes []string, args []string, showType bool, showHeader bool
 	} else if csConfig.Cscli.Output == "raw" {
 		csvwriter := csv.NewWriter(os.Stdout)
 		if showHeader {
+			header := []string{"name", "status", "version", "description"}
 			if showType {
-				err := csvwriter.Write([]string{"name", "status", "version", "description", "type"})
-				if err != nil {
-					log.Fatalf("failed to write header: %s", err)
-				}
-			} else {
-				err := csvwriter.Write([]string{"name", "status", "version", "description"})
-				if err != nil {
-					log.Fatalf("failed to write header: %s", err)
-				}
+				header = append(header, "type")
 			}
+			err := csvwriter.Write(header)
+			if err != nil {
+				log.Fatalf("failed to write header: %s", err)
+			}

 		}
@@ -685,57 +681,56 @@ func BackupHub(dirPath string) error {
 			"type": itemType,
 		})
 		itemMap := cwhub.GetItemMap(itemType)
-		if itemMap != nil {
-			itemDirectory = fmt.Sprintf("%s/%s/", dirPath, itemType)
-			if err := os.MkdirAll(itemDirectory, os.ModePerm); err != nil {
-				return fmt.Errorf("error while creating %s : %s", itemDirectory, err)
-			}
-			upstreamParsers = []string{}
-			for k, v := range itemMap {
-				clog = clog.WithFields(log.Fields{
-					"file": v.Name,
-				})
-				if !v.Installed { //only backup installed ones
-					clog.Debugf("[%s] : not installed", k)
-					continue
-				}
-
-				//for the local/tainted ones, we backup the full file
-				if v.Tainted || v.Local || !v.UpToDate {
-					//we need to backup stages for parsers
-					if itemType == cwhub.PARSERS || itemType == cwhub.PARSERS_OVFLW {
-						fstagedir := fmt.Sprintf("%s%s", itemDirectory, v.Stage)
-						if err := os.MkdirAll(fstagedir, os.ModePerm); err != nil {
-							return fmt.Errorf("error while creating stage dir %s : %s", fstagedir, err)
-						}
-					}
-					clog.Debugf("[%s] : backuping file (tainted:%t local:%t up-to-date:%t)", k, v.Tainted, v.Local, v.UpToDate)
-					tfile := fmt.Sprintf("%s%s/%s", itemDirectory, v.Stage, v.FileName)
-					if err = types.CopyFile(v.LocalPath, tfile); err != nil {
-						return fmt.Errorf("failed copy %s %s to %s : %s", itemType, v.LocalPath, tfile, err)
-					}
-					clog.Infof("local/tainted saved %s to %s", v.LocalPath, tfile)
-					continue
-				}
-				clog.Debugf("[%s] : from hub, just backup name (up-to-date:%t)", k, v.UpToDate)
-				clog.Infof("saving, version:%s, up-to-date:%t", v.Version, v.UpToDate)
-				upstreamParsers = append(upstreamParsers, v.Name)
-			}
-			//write the upstream items
-			upstreamParsersFname := fmt.Sprintf("%s/upstream-%s.json", itemDirectory, itemType)
-			upstreamParsersContent, err := json.MarshalIndent(upstreamParsers, "", " ")
-			if err != nil {
-				return fmt.Errorf("failed marshaling upstream parsers : %s", err)
-			}
-			err = ioutil.WriteFile(upstreamParsersFname, upstreamParsersContent, 0644)
-			if err != nil {
-				return fmt.Errorf("unable to write to %s %s : %s", itemType, upstreamParsersFname, err)
-			}
-			clog.Infof("Wrote %d entries for %s to %s", len(upstreamParsers), itemType, upstreamParsersFname)
-
-		} else {
+		if itemMap == nil {
 			clog.Infof("No %s to backup.", itemType)
+			continue
 		}
+		itemDirectory = fmt.Sprintf("%s/%s/", dirPath, itemType)
+		if err := os.MkdirAll(itemDirectory, os.ModePerm); err != nil {
+			return fmt.Errorf("error while creating %s : %s", itemDirectory, err)
+		}
+		upstreamParsers = []string{}
+		for k, v := range itemMap {
+			clog = clog.WithFields(log.Fields{
+				"file": v.Name,
+			})
+			if !v.Installed { //only backup installed ones
+				clog.Debugf("[%s] : not installed", k)
+				continue
+			}
+
+			//for the local/tainted ones, we backup the full file
+			if v.Tainted || v.Local || !v.UpToDate {
+				//we need to backup stages for parsers
+				if itemType == cwhub.PARSERS || itemType == cwhub.PARSERS_OVFLW {
+					fstagedir := fmt.Sprintf("%s%s", itemDirectory, v.Stage)
+					if err := os.MkdirAll(fstagedir, os.ModePerm); err != nil {
+						return fmt.Errorf("error while creating stage dir %s : %s", fstagedir, err)
+					}
+				}
+				clog.Debugf("[%s] : backuping file (tainted:%t local:%t up-to-date:%t)", k, v.Tainted, v.Local, v.UpToDate)
+				tfile := fmt.Sprintf("%s%s/%s", itemDirectory, v.Stage, v.FileName)
+				if err = types.CopyFile(v.LocalPath, tfile); err != nil {
+					return fmt.Errorf("failed copy %s %s to %s : %s", itemType, v.LocalPath, tfile, err)
+				}
+				clog.Infof("local/tainted saved %s to %s", v.LocalPath, tfile)
+				continue
+			}
+			clog.Debugf("[%s] : from hub, just backup name (up-to-date:%t)", k, v.UpToDate)
+			clog.Infof("saving, version:%s, up-to-date:%t", v.Version, v.UpToDate)
+			upstreamParsers = append(upstreamParsers, v.Name)
+		}
+		//write the upstream items
+		upstreamParsersFname := fmt.Sprintf("%s/upstream-%s.json", itemDirectory, itemType)
+		upstreamParsersContent, err := json.MarshalIndent(upstreamParsers, "", " ")
+		if err != nil {
+			return fmt.Errorf("failed marshaling upstream parsers : %s", err)
+		}
+		err = ioutil.WriteFile(upstreamParsersFname, upstreamParsersContent, 0644)
+		if err != nil {
+			return fmt.Errorf("unable to write to %s %s : %s", itemType, upstreamParsersFname, err)
+		}
+		clog.Infof("Wrote %d entries for %s to %s", len(upstreamParsers), itemType, upstreamParsersFname)
 	}

 	return nil
@@ -1,5 +0,0 @@
-package main
-
-func bToMb(b uint64) uint64 {
-	return b / 1024 / 1024
-}
@@ -193,7 +193,6 @@ func (c *Controller) CreateAlert(gctx *gin.Context) {
 	}

 	gctx.JSON(http.StatusCreated, alerts)
-	return
 }

 // FindAlerts : return alerts from database based on the specified filter
@@ -211,7 +210,6 @@ func (c *Controller) FindAlerts(gctx *gin.Context) {
 		return
 	}
 	gctx.JSON(http.StatusOK, data)
-	return
 }

 // FindAlertByID return the alert assiocated to the ID
@@ -234,7 +232,6 @@ func (c *Controller) FindAlertByID(gctx *gin.Context) {
 		return
 	}
 	gctx.JSON(http.StatusOK, data)
-	return
 }

 // DeleteAlerts : delete alerts from database based on the specified filter
@@ -254,5 +251,4 @@ func (c *Controller) DeleteAlerts(gctx *gin.Context) {
 		NbDeleted: strconv.Itoa(nbDeleted),
 	}
 	gctx.JSON(http.StatusOK, deleteAlertsResp)
-	return
 }
@@ -61,7 +61,6 @@ func (c *Controller) GetDecision(gctx *gin.Context) {
 		return
 	}
 	gctx.JSON(http.StatusOK, results)
-	return
 }

 func (c *Controller) DeleteDecisionById(gctx *gin.Context) {
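The five controller hunks above all delete a bare return that is the last statement of the function; it changes nothing at runtime and is the kind of redundant control flow staticcheck reports (S1023). A minimal before/after sketch with made-up handler names, not the actual gin controller code:

package main

import "fmt"

// before: the trailing return is a noop and gets flagged.
func respondBefore(status int) {
	fmt.Println("status:", status)
	return // redundant: already the last statement
}

// after: identical behaviour without the extra statement.
func respondAfter(status int) {
	fmt.Println("status:", status)
}

func main() {
	respondBefore(200)
	respondAfter(200)
}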
@@ -210,7 +210,7 @@ func (s *ScenarioAssert) AutoGenScenarioAssert() string {
 			}
 		}
 		ret += fmt.Sprintf(`results[%d].Overflow.Alert.GetScenario() == "%s"`+"\n", eventIndex, *event.Overflow.Alert.Scenario)
-		ret += fmt.Sprintf(`results[%d].Overflow.Alert.Remediation == %t`+"\n", eventIndex, *&event.Overflow.Alert.Remediation)
+		ret += fmt.Sprintf(`results[%d].Overflow.Alert.Remediation == %t`+"\n", eventIndex, event.Overflow.Alert.Remediation)
 		ret += fmt.Sprintf(`results[%d].Overflow.Alert.GetEventsCount() == %d`+"\n", eventIndex, *event.Overflow.Alert.EventsCount)
 	}
 	return ret
@@ -93,7 +93,7 @@ func TestGetters(t *testing.T) {
 	}

 	//Get item : good and bad
-	for k, _ := range x {
+	for k := range x {
 		empty := GetItem(COLLECTIONS, k+"nope")
 		if empty != nil {
 			t.Fatalf("expected empty item")
@@ -198,7 +198,7 @@ func testInstallItem(cfg *csconfig.Hub, t *testing.T, item Item) {
 		t.Fatalf("download: %s should be up-to-date", item.Name)
 	}
 	if hubIdx[item.Type][item.Name].Installed {
-		t.Fatalf("download: %s should not be install", item.Name)
+		t.Fatalf("download: %s should not be installed", item.Name)
 	}
 	if hubIdx[item.Type][item.Name].Tainted {
 		t.Fatalf("download: %s should not be tainted", item.Name)
@@ -212,7 +212,7 @@ func testInstallItem(cfg *csconfig.Hub, t *testing.T, item Item) {
 		t.Fatalf("taint: failed to run localSync : %s", err)
 	}
 	if !hubIdx[item.Type][item.Name].Installed {
-		t.Fatalf("install: %s should be install", item.Name)
+		t.Fatalf("install: %s should be installed", item.Name)
 	}
 }

@@ -849,6 +849,9 @@ func (c *Client) DeleteAlertWithFilter(filter map[string][]string) (int, error)

+	// Get all the alerts that match the filter
+	alertsToDelete, err := c.QueryAlertWithFilter(filter)
 	if err != nil {
 		return 0, errors.Wrap(DeleteFail, "alert query failed")
 	}
+
 	for _, alertItem := range alertsToDelete {
 		err = c.DeleteAlertGraph(alertItem)
@@ -279,17 +279,16 @@ func NewAlert(leaky *Leaky, queue *Queue) (types.RuntimeAlert, error) {
 	}
 	runtimeAlert.Sources = sources
 	//Include source info in format string
-	sourceStr := ""
+	sourceStr := "UNKNOWN"
 	if len(sources) > 1 {
 		sourceStr = fmt.Sprintf("%d sources", len(sources))
 	} else if len(sources) == 1 {
-		for k, _ := range sources {
+		for k := range sources {
 			sourceStr = k
 			break
 		}
-	} else {
-		sourceStr = "UNKNOWN"
 	}
+
 	*apiAlert.Message = fmt.Sprintf("%s %s performed '%s' (%d events over %s) at %s", source_scope, sourceStr, leaky.Name, leaky.Total_count, leaky.Ovflw_ts.Sub(leaky.First_ts), leaky.Last_ts)
 	//Get the events from Leaky/Queue
 	apiAlert.Events = EventsFromQueue(queue)
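Initializing sourceStr to "UNKNOWN" up front lets the final else branch go away: the default value already covers the zero-source case. The same idea in isolation; describeSources is a sketch written for this note, not a function in the crowdsec code base:

package main

import "fmt"

// describeSources starts from the default value so no explicit else is needed.
func describeSources(sources map[string]struct{}) string {
	sourceStr := "UNKNOWN"
	if len(sources) > 1 {
		sourceStr = fmt.Sprintf("%d sources", len(sources))
	} else if len(sources) == 1 {
		for k := range sources {
			sourceStr = k
			break
		}
	}
	return sourceStr
}

func main() {
	fmt.Println(describeSources(nil))                                 // UNKNOWN
	fmt.Println(describeSources(map[string]struct{}{"10.0.0.1": {}})) // 10.0.0.1
}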
@@ -155,7 +155,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
 			srcs = append(srcs, net.ParseIP(p.Meta["source_ip"]))
 		}
 	} else if p.Type == types.OVFLW {
-		for k, _ := range p.Overflow.Sources {
+		for k := range p.Overflow.Sources {
 			srcs = append(srcs, net.ParseIP(k))
 		}
 	}
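This hunk, together with the ones in NewAlert and TestGetters, shortens for k, _ := range m to for k := range m; the blank value variable is dead weight that gofmt -s and staticcheck (S1005) simplify away. A tiny runnable illustration with made-up map contents:

package main

import "fmt"

func main() {
	sources := map[string]int{"10.0.0.1": 3, "10.0.0.2": 1}

	// for k, _ := range sources { ... }  // the blank identifier adds nothing
	for k := range sources { // simplified form used throughout this commit
		fmt.Println("source:", k)
	}
}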
@@ -39,7 +39,7 @@ declare stderr
     assert_output --partial "Constraint_acquis:"
 }

-@test "$FILE cscli alerts list: at startup returns one entry: community pull" {
+@test "$FILE cscli alerts list: at startup returns at least one entry: community pull" {
     loop_max=15
     for ((i=0; i<=loop_max; i++)); do
         sleep 2
@@ -47,7 +47,7 @@ declare stderr
     [[ "$output" != "null" ]] && break
     done
     run -0 jq -r '. | length' <(output)
-    assert_output 1
+    refute_output 0
 }

 @test "$FILE cscli capi status" {
tests/bats/80_alerts.bats (new file)
@@ -0,0 +1,40 @@
+#!/usr/bin/env bats
+# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si:
+
+set -u
+
+setup_file() {
+    load "../lib/setup_file.sh" >&3 2>&1
+}
+
+teardown_file() {
+    load "../lib/teardown_file.sh" >&3 2>&1
+}
+
+setup() {
+    load "../lib/setup.sh"
+    ./instance-data load
+    ./instance-crowdsec start
+}
+
+teardown() {
+    ./instance-crowdsec stop
+}
+
+#----------
+
+@test "$FILE cscli alerts list, with and without --machine" {
+    run -0 cscli decisions add -i 10.20.30.40 -t ban
+
+    run -0 cscli alerts list
+    refute_output --partial 'MACHINE'
+    # machine name appears quoted in the "REASON" column
+    assert_output --partial "| 'githubciXXXXXXXXXXXXXXXXXXXXXXXX' |"
+    refute_output --partial "| githubciXXXXXXXXXXXXXXXXXXXXXXXX |"
+
+    run -0 cscli alerts list --machine
+    assert_output --partial 'MACHINE'
+    assert_output --partial "| 'githubciXXXXXXXXXXXXXXXXXXXXXXXX' |"
+    assert_output --partial "| githubciXXXXXXXXXXXXXXXXXXXXXXXX |"
+}