mmetc, 3 years ago
parent commit 4b9a0c4ef7

+ 1 - 1
cmd/crowdsec-cli/explain.go

@@ -60,7 +60,7 @@ cscli explain --dsn "file://myfile.log" --type nginx
 			if logFile != "" {
 				absolutePath, err := filepath.Abs(logFile)
 				if err != nil {
-					log.Fatalf("unable to get absolue path of '%s', exiting", logFile)
+					log.Fatalf("unable to get absolute path of '%s', exiting", logFile)
 				}
 				dsn = fmt.Sprintf("file://%s", absolutePath)
 				lineCount := types.GetLineCountForFile(absolutePath)
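
In the surrounding code, the log file is resolved to an absolute path before the DSN is built. A minimal standalone sketch of that pattern, assuming the "myfile.log" input from the example usage above (not code from this commit):

    // Sketch: resolve a log file to an absolute path and build a file:// DSN,
    // mirroring the pattern shown in the hunk above.
    package main

    import (
    	"fmt"
    	"log"
    	"path/filepath"
    )

    func main() {
    	logFile := "myfile.log" // illustrative input, not from the commit
    	absolutePath, err := filepath.Abs(logFile)
    	if err != nil {
    		log.Fatalf("unable to get absolute path of '%s', exiting", logFile)
    	}
    	dsn := fmt.Sprintf("file://%s", absolutePath)
    	fmt.Println(dsn)
    }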

+ 2 - 2
cmd/crowdsec-cli/hubtest.go

@@ -463,7 +463,7 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios
 							status = emoji.GreenCircle.String()
 							parserTested += 1
 						}
-						table.Append([]string{test.Parser, status, fmt.Sprintf("%d times (accross %d tests)", test.TestsCount, len(test.PresentIn))})
+						table.Append([]string{test.Parser, status, fmt.Sprintf("%d times (across %d tests)", test.TestsCount, len(test.PresentIn))})
 					}
 					table.Render()
 				}
@@ -482,7 +482,7 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios
 						if test.TestsCount > 0 {
 							status = emoji.GreenCircle.String()
 						}
-						table.Append([]string{test.Scenario, status, fmt.Sprintf("%d times (accross %d tests)", test.TestsCount, len(test.PresentIn))})
+						table.Append([]string{test.Scenario, status, fmt.Sprintf("%d times (across %d tests)", test.TestsCount, len(test.PresentIn))})
 					}
 					table.Render()
 				}

+ 1 - 1
cmd/crowdsec-cli/metrics.go

@@ -23,7 +23,7 @@ import (
 func lapiMetricsToTable(table *tablewriter.Table, stats map[string]map[string]map[string]int) error {
 
 	//stats : machine -> route -> method -> count
-	/*we want consistant display order*/
+	/*we want consistent display order*/
 	machineKeys := []string{}
 	for k := range stats {
 		machineKeys = append(machineKeys, k)
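
The "consistent display order" comment exists because Go randomizes map iteration order, so the keys are collected and then sorted before rendering. A hedged sketch of that pattern, simplified to one map level (not the actual lapiMetricsToTable body, which continues past this hunk):

    // Sketch: sort map keys so rows render in a stable order across runs.
    package main

    import (
    	"fmt"
    	"sort"
    )

    func main() {
    	stats := map[string]int{"machine-b": 2, "machine-a": 5} // illustrative data
    	machineKeys := []string{}
    	for k := range stats {
    		machineKeys = append(machineKeys, k)
    	}
    	sort.Strings(machineKeys)
    	for _, k := range machineKeys {
    		fmt.Printf("%s: %d\n", k, stats[k])
    	}
    }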

+ 1 - 1
pkg/apiserver/apic.go

@@ -417,7 +417,7 @@ func (a *apic) PullTop() error {
 		return errors.Wrap(err, "get stream")
 	}
 	a.startup = false
-	/*to count additions/deletions accross lists*/
+	/*to count additions/deletions across lists*/
 
 	add_counters, delete_counters := makeAddAndDeleteCounters()
 	// process deleted decisions

+ 3 - 3
pkg/cstest/hubtest_item.go

@@ -390,7 +390,7 @@ func (t *HubTestItem) InstallHub() error {
 			if err := cwhub.DownloadDataIfNeeded(t.RuntimeHubConfig, item, true); err != nil {
 				return fmt.Errorf("unable to download data for parser '%s': %+v", parserName, err)
 			}
-			log.Debugf("parser '%s' installed succesfully in runtime environment", parserName)
+			log.Debugf("parser '%s' installed successfully in runtime environment", parserName)
 		}
 	}
 
@@ -401,7 +401,7 @@ func (t *HubTestItem) InstallHub() error {
 			if err := cwhub.DownloadDataIfNeeded(t.RuntimeHubConfig, item, true); err != nil {
 				return fmt.Errorf("unable to download data for parser '%s': %+v", scenarioName, err)
 			}
-			log.Debugf("scenario '%s' installed succesfully in runtime environment", scenarioName)
+			log.Debugf("scenario '%s' installed successfully in runtime environment", scenarioName)
 		}
 	}
 
@@ -412,7 +412,7 @@ func (t *HubTestItem) InstallHub() error {
 			if err := cwhub.DownloadDataIfNeeded(t.RuntimeHubConfig, item, true); err != nil {
 				return fmt.Errorf("unable to download data for parser '%s': %+v", postoverflowName, err)
 			}
-			log.Debugf("postoverflow '%s' installed succesfully in runtime environment", postoverflowName)
+			log.Debugf("postoverflow '%s' installed successfully in runtime environment", postoverflowName)
 		}
 	}
 

+ 3 - 3
pkg/cstest/parser_assert.go

@@ -309,7 +309,7 @@ func DumpTree(parser_results ParserResults, bucket_pour BucketPourInfo, opts Dum
 				state[evt.Line.Time] = make(map[string]map[string]ParserResult)
 				assoc[evt.Line.Time] = evt.Line.Raw
 			}
-			//there is a trick : to know if an event succesfully exit the parsers, we check if it reached the pour() phase
+			//there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase
 			//we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered
 			if _, ok := state[evt.Line.Time]["buckets"]; !ok {
 				state[evt.Line.Time]["buckets"] = make(map[string]ParserResult)
@@ -330,7 +330,7 @@ func DumpTree(parser_results ParserResults, bucket_pour BucketPourInfo, opts Dum
 		fmt.Printf("line: %s\n", rawstr)
 		skeys := make([]string, 0, len(state[tstamp]))
 		for k := range state[tstamp] {
-			//there is a trick : to know if an event succesfully exit the parsers, we check if it reached the pour() phase
+			//there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase
 			//we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered
 			if k == "buckets" {
 				continue
@@ -444,7 +444,7 @@ func DumpTree(parser_results ParserResults, bucket_pour BucketPourInfo, opts Dum
 		}
 		bnames := make([]string, 0, len(state[tstamp]["buckets"]))
 		for k := range state[tstamp]["buckets"] {
-			//there is a trick : to know if an event succesfully exit the parsers, we check if it reached the pour() phase
+			//there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase
 			//we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered
 			if k == "OK" {
 				continue

+ 2 - 2
pkg/cwhub/download.go

@@ -69,7 +69,7 @@ func DownloadHubIdx(hub *csconfig.Hub) ([]byte, error) {
 
 	wsize, err := file.WriteString(string(body))
 	if err != nil {
-		return nil, errors.Wrap(err, "while writting hub index file")
+		return nil, errors.Wrap(err, "while writing hub index file")
 	}
 	log.Infof("Wrote new %d bytes index to %s", wsize, hub.HubIndexFile)
 	return body, nil
@@ -208,7 +208,7 @@ func DownloadItem(hub *csconfig.Hub, target Item, overwrite bool) (Item, error)
 	defer f.Close()
 	_, err = f.WriteString(string(body))
 	if err != nil {
-		return target, errors.Wrap(err, "while writting file")
+		return target, errors.Wrap(err, "while writing file")
 	}
 	target.Downloaded = true
 	target.Tainted = false

+ 2 - 2
pkg/database/alerts.go

@@ -299,7 +299,7 @@ func (c *Client) CreateAlertBulk(machineId string, alertList []*models.Alert) ([
 	ret := []string{}
 	bulkSize := 20
 
-	c.Log.Debugf("writting %d items", len(alertList))
+	c.Log.Debugf("writing %d items", len(alertList))
 	bulk := make([]*ent.AlertCreate, 0, bulkSize)
 	alertDecisions := make([][]*ent.Decision, 0, bulkSize)
 	for i, alertItem := range alertList {
@@ -922,7 +922,7 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error {
 	}
 	if MaxItems > 0 {
 		//We get the highest id for the alerts
-		//We substract MaxItems to avoid deleting alerts that are not old enough
+		//We subtract MaxItems to avoid deleting alerts that are not old enough
 		//This gives us the oldest alert that we want to keep
 		//We then delete all the alerts with an id lower than this one
 		//We can do this because the id is auto-increment, and the database won't reuse the same id twice
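
The flush-by-count logic those comments describe is plain arithmetic on auto-increment ids; a sketch under that assumption (values and names are illustrative, not the ent query used by FlushAlerts):

    // Sketch: compute which alerts to delete when MaxItems is set.
    package main

    import "fmt"

    func main() {
    	maxID := 1000   // highest alert id currently in the table (illustrative)
    	maxItems := 200 // number of most recent alerts to keep
    	// maxID - maxItems is the oldest alert we want to keep; everything with a
    	// lower id can be deleted, since ids are auto-increment and never reused.
    	oldestKept := maxID - maxItems
    	fmt.Printf("delete alerts with id < %d\n", oldestKept)
    }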

+ 1 - 1
pkg/leakybucket/buckets_test.go

@@ -223,7 +223,7 @@ POLL_AGAIN:
 
 	for {
 		if len(tf.Results) == 0 && len(results) == 0 {
-			log.Warningf("Test is successfull")
+			log.Warningf("Test is successful")
 			if dump {
 				if tmpFile, err = DumpBucketsStateAt(latest_ts, ".", buckets); err != nil {
 					t.Fatalf("Failed dumping bucket state : %s", err)

+ 4 - 4
pkg/parser/node.go

@@ -32,9 +32,9 @@ type Node struct {
 	Rerferences []string `yaml:"references,omitempty"`
 	//if debug is present in the node, keep its specific Logger in runtime structure
 	Logger *log.Entry `yaml:"-"`
-	//This is mostly a hack to make writting less repetive.
+	//This is mostly a hack to make writing less repetitive.
 	//relying on stage, we know which field to parse, and we
-	//can as well promote log to next stage on success
+	//can also promote log to next stage on success
 	Stage string `yaml:"stage,omitempty"`
 	//OnSuccess allows to tag a node to be able to move log to next stage on success
 	OnSuccess string `yaml:"onsuccess,omitempty"`
@@ -259,7 +259,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
 		}
 		grok := n.Grok.RunTimeRegexp.Parse(gstr)
 		if len(grok) > 0 {
-			/*tag explicitely that the *current* node had a successful grok pattern. it's important to know success state*/
+			/*tag explicitly that the *current* node had a successful grok pattern. it's important to know success state*/
 			NodeHasOKGrok = true
 			clog.Debugf("+ Grok '%s' returned %d entries to merge in Parsed", groklabel, len(grok))
 			//We managed to grok stuff, merged into parse
@@ -301,7 +301,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
 				}
 			} else {
 				/*
-					If the parent node has a successful grok pattern, it's state will stay successfull even if one or more chil fails.
+					If the parent node has a successful grok pattern, it's state will stay successful even if one or more chil fails.
 					If the parent node is a skeleton node (no grok pattern), then at least one child must be successful for it to be a success.
 				*/
 				if !NodeHasOKGrok {
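
The comment in that hunk states a simple success rule; a boolean sketch of the rule as written (names are illustrative, not the actual process() control flow):

    // Sketch: node success rule from the comment above.
    package main

    import "fmt"

    // A node with its own successful grok stays successful regardless of its
    // children; a skeleton node (no grok pattern) needs at least one successful child.
    func nodeSuccess(nodeHasOKGrok, anyChildOK bool) bool {
    	return nodeHasOKGrok || anyChildOK
    }

    func main() {
    	fmt.Println(nodeSuccess(true, false))  // own grok matched, children failed -> still true
    	fmt.Println(nodeSuccess(false, false)) // skeleton node, no child succeeded -> false
    }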

+ 1 - 1
pkg/types/grok_pattern.go

@@ -36,6 +36,6 @@ type GrokPattern struct {
 	//the output of the expression is going to be the source for regexp
 	ExpValue     string      `yaml:"expression,omitempty"`
 	RunTimeValue *vm.Program `json:"-"` //the actual compiled filter
-	//a grok can contain statics that apply if pattern is successfull
+	//a grok can contain statics that apply if pattern is successful
 	Statics []ExtraField `yaml:"statics,omitempty"`
 }

+ 1 - 1
tests/bats.mk

@@ -63,7 +63,7 @@ endef
 bats-all: bats-clean bats-build bats-fixture bats-test bats-test-hub
 
 # Source this to run the scripts outside of the Makefile
-# Old version of make don't have $(file) directive
+# Old versions of make don't have $(file) directive
 bats-environment: export ENV:=$(ENV)
 bats-environment:
 	@echo "$${ENV}" > $(TEST_DIR)/.environment.sh

+ 1 - 1
tests/lib/db/instance-sqlite

@@ -30,7 +30,7 @@ exec_sql() {
 [ -z "${CONFIG_YAML-}" ] && die "\$CONFIG_YAML must be defined."
 
 # ---------------------------
-# In most cases this called with setup argument, and it shouldn't fail for missinf config file.
+# In most cases this is called with setup argument, and it shouldn't fail for missing config file.
 if [ -f "${CONFIG_YAML}" ] ; then
     DATA_DIR=$(yq '.config_paths.data_dir' <"${CONFIG_YAML}")
     DB_FILE="${DATA_DIR}/crowdsec.db"