Browse Source

typos/grammar (#2561)

mmetc 1 year ago
parent
commit
5cd4406f5e

+ 1 - 1
.github/workflows/bats.yml

@@ -31,7 +31,7 @@ jobs:
 
 
  # Jobs for Postgres (and sometimes MySQL) can have failing tests on GitHub
  # CI, but they pass when run on devs' machines or in the release checks. We
-  # disable them here by default. Remove the if..false to enable them.
+  # disable them here by default. Remove if...false to enable them.

  mariadb:
    uses: ./.github/workflows/bats-mysql.yml

+ 2 - 2
cmd/crowdsec-cli/config_backup.go

@@ -41,9 +41,9 @@ func backupHub(dirPath string) error {
 				continue
 			}

-			//for the local/tainted ones, we backup the full file
+			//for the local/tainted ones, we back up the full file
 			if v.Tainted || v.Local || !v.UpToDate {
-				//we need to backup stages for parsers
+				//we need to back up stages for parsers
 				if itemType == cwhub.PARSERS || itemType == cwhub.PARSERS_OVFLW {
 					fstagedir := fmt.Sprintf("%s%s", itemDirectory, v.Stage)
 					if err := os.MkdirAll(fstagedir, os.ModePerm); err != nil {

+ 1 - 1
cmd/crowdsec-cli/dashboard.go

@@ -37,7 +37,7 @@ var (
 
 
 	forceYes bool

-	/*informations needed to setup a random password on user's behalf*/
+	// information needed to set up a random password on user's behalf
 )

 func NewDashboardCmd() *cobra.Command {

+ 1 - 1
pkg/acquisition/modules/cloudwatch/cloudwatch_test.go

@@ -199,7 +199,7 @@ stream_regexp: test_bad[0-9]+`),
 			},
 			expectedResLen: 0,
 		},
-		// require a group name that does exist and contains a stream in which we gonna put events
+		// require a group name that does exist and contains a stream in which we are going to put events
 		{
 			name: "group_exists_stream_exists_has_events",
 			config: []byte(`

+ 1 - 1
pkg/apiserver/apic.go

@@ -213,7 +213,7 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con
 	}

 	// The watcher will be authenticated by the RoundTripper the first time it will call CAPI
-	// Explicit authentication will provoke an useless supplementary call to CAPI
+	// Explicit authentication will provoke a useless supplementary call to CAPI
 	scenarios, err := ret.FetchScenariosListFromDB()
 	if err != nil {
 		return ret, fmt.Errorf("get scenario in db: %w", err)

+ 2 - 2
pkg/apiserver/apic_test.go

@@ -572,7 +572,7 @@ func TestAPICWhitelists(t *testing.T) {
 					&modelscapi.GetDecisionsStreamResponseDeletedItem{
 						Decisions: []string{
 							"9.9.9.9", // This is already present in DB
-							"9.1.9.9", // This not present in DB
+							"9.1.9.9", // This is not present in DB
 						},
 						Scope: ptr.Of("Ip"),
 					}, // This is already present in DB
@@ -734,7 +734,7 @@ func TestAPICPullTop(t *testing.T) {
 					&modelscapi.GetDecisionsStreamResponseDeletedItem{
 						Decisions: []string{
 							"9.9.9.9", // This is already present in DB
-							"9.1.9.9", // This not present in DB
+							"9.1.9.9", // This is not present in DB
 						},
 						Scope: ptr.Of("Ip"),
 					}, // This is already present in DB

+ 1 - 1
pkg/csconfig/config.go

@@ -23,7 +23,7 @@ var defaultDataDir = "/var/lib/crowdsec/data/"
 
 
 // Config contains top-level defaults -> overridden by configuration file -> overridden by CLI flags
 type Config struct {
-	//just a path to ourself :p
+	//just a path to ourselves :p
 	FilePath     *string             `yaml:"-"`
 	Self         []byte              `yaml:"-"`
 	Common       *CommonCfg          `yaml:"common,omitempty"`

+ 4 - 4
pkg/csplugin/broker.go

@@ -110,7 +110,7 @@ loop:
 			pb.addProfileAlert(profileAlert)

 		case pluginName := <-pb.watcher.PluginEvents:
-			// this can be ran in goroutine, but then locks will be needed
+			// this can be run in goroutine, but then locks will be needed
 			pluginMutex.Lock()
 			log.Tracef("going to deliver %d alerts to plugin %s", len(pb.alertsByPluginName[pluginName]), pluginName)
 			tmpAlerts := pb.alertsByPluginName[pluginName]
@@ -139,7 +139,7 @@ loop:
 					pb.Kill()
 					break loop
 				case pluginName := <-pb.watcher.PluginEvents:
-					// this can be ran in goroutine, but then locks will be needed
+					// this can be run in goroutine, but then locks will be needed
 					pluginMutex.Lock()
 					log.Tracef("going to deliver %d alerts to plugin %s", len(pb.alertsByPluginName[pluginName]), pluginName)
 					tmpAlerts := pb.alertsByPluginName[pluginName]
@@ -206,7 +206,7 @@ func (pb *PluginBroker) loadConfig(path string) error {
 	return err
 }

-// checks whether every notification in profile has it's own config file
+// checks whether every notification in profile has its own config file
 func (pb *PluginBroker) verifyPluginConfigsWithProfile() error {
 	for _, profileCfg := range pb.profileConfigs {
 		for _, pluginName := range profileCfg.Notifications {
@@ -219,7 +219,7 @@ func (pb *PluginBroker) verifyPluginConfigsWithProfile() error {
 	return nil
 }

-// check whether each plugin in profile has it's own binary
+// check whether each plugin in profile has its own binary
 func (pb *PluginBroker) verifyPluginBinaryWithProfile() error {
 	for _, profileCfg := range pb.profileConfigs {
 		for _, pluginName := range profileCfg.Notifications {

+ 3 - 3
pkg/leakybucket/bucket.go

@@ -30,13 +30,13 @@ type Leaky struct {
 	//the limiter is what holds the proper "leaky aspect", it determines when/if we can pour objects
 	Limiter         rate.RateLimiter `json:"-"`
 	SerializedState rate.Lstate
-	//Queue is used to held the cache of objects in the bucket, it is used to know 'how many' objects we have in buffer.
+	//Queue is used to hold the cache of objects in the bucket, it is used to know 'how many' objects we have in buffer.
 	Queue *Queue
 	//Leaky buckets are receiving message through a chan
 	In chan *types.Event `json:"-"`
 	//Leaky buckets are pushing their overflows through a chan
 	Out chan *Queue `json:"-"`
-	// shared for all buckets (the idea is to kill this afterwards)
+	// shared for all buckets (the idea is to kill this afterward)
 	AllOut chan types.Event `json:"-"`
 	//max capacity (for burst)
 	Capacity int
@@ -332,7 +332,7 @@ func LeakRoutine(leaky *Leaky) error {
 
 
 			}
 			if leaky.logger.Level >= log.TraceLevel {
-				/*don't sdump if it's not going to printed, it's expensive*/
+				/*don't sdump if it's not going to be printed, it's expensive*/
 				leaky.logger.Tracef("Overflow event: %s", spew.Sdump(types.Event{Overflow: alert}))
 			}
 
 

+ 1 - 1
pkg/metabase/api.go

@@ -79,7 +79,7 @@ func (h *MBClient) Do(method string, route string, body interface{}) (interface{
 	return Success, Error, err
 }

-// Set set headers as key:value
+// Set headers as key:value
 func (h *MBClient) Set(key string, value string) {
 	h.CTX = h.CTX.Set(key, value)
 }