typos/grammar (#2561)
commit 5cd4406f5e (parent 4934fce769)
10 changed files with 17 additions and 17 deletions
.github/workflows/bats.yml (vendored, 2 changes)
@@ -31,7 +31,7 @@ jobs:
 
   # Jobs for Postgres (and sometimes MySQL) can have failing tests on GitHub
   # CI, but they pass when run on devs' machines or in the release checks. We
-  # disable them here by default. Remove the if..false to enable them.
+  # disable them here by default. Remove if...false to enable them.
 
   mariadb:
     uses: ./.github/workflows/bats-mysql.yml
@@ -41,9 +41,9 @@ func backupHub(dirPath string) error {
 			continue
 		}
 
-		//for the local/tainted ones, we backup the full file
+		//for the local/tainted ones, we back up the full file
 		if v.Tainted || v.Local || !v.UpToDate {
-			//we need to backup stages for parsers
+			//we need to back up stages for parsers
 			if itemType == cwhub.PARSERS || itemType == cwhub.PARSERS_OVFLW {
 				fstagedir := fmt.Sprintf("%s%s", itemDirectory, v.Stage)
 				if err := os.MkdirAll(fstagedir, os.ModePerm); err != nil {
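For context on the hunk above: local/tainted items are backed up in full, and parsers additionally need their stage subdirectory recreated under the backup root. A minimal sketch of that pattern, not taken from the crowdsec source; backupItemFile, its arguments, and the paths are illustrative:

// Minimal sketch (illustrative names, not this commit's code): back up one
// item file while preserving its parser stage subdirectory.
package backup

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
)

func backupItemFile(srcPath, backupDir, stage string) error {
	// parsers are grouped by stage (e.g. "s01-parse"), so recreate that level first
	stageDir := filepath.Join(backupDir, stage)
	if err := os.MkdirAll(stageDir, os.ModePerm); err != nil {
		return fmt.Errorf("while creating %s: %w", stageDir, err)
	}

	src, err := os.Open(srcPath)
	if err != nil {
		return err
	}
	defer src.Close()

	dst, err := os.Create(filepath.Join(stageDir, filepath.Base(srcPath)))
	if err != nil {
		return err
	}
	defer dst.Close()

	// copy the full file content, as is done for local/tainted items
	_, err = io.Copy(dst, src)
	return err
}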
@@ -37,7 +37,7 @@ var (
 
 	forceYes bool
 
-	/*informations needed to setup a random password on user's behalf*/
+	// information needed to set up a random password on user's behalf
 )
 
 func NewDashboardCmd() *cobra.Command {
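The comment above refers to setting up a random password on the user's behalf. A minimal sketch of how such a password could be generated with crypto/rand; generatePassword and the charset are assumptions, not the crowdsec implementation:

// Minimal sketch (assumption, not the real dashboard code): generate a random
// password on the user's behalf using crypto/rand.
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

func generatePassword(length int) (string, error) {
	pw := make([]byte, length)
	for i := range pw {
		// crypto/rand.Int avoids the modulo bias a naive byte%len(charset) would have
		n, err := rand.Int(rand.Reader, big.NewInt(int64(len(charset))))
		if err != nil {
			return "", err
		}
		pw[i] = charset[n.Int64()]
	}
	return string(pw), nil
}

func main() {
	pw, err := generatePassword(16)
	if err != nil {
		panic(err)
	}
	fmt.Println(pw)
}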
@@ -199,7 +199,7 @@ stream_regexp: test_bad[0-9]+`),
 			},
 			expectedResLen: 0,
 		},
-		// require a group name that does exist and contains a stream in which we gonna put events
+		// require a group name that does exist and contains a stream in which we are going to put events
 		{
 			name: "group_exists_stream_exists_has_events",
 			config: []byte(`
@@ -213,7 +213,7 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con
 	}
 
 	// The watcher will be authenticated by the RoundTripper the first time it will call CAPI
-	// Explicit authentication will provoke an useless supplementary call to CAPI
+	// Explicit authentication will provoke a useless supplementary call to CAPI
 	scenarios, err := ret.FetchScenariosListFromDB()
 	if err != nil {
 		return ret, fmt.Errorf("get scenario in db: %w", err)
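The comments above note that the watcher is authenticated lazily by the RoundTripper on its first call to CAPI, so an explicit login up front would be redundant. A minimal sketch of a lazily authenticating http.RoundTripper; lazyAuthTransport and fetchToken are illustrative, not the real apiclient types:

// Minimal sketch (assumption): a RoundTripper that fetches a token on the
// first request and reuses it afterwards, so no explicit authentication call
// is needed before the first API call.
package capisketch

import (
	"net/http"
	"sync"
)

type lazyAuthTransport struct {
	base      http.RoundTripper
	fetchOnce sync.Once
	token     string
	fetchErr  error
	// fetchToken stands in for whatever performs the actual login call
	fetchToken func() (string, error)
}

func (t *lazyAuthTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	t.fetchOnce.Do(func() {
		t.token, t.fetchErr = t.fetchToken()
	})
	if t.fetchErr != nil {
		return nil, t.fetchErr
	}
	// clone before mutating: RoundTrippers must not modify the caller's request
	clone := req.Clone(req.Context())
	clone.Header.Set("Authorization", "Bearer "+t.token)
	return t.base.RoundTrip(clone)
}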
@@ -572,7 +572,7 @@ func TestAPICWhitelists(t *testing.T) {
 				&modelscapi.GetDecisionsStreamResponseDeletedItem{
 					Decisions: []string{
 						"9.9.9.9", // This is already present in DB
-						"9.1.9.9", // This not present in DB
+						"9.1.9.9", // This is not present in DB
 					},
 					Scope: ptr.Of("Ip"),
 				}, // This is already present in DB
@@ -734,7 +734,7 @@ func TestAPICPullTop(t *testing.T) {
 				&modelscapi.GetDecisionsStreamResponseDeletedItem{
 					Decisions: []string{
 						"9.9.9.9", // This is already present in DB
-						"9.1.9.9", // This not present in DB
+						"9.1.9.9", // This is not present in DB
 					},
 					Scope: ptr.Of("Ip"),
 				}, // This is already present in DB
@@ -23,7 +23,7 @@ var defaultDataDir = "/var/lib/crowdsec/data/"
 
 // Config contains top-level defaults -> overridden by configuration file -> overridden by CLI flags
 type Config struct {
-	//just a path to ourself :p
+	//just a path to ourselves :p
 	FilePath *string `yaml:"-"`
 	Self []byte `yaml:"-"`
 	Common *CommonCfg `yaml:"common,omitempty"`
@@ -110,7 +110,7 @@ loop:
 			pb.addProfileAlert(profileAlert)
 
 		case pluginName := <-pb.watcher.PluginEvents:
-			// this can be ran in goroutine, but then locks will be needed
+			// this can be run in goroutine, but then locks will be needed
 			pluginMutex.Lock()
 			log.Tracef("going to deliver %d alerts to plugin %s", len(pb.alertsByPluginName[pluginName]), pluginName)
 			tmpAlerts := pb.alertsByPluginName[pluginName]
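The comment above explains that delivery could run in a goroutine, but only with locks protecting the shared per-plugin alert map. A minimal sketch of that trade-off, assuming a simplified broker type and a string stand-in for the real alert type:

// Minimal sketch (assumption): if delivery ran in its own goroutine, the
// shared alertsByPluginName map would need to stay behind a mutex.
package brokersketch

import "sync"

type broker struct {
	mu                 sync.Mutex
	alertsByPluginName map[string][]string // string stands in for the real alert type
}

func (b *broker) deliver(pluginName string, send func([]string)) {
	go func() {
		b.mu.Lock()
		tmp := b.alertsByPluginName[pluginName]
		b.alertsByPluginName[pluginName] = nil // reset under the lock
		b.mu.Unlock()

		// the actual (possibly slow) delivery happens outside the critical section
		send(tmp)
	}()
}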
@@ -139,7 +139,7 @@ loop:
 			pb.Kill()
 			break loop
 		case pluginName := <-pb.watcher.PluginEvents:
-			// this can be ran in goroutine, but then locks will be needed
+			// this can be run in goroutine, but then locks will be needed
 			pluginMutex.Lock()
 			log.Tracef("going to deliver %d alerts to plugin %s", len(pb.alertsByPluginName[pluginName]), pluginName)
 			tmpAlerts := pb.alertsByPluginName[pluginName]
@@ -206,7 +206,7 @@ func (pb *PluginBroker) loadConfig(path string) error {
 		return err
 	}
 
-// checks whether every notification in profile has it's own config file
+// checks whether every notification in profile has its own config file
 func (pb *PluginBroker) verifyPluginConfigsWithProfile() error {
 	for _, profileCfg := range pb.profileConfigs {
 		for _, pluginName := range profileCfg.Notifications {
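verifyPluginConfigsWithProfile checks that every notification referenced by a profile has its own config file. A minimal sketch of such a cross-check, assuming simplified profileConfig and pluginConfigs types rather than the real PluginBroker fields:

// Minimal sketch (illustrative types): verify that every notification
// referenced by a profile has a matching plugin config entry.
package pluginsketch

import "fmt"

type profileConfig struct {
	Notifications []string
}

func verifyNotificationsHaveConfig(profiles []profileConfig, pluginConfigs map[string]struct{}) error {
	for _, profile := range profiles {
		for _, name := range profile.Notifications {
			if _, ok := pluginConfigs[name]; !ok {
				return fmt.Errorf("config file for plugin %s not found", name)
			}
		}
	}
	return nil
}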
@@ -219,7 +219,7 @@ func (pb *PluginBroker) verifyPluginConfigsWithProfile() error {
 	return nil
 }
 
-// check whether each plugin in profile has it's own binary
+// check whether each plugin in profile has its own binary
 func (pb *PluginBroker) verifyPluginBinaryWithProfile() error {
 	for _, profileCfg := range pb.profileConfigs {
 		for _, pluginName := range profileCfg.Notifications {
@@ -30,13 +30,13 @@ type Leaky struct {
 	//the limiter is what holds the proper "leaky aspect", it determines when/if we can pour objects
 	Limiter rate.RateLimiter `json:"-"`
 	SerializedState rate.Lstate
-	//Queue is used to held the cache of objects in the bucket, it is used to know 'how many' objects we have in buffer.
+	//Queue is used to hold the cache of objects in the bucket, it is used to know 'how many' objects we have in buffer.
 	Queue *Queue
 	//Leaky buckets are receiving message through a chan
 	In chan *types.Event `json:"-"`
 	//Leaky buckets are pushing their overflows through a chan
 	Out chan *Queue `json:"-"`
-	// shared for all buckets (the idea is to kill this afterwards)
+	// shared for all buckets (the idea is to kill this afterward)
 	AllOut chan types.Event `json:"-"`
 	//max capacity (for burst)
 	Capacity int
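The Leaky struct's comments describe the limiter as the part that decides when events may be poured, with Capacity acting as the burst size. A minimal sketch of that leaky-bucket gating using golang.org/x/time/rate rather than crowdsec's own rate.RateLimiter; bucket, newBucket, and pour are illustrative:

// Minimal sketch (assumption): the limiter gates pours, Capacity maps to the
// burst size, and a refused pour corresponds to an overflow.
package bucketsketch

import "golang.org/x/time/rate"

type bucket struct {
	limiter *rate.Limiter
	queue   []string // stands in for the cached events
}

func newBucket(leakEverySeconds float64, capacity int) *bucket {
	return &bucket{
		// one token "leaks" back every leakEverySeconds; capacity is the burst
		limiter: rate.NewLimiter(rate.Limit(1.0/leakEverySeconds), capacity),
	}
}

// pour adds an event if the limiter allows it; returns false on overflow.
func (b *bucket) pour(evt string) bool {
	if !b.limiter.Allow() {
		return false
	}
	b.queue = append(b.queue, evt)
	return true
}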
@@ -332,7 +332,7 @@ func LeakRoutine(leaky *Leaky) error {
 
 		}
 		if leaky.logger.Level >= log.TraceLevel {
-			/*don't sdump if it's not going to printed, it's expensive*/
+			/*don't sdump if it's not going to be printed, it's expensive*/
 			leaky.logger.Tracef("Overflow event: %s", spew.Sdump(types.Event{Overflow: alert}))
 		}
 
@@ -79,7 +79,7 @@ func (h *MBClient) Do(method string, route string, body interface{}) (interface{
 	return Success, Error, err
 }
 
-// Set set headers as key:value
+// Set headers as key:value
 func (h *MBClient) Set(key string, value string) {
 	h.CTX = h.CTX.Set(key, value)
 }