lint (intrange)
parent 2abc078e53
commit 1f5298f0f1
19 changed files with 29 additions and 29 deletions
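
Every hunk in this commit applies the same mechanical rewrite flagged by the intrange linter: classic three-clause counting loops become Go 1.22 "range over int" loops, and the loop variable is dropped entirely where the body never reads it. A minimal sketch of the forms involved (assumes Go 1.22+; the names n and work are illustrative, not taken from the diff):

package main

import "fmt"

func work(i int) { fmt.Println("iteration", i) }

func main() {
    n := 3

    // Before: classic counting loop, the pattern intrange flags.
    for i := 0; i < n; i++ {
        work(i)
    }

    // After: range over an integer; same count, i runs 0..n-1.
    for i := range n {
        work(i)
    }

    // When the body never uses the index, the variable is dropped too.
    for range n {
        fmt.Println("tick")
    }
}
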
@@ -31,7 +31,7 @@ func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, prin
     spamLimit := make(map[string]bool)
     skipped := 0

-    for aIdx := 0; aIdx < len(*alerts); aIdx++ {
+    for aIdx := range len(*alerts) {
         alertItem := (*alerts)[aIdx]
         newDecisions := make([]*models.Decision, 0)

@@ -41,7 +41,7 @@ func generatePassword(length int) string {

     buf := make([]byte, length)

-    for i := 0; i < length; i++ {
+    for i := range length {
         rInt, err := saferand.Int(saferand.Reader, big.NewInt(int64(charsetLength)))
         if err != nil {
             log.Fatalf("failed getting data from prng for password generation : %s", err)

@@ -65,7 +65,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H
     parsersTomb.Go(func() error {
         parserWg.Add(1)

-        for i := 0; i < cConfig.Crowdsec.ParserRoutinesCount; i++ {
+        for range cConfig.Crowdsec.ParserRoutinesCount {
             parsersTomb.Go(func() error {
                 defer trace.CatchPanic("crowdsec/runParse")

@@ -97,7 +97,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H
             }
         }

-        for i := 0; i < cConfig.Crowdsec.BucketsRoutinesCount; i++ {
+        for range cConfig.Crowdsec.BucketsRoutinesCount {
             bucketsTomb.Go(func() error {
                 defer trace.CatchPanic("crowdsec/runPour")

@@ -128,7 +128,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H
     outputsTomb.Go(func() error {
         outputWg.Add(1)

-        for i := 0; i < cConfig.Crowdsec.OutputRoutinesCount; i++ {
+        for range cConfig.Crowdsec.OutputRoutinesCount {
             outputsTomb.Go(func() error {
                 defer trace.CatchPanic("crowdsec/runOutput")

@@ -269,7 +269,7 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig

func GetMetrics(sources []DataSource, aggregated bool) error {
     var metrics []prometheus.Collector
-    for i := 0; i < len(sources); i++ {
+    for i := range len(sources) {
         if aggregated {
             metrics = sources[i].GetMetrics()
         } else {

@@ -343,7 +343,7 @@ func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb
         return nil
     }

-    for i := 0; i < len(sources); i++ {
+    for i := range len(sources) {
         subsrc := sources[i] //ensure its a copy
         log.Debugf("starting one source %d/%d ->> %T", i, len(sources), subsrc)

@@ -322,7 +322,7 @@ func (f *MockCat) UnmarshalConfig(cfg []byte) error { return nil }
func (f *MockCat) GetName() string { return "mock_cat" }
func (f *MockCat) GetMode() string { return "cat" }
func (f *MockCat) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) error {
-    for i := 0; i < 10; i++ {
+    for range 10 {
         evt := types.Event{}
         evt.Line.Src = "test"
         out <- evt

@@ -369,7 +369,7 @@ func (f *MockTail) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) err
     return fmt.Errorf("can't run in cat mode")
}
func (f *MockTail) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error {
-    for i := 0; i < 10; i++ {
+    for range 10 {
         evt := types.Event{}
         evt.Line.Src = "test"
         out <- evt

@@ -452,7 +452,7 @@ type MockTailError struct {
}

func (f *MockTailError) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error {
-    for i := 0; i < 10; i++ {
+    for range 10 {
         evt := types.Event{}
         evt.Line.Src = "test"
         out <- evt

@@ -202,7 +202,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe

     w.AppsecRunners = make([]AppsecRunner, w.config.Routines)

-    for nbRoutine := 0; nbRoutine < w.config.Routines; nbRoutine++ {
+    for nbRoutine := range w.config.Routines {
         appsecRunnerUUID := uuid.New().String()
         //we copy AppsecRutime for each runner
         wrt := *w.AppsecRuntime

@@ -413,7 +413,7 @@ force_inotify: true`, testPattern),
     fd, err := os.Create("test_files/stream.log")
     require.NoError(t, err, "could not create test file")

-    for i := 0; i < 5; i++ {
+    for i := range 5 {
         _, err = fmt.Fprintf(fd, "%d\n", i)
         if err != nil {
             t.Fatalf("could not write test file : %s", err)

@@ -208,7 +208,7 @@ func (k *KinesisSource) decodeFromSubscription(record []byte) ([]CloudwatchSubsc

func (k *KinesisSource) WaitForConsumerDeregistration(consumerName string, streamARN string) error {
     maxTries := k.Config.MaxRetries
-    for i := 0; i < maxTries; i++ {
+    for i := range maxTries {
         _, err := k.kClient.DescribeStreamConsumer(&kinesis.DescribeStreamConsumerInput{
             ConsumerName: aws.String(consumerName),
             StreamARN:    aws.String(streamARN),

@@ -249,7 +249,7 @@ func (k *KinesisSource) DeregisterConsumer() error {

func (k *KinesisSource) WaitForConsumerRegistration(consumerARN string) error {
     maxTries := k.Config.MaxRetries
-    for i := 0; i < maxTries; i++ {
+    for i := range maxTries {
         describeOutput, err := k.kClient.DescribeStreamConsumer(&kinesis.DescribeStreamConsumerInput{
             ConsumerARN: aws.String(consumerARN),
         })

@@ -71,7 +71,7 @@ func WriteToStream(streamName string, count int, shards int, sub bool) {
     }
     sess := session.Must(session.NewSession())
     kinesisClient := kinesis.New(sess, aws.NewConfig().WithEndpoint(endpoint).WithRegion("us-east-1"))
-    for i := 0; i < count; i++ {
+    for i := range count {
         partition := "partition"
         if shards != 1 {
             partition = fmt.Sprintf("partition-%d", i%shards)

@@ -186,7 +186,7 @@ stream_name: stream-1-shard`,
     //Allow the datasource to start listening to the stream
     time.Sleep(4 * time.Second)
     WriteToStream(f.Config.StreamName, test.count, test.shards, false)
-    for i := 0; i < test.count; i++ {
+    for i := range test.count {
         e := <-out
         assert.Equal(t, fmt.Sprintf("%d", i), e.Line.Raw)
     }

@@ -233,7 +233,7 @@ stream_name: stream-2-shards`,
     time.Sleep(4 * time.Second)
     WriteToStream(f.Config.StreamName, test.count, test.shards, false)
     c := 0
-    for i := 0; i < test.count; i++ {
+    for range test.count {
         <-out
         c += 1
     }

@@ -281,7 +281,7 @@ from_subscription: true`,
     //Allow the datasource to start listening to the stream
     time.Sleep(4 * time.Second)
     WriteToStream(f.Config.StreamName, test.count, test.shards, true)
-    for i := 0; i < test.count; i++ {
+    for i := range test.count {
         e := <-out
         assert.Equal(t, fmt.Sprintf("%d", i), e.Line.Raw)
     }

@@ -276,7 +276,7 @@ func feedLoki(logger *log.Entry, n int, title string) error {
             },
         },
     }
-    for i := 0; i < n; i++ {
+    for i := range n {
         streams.Streams[0].Values[i] = LogValue{
             Time: time.Now(),
             Line: fmt.Sprintf("Log line #%d %v", i, title),

@@ -34,7 +34,7 @@ func isValidHostname(s string) bool {
     last := byte('.')
     nonNumeric := false // true once we've seen a letter or hyphen
     partlen := 0
-    for i := 0; i < len(s); i++ {
+    for i := range len(s) {
         c := s[i]
         switch {
         default:

@@ -41,7 +41,7 @@ func (r retryRoundTripper) RoundTrip(req *http.Request) (*http.Response, error)
         maxAttempts = 1
     }

-    for i := 0; i < maxAttempts; i++ {
+    for i := range maxAttempts {
         if i > 0 {
             if r.withBackOff {
                 //nolint:gosec

@@ -1076,7 +1076,7 @@ func TestAPICPush(t *testing.T) {
     expectedCalls: 2,
     alerts: func() []*models.Alert {
         alerts := make([]*models.Alert, 100)
-        for i := 0; i < 100; i++ {
+        for i := range 100 {
             alerts[i] = &models.Alert{
                 Scenario:     ptr.Of("crowdsec/test"),
                 ScenarioHash: ptr.Of("certified"),

@@ -109,7 +109,7 @@ func FormatAlerts(result []*ent.Alert) models.AddAlertsRequest {
func (c *Controller) sendAlertToPluginChannel(alert *models.Alert, profileID uint) {
     if c.PluginChannel != nil {
     RETRY:
-        for try := 0; try < 3; try++ {
+        for try := range 3 {
             select {
             case c.PluginChannel <- csplugin.ProfileAlert{ProfileID: profileID, Alert: alert}:
                 log.Debugf("alert sent to Plugin channel")

@@ -34,7 +34,7 @@ func resetWatcherAlertCounter(pw *PluginWatcher) {
}

func insertNAlertsToPlugin(pw *PluginWatcher, n int, pluginName string) {
-    for i := 0; i < n; i++ {
+    for range n {
         pw.Inserts <- pluginName
     }
}

@@ -346,7 +346,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part
}

func (erp ExprRuntimeDebug) ipSeek(ip int) []string {
-    for i := 0; i < len(erp.Lines); i++ {
+    for i := range len(erp.Lines) {
         parts := strings.Split(erp.Lines[i], "\t")
         if parts[0] == strconv.Itoa(ip) {
             return parts

@@ -216,7 +216,7 @@ func flatten(args []interface{}, v reflect.Value) []interface{} {
     }

     if v.Kind() == reflect.Array || v.Kind() == reflect.Slice {
-        for i := 0; i < v.Len(); i++ {
+        for i := range v.Len() {
             args = flatten(args, v.Index(i))
         }
     } else {

@@ -298,7 +298,7 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc
         BucketPourCache["OK"] = append(BucketPourCache["OK"], evt.(types.Event))
     }
     //find the relevant holders (scenarios)
-    for idx := 0; idx < len(holders); idx++ {
+    for idx := range len(holders) {
         //for idx, holder := range holders {

         //evaluate bucket's condition

@@ -129,7 +129,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing
         count = b.N
         b.ResetTimer()
     }
-    for n := 0; n < count; n++ {
+    for range count {
         if testFile(tests, *pctx, pnodes) != true {
             return fmt.Errorf("test failed !")
         }

@@ -239,7 +239,7 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo
         valid = true
     }

-    for mapIdx := 0; mapIdx < len(expectMaps); mapIdx++ {
+    for mapIdx := range len(expectMaps) {
         for expKey, expVal := range expectMaps[mapIdx] {
             if outVal, ok := outMaps[mapIdx][expKey]; ok {
                 if outVal == expVal { //ok entry

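One reviewer-facing caveat, not stated in the diff itself: "for i := range n" evaluates n once before the loop starts, whereas the three-clause form re-checks the condition on every iteration. The rewrite is therefore only behavior-preserving when the bound is not modified inside the loop body, which appears to hold for every hunk above (fixed counts, or len() of slices that are not resized within the loop). A small sketch of the difference, with illustrative names only:

package main

import "fmt"

func main() {
    // Three-clause loop: the condition re-reads len(queue) every pass,
    // so an item appended during the loop is also visited.
    queue := []int{1, 2, 3}
    visitedA := 0
    for i := 0; i < len(queue); i++ {
        if queue[i] == 1 {
            queue = append(queue, 4) // grows the bound mid-loop
        }
        visitedA++
    }

    // range-over-int loop: len(queue) is captured once up front,
    // so the element appended mid-loop is not visited.
    queue = []int{1, 2, 3}
    visitedB := 0
    for i := range len(queue) {
        if queue[i] == 1 {
            queue = append(queue, 4)
        }
        visitedB++
    }

    fmt.Println(visitedA, visitedB) // prints: 4 3
}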