lint (intrange)
parent 2abc078e53 · commit 1f5298f0f1
19 changed files with 29 additions and 29 deletions
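All 29 changes are the same mechanical rewrite: classic three-clause counting loops are converted to the range-over-integer form introduced in Go 1.22, which the `intrange` linter (run via golangci-lint) flags. A minimal sketch of the pattern, using illustrative names that do not appear in this commit:

package main

import "fmt"

func main() {
	items := []string{"a", "b", "c"}

	// Before: classic three-clause loop, flagged by intrange.
	for i := 0; i < len(items); i++ {
		fmt.Println(i, items[i])
	}

	// After: Go 1.22+ range over an integer; i runs from 0 to len(items)-1.
	for i := range len(items) {
		fmt.Println(i, items[i])
	}

	// When the index is unused, the variable can be dropped entirely.
	for range 3 {
		fmt.Println("tick")
	}
}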
@@ -31,7 +31,7 @@ func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, prin
 	spamLimit := make(map[string]bool)
 	skipped := 0

-	for aIdx := 0; aIdx < len(*alerts); aIdx++ {
+	for aIdx := range len(*alerts) {
 		alertItem := (*alerts)[aIdx]
 		newDecisions := make([]*models.Decision, 0)
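One subtlety worth noting: with `range len(*alerts)` the length is evaluated once, before the first iteration, whereas the old three-clause loop re-checked `aIdx < len(*alerts)` on every pass. The two forms are interchangeable only when, as here, the slice is not grown or shrunk inside the loop body.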
@@ -41,7 +41,7 @@ func generatePassword(length int) string {

 	buf := make([]byte, length)

-	for i := 0; i < length; i++ {
+	for i := range length {
 		rInt, err := saferand.Int(saferand.Reader, big.NewInt(int64(charsetLength)))
 		if err != nil {
 			log.Fatalf("failed getting data from prng for password generation : %s", err)
@@ -65,7 +65,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H
 	parsersTomb.Go(func() error {
 		parserWg.Add(1)

-		for i := 0; i < cConfig.Crowdsec.ParserRoutinesCount; i++ {
+		for range cConfig.Crowdsec.ParserRoutinesCount {
 			parsersTomb.Go(func() error {
 				defer trace.CatchPanic("crowdsec/runParse")

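Where the index was never used, as with the routine counters above, the rewrite also drops the loop variable entirely (`for range cConfig.Crowdsec.ParserRoutinesCount`), which Go 1.22 permits for any integer range expression.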
@@ -97,7 +97,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H
 		}
 	}

-	for i := 0; i < cConfig.Crowdsec.BucketsRoutinesCount; i++ {
+	for range cConfig.Crowdsec.BucketsRoutinesCount {
 		bucketsTomb.Go(func() error {
 			defer trace.CatchPanic("crowdsec/runPour")

@@ -128,7 +128,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H
 	outputsTomb.Go(func() error {
 		outputWg.Add(1)

-		for i := 0; i < cConfig.Crowdsec.OutputRoutinesCount; i++ {
+		for range cConfig.Crowdsec.OutputRoutinesCount {
 			outputsTomb.Go(func() error {
 				defer trace.CatchPanic("crowdsec/runOutput")

@@ -269,7 +269,7 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig

 func GetMetrics(sources []DataSource, aggregated bool) error {
 	var metrics []prometheus.Collector
-	for i := 0; i < len(sources); i++ {
+	for i := range len(sources) {
 		if aggregated {
 			metrics = sources[i].GetMetrics()
 		} else {
@@ -343,7 +343,7 @@ func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb
 		return nil
 	}

-	for i := 0; i < len(sources); i++ {
+	for i := range len(sources) {
 		subsrc := sources[i] //ensure its a copy
 		log.Debugf("starting one source %d/%d ->> %T", i, len(sources), subsrc)

@@ -322,7 +322,7 @@ func (f *MockCat) UnmarshalConfig(cfg []byte) error { return nil }
 func (f *MockCat) GetName() string { return "mock_cat" }
 func (f *MockCat) GetMode() string { return "cat" }
 func (f *MockCat) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) error {
-	for i := 0; i < 10; i++ {
+	for range 10 {
 		evt := types.Event{}
 		evt.Line.Src = "test"
 		out <- evt
@@ -369,7 +369,7 @@ func (f *MockTail) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) err
 	return fmt.Errorf("can't run in cat mode")
 }
 func (f *MockTail) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error {
-	for i := 0; i < 10; i++ {
+	for range 10 {
 		evt := types.Event{}
 		evt.Line.Src = "test"
 		out <- evt
@@ -452,7 +452,7 @@ type MockTailError struct {
 }

 func (f *MockTailError) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error {
-	for i := 0; i < 10; i++ {
+	for range 10 {
 		evt := types.Event{}
 		evt.Line.Src = "test"
 		out <- evt
@@ -202,7 +202,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe

 	w.AppsecRunners = make([]AppsecRunner, w.config.Routines)

-	for nbRoutine := 0; nbRoutine < w.config.Routines; nbRoutine++ {
+	for nbRoutine := range w.config.Routines {
 		appsecRunnerUUID := uuid.New().String()
 		//we copy AppsecRutime for each runner
 		wrt := *w.AppsecRuntime
@@ -413,7 +413,7 @@ force_inotify: true`, testPattern),
 			fd, err := os.Create("test_files/stream.log")
 			require.NoError(t, err, "could not create test file")

-			for i := 0; i < 5; i++ {
+			for i := range 5 {
 				_, err = fmt.Fprintf(fd, "%d\n", i)
 				if err != nil {
 					t.Fatalf("could not write test file : %s", err)
@@ -208,7 +208,7 @@ func (k *KinesisSource) decodeFromSubscription(record []byte) ([]CloudwatchSubsc

 func (k *KinesisSource) WaitForConsumerDeregistration(consumerName string, streamARN string) error {
 	maxTries := k.Config.MaxRetries
-	for i := 0; i < maxTries; i++ {
+	for i := range maxTries {
 		_, err := k.kClient.DescribeStreamConsumer(&kinesis.DescribeStreamConsumerInput{
 			ConsumerName: aws.String(consumerName),
 			StreamARN:    aws.String(streamARN),
@@ -249,7 +249,7 @@ func (k *KinesisSource) DeregisterConsumer() error {

 func (k *KinesisSource) WaitForConsumerRegistration(consumerARN string) error {
 	maxTries := k.Config.MaxRetries
-	for i := 0; i < maxTries; i++ {
+	for i := range maxTries {
 		describeOutput, err := k.kClient.DescribeStreamConsumer(&kinesis.DescribeStreamConsumerInput{
 			ConsumerARN: aws.String(consumerARN),
 		})
@@ -71,7 +71,7 @@ func WriteToStream(streamName string, count int, shards int, sub bool) {
 	}
 	sess := session.Must(session.NewSession())
 	kinesisClient := kinesis.New(sess, aws.NewConfig().WithEndpoint(endpoint).WithRegion("us-east-1"))
-	for i := 0; i < count; i++ {
+	for i := range count {
 		partition := "partition"
 		if shards != 1 {
 			partition = fmt.Sprintf("partition-%d", i%shards)
@@ -186,7 +186,7 @@ stream_name: stream-1-shard`,
 			//Allow the datasource to start listening to the stream
 			time.Sleep(4 * time.Second)
 			WriteToStream(f.Config.StreamName, test.count, test.shards, false)
-			for i := 0; i < test.count; i++ {
+			for i := range test.count {
 				e := <-out
 				assert.Equal(t, fmt.Sprintf("%d", i), e.Line.Raw)
 			}
@@ -233,7 +233,7 @@ stream_name: stream-2-shards`,
 			time.Sleep(4 * time.Second)
 			WriteToStream(f.Config.StreamName, test.count, test.shards, false)
 			c := 0
-			for i := 0; i < test.count; i++ {
+			for range test.count {
 				<-out
 				c += 1
 			}
@@ -281,7 +281,7 @@ from_subscription: true`,
 			//Allow the datasource to start listening to the stream
 			time.Sleep(4 * time.Second)
 			WriteToStream(f.Config.StreamName, test.count, test.shards, true)
-			for i := 0; i < test.count; i++ {
+			for i := range test.count {
 				e := <-out
 				assert.Equal(t, fmt.Sprintf("%d", i), e.Line.Raw)
 			}
@@ -276,7 +276,7 @@ func feedLoki(logger *log.Entry, n int, title string) error {
 			},
 		},
 	}
-	for i := 0; i < n; i++ {
+	for i := range n {
 		streams.Streams[0].Values[i] = LogValue{
 			Time: time.Now(),
 			Line: fmt.Sprintf("Log line #%d %v", i, title),
@@ -34,7 +34,7 @@ func isValidHostname(s string) bool {
 	last := byte('.')
 	nonNumeric := false // true once we've seen a letter or hyphen
 	partlen := 0
-	for i := 0; i < len(s); i++ {
+	for i := range len(s) {
 		c := s[i]
 		switch {
 		default:
@@ -41,7 +41,7 @@ func (r retryRoundTripper) RoundTrip(req *http.Request) (*http.Response, error)
 		maxAttempts = 1
 	}

-	for i := 0; i < maxAttempts; i++ {
+	for i := range maxAttempts {
 		if i > 0 {
 			if r.withBackOff {
 				//nolint:gosec
@@ -1076,7 +1076,7 @@ func TestAPICPush(t *testing.T) {
 			expectedCalls: 2,
 			alerts: func() []*models.Alert {
 				alerts := make([]*models.Alert, 100)
-				for i := 0; i < 100; i++ {
+				for i := range 100 {
 					alerts[i] = &models.Alert{
 						Scenario:     ptr.Of("crowdsec/test"),
 						ScenarioHash: ptr.Of("certified"),
@@ -109,7 +109,7 @@ func FormatAlerts(result []*ent.Alert) models.AddAlertsRequest {
 func (c *Controller) sendAlertToPluginChannel(alert *models.Alert, profileID uint) {
 	if c.PluginChannel != nil {
 	RETRY:
-		for try := 0; try < 3; try++ {
+		for try := range 3 {
 			select {
 			case c.PluginChannel <- csplugin.ProfileAlert{ProfileID: profileID, Alert: alert}:
 				log.Debugf("alert sent to Plugin channel")
@@ -34,7 +34,7 @@ func resetWatcherAlertCounter(pw *PluginWatcher) {
 }

 func insertNAlertsToPlugin(pw *PluginWatcher, n int, pluginName string) {
-	for i := 0; i < n; i++ {
+	for range n {
 		pw.Inserts <- pluginName
 	}
 }
@@ -346,7 +346,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part
 }

 func (erp ExprRuntimeDebug) ipSeek(ip int) []string {
-	for i := 0; i < len(erp.Lines); i++ {
+	for i := range len(erp.Lines) {
 		parts := strings.Split(erp.Lines[i], "\t")
 		if parts[0] == strconv.Itoa(ip) {
 			return parts
@@ -216,7 +216,7 @@ func flatten(args []interface{}, v reflect.Value) []interface{} {
 	}

 	if v.Kind() == reflect.Array || v.Kind() == reflect.Slice {
-		for i := 0; i < v.Len(); i++ {
+		for i := range v.Len() {
 			args = flatten(args, v.Index(i))
 		}
 	} else {
@@ -298,7 +298,7 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc
 		BucketPourCache["OK"] = append(BucketPourCache["OK"], evt.(types.Event))
 	}
 	//find the relevant holders (scenarios)
-	for idx := 0; idx < len(holders); idx++ {
+	for idx := range len(holders) {
 		//for idx, holder := range holders {

 		//evaluate bucket's condition
@@ -129,7 +129,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing
 		count = b.N
 		b.ResetTimer()
 	}
-	for n := 0; n < count; n++ {
+	for range count {
 		if testFile(tests, *pctx, pnodes) != true {
 			return fmt.Errorf("test failed !")
 		}
@@ -239,7 +239,7 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo
 		valid = true
 	}

-	for mapIdx := 0; mapIdx < len(expectMaps); mapIdx++ {
+	for mapIdx := range len(expectMaps) {
 		for expKey, expVal := range expectMaps[mapIdx] {
 			if outVal, ok := outMaps[mapIdx][expKey]; ok {
 				if outVal == expVal { //ok entry