enabled linters and fixes for: misspell, predeclared, unconvert, ineffassign, gosimple, govet (#1595)
parent b572f64dc6
commit 10585bfecc
25 changed files with 41 additions and 77 deletions
@@ -62,12 +62,15 @@ linters:
# - grouper # An analyzer to analyze expression groups.
# - importas # Enforces consistent import aliases
# - makezero # Finds slice declarations with non-zero initial length
# - misspell # Finds commonly misspelled English words in comments
# - nolintlint # Reports ill-formed or insufficient nolint directives
# - predeclared # find code that shadows one of Go's predeclared identifiers
# - rowserrcheck # checks whether Err of rows is checked successfully
# - sqlclosecheck # Checks that sql.Rows and sql.Stmt are closed.
# - tenv # tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17
# - tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes
# - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code
# - unconvert # Remove unnecessary type conversions
# - varcheck # Finds unused global variables and constants

#
@@ -94,15 +97,12 @@ linters:
- gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification
- goimports # In addition to fixing imports, goimports also formats your code in the same style as gofmt.
- gosec # (gas): Inspects source code for security problems
- misspell # Finds commonly misspelled English words in comments
- nakedret # Finds naked returns in functions greater than a specified function length
- nilerr # Finds the code that returns nil even if it checks that the error is not nil.
- nonamedreturns # Reports all named returns
- nosprintfhostport # Checks for misuse of Sprintf to construct a host with port in a URL.
- predeclared # find code that shadows one of Go's predeclared identifiers
- nosprintfhostport # Checks for misuse of Sprintf to construct a host with port in a URL.
- promlinter # Check Prometheus metrics naming via promlint
- revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint.
- unconvert # Remove unnecessary type conversions
- wastedassign # wastedassign finds wasted assignment statements.
- gocritic # Provides diagnostics that check for bugs, performance and style issues.
- exhaustive # check exhaustiveness of enum switch statements
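Of the linters named in the commit title, misspell, predeclared and unconvert are the ones appearing in the enable list above. As a rough, hypothetical illustration of what each reports (none of this code is from the repository, all names are made up), the sketch below triggers one finding per linter:

```go
package main

import "fmt"

// countReceived reports how many items we recieve per batch.
// ("recieve" is the kind of commonly misspelled word misspell flags in comments.)
func countReceived(batch []string) int {
	new := len(batch) // predeclared: "new" shadows Go's builtin new()
	return int(new)   // unconvert: new is already an int, so the conversion is redundant
}

func main() {
	fmt.Println(countReceived([]string{"a", "b", "c"}))
}
```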
@@ -170,6 +170,8 @@ linters:


issues:
max-issues-per-linter: 0
max-same-issues: 10
exclude-rules:
- path: go.mod
text: "replacement are not allowed: golang.org/x/time/rate"
@@ -179,20 +181,6 @@ issues:
- govet
text: "shadow: declaration of \"err\" shadows declaration"

#
# govet
#

- linters:
- govet
text: "shadow: declaration of .* shadows declaration"
- linters:
- govet
text: "copylocks: assignment copies lock value to newStream:"
- linters:
- govet
text: "composites: .* composite literal uses unkeyed fields"

#
# errcheck
#
@@ -217,25 +205,3 @@ issues:
- linters:
- staticcheck
text: "SA1006: printf-style function with dynamic format string and no further arguments should use print-style function instead"

#
# gosimple
#

- linters:
- gosimple
text: "S1023: redundant .* statement"
- linters:
- gosimple
text: "S1000: should use a simple channel send/receive instead of `select` with a single case"
- linters:
- gosimple
text: "S1028: should use .* instead of .*"

#
# ineffassign
#

- linters:
- ineffassign
text: "ineffectual assignment to .*"
@@ -41,12 +41,13 @@ cscli explain --dsn "file://myfile.log" --type nginx
fmt.Printf("Please provide --type flag\n")
os.Exit(1)
}

var f *os.File

// we create a temporary log file if a log line has been provided
if logLine != "" {
logFile = "./cscli_test_tmp.log"
- f, err := os.Create(logFile)
+ f, err := os.Create(logFile) // nolint: govet
if err != nil {
log.Fatal(err)
}
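The // nolint: govet comment added in this hunk suppresses govet's shadow diagnostic, which the "shadow: declaration of ..." exclusions in the configuration above refer to. As a hedged sketch of the pattern being flagged (names are hypothetical, not the real cscli code), a ":=" declaration inside a block introduces a fresh variable that hides the one declared outside:

```go
package main

import (
	"log"
	"os"
)

func main() {
	var f *os.File // outer f, declared up front

	if len(os.Args) > 1 {
		// govet (shadow): ":=" declares a new inner f here that hides the outer one;
		// a //nolint:govet comment on this line silences the report, as in the hunk above.
		f, err := os.Create(os.Args[1])
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close() // uses the inner f; the outer f is still nil after the block
	}

	if f == nil {
		log.Println("outer f was never assigned")
	}
}
```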
@@ -297,7 +297,7 @@ func ShowMetrics(hubItem *cwhub.Item) {
ShowScenarioMetric(item, metrics)
}
for _, item := range hubItem.Collections {
- hubItem := cwhub.GetItem(cwhub.COLLECTIONS, item)
+ hubItem = cwhub.GetItem(cwhub.COLLECTIONS, item)
if hubItem == nil {
log.Fatalf("unable to retrieve item '%s' from collection '%s'", item, hubItem.Name)
}
@@ -550,7 +550,7 @@ func RestoreHub(dirPath string) error {
return fmt.Errorf("error while opening %s : %s", upstreamListFN, err)
}
var upstreamList []string
- err = json.Unmarshal([]byte(file), &upstreamList)
+ err = json.Unmarshal(file, &upstreamList)
if err != nil {
return fmt.Errorf("error unmarshaling %s : %s", upstreamListFN, err)
}
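The change above is a typical unconvert finding: the value handed to json.Unmarshal already is a []byte (it comes from reading a file), so wrapping it in []byte(...) is a no-op conversion. A small hedged sketch of the same shape, with a made-up file name:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

func main() {
	file, err := os.ReadFile("upstream-list.json") // hypothetical file; ReadFile already returns []byte
	if err != nil {
		fmt.Println("read error:", err)
		return
	}

	var upstreamList []string
	// unconvert would flag json.Unmarshal([]byte(file), ...) here, since file is already []byte.
	if err := json.Unmarshal(file, &upstreamList); err != nil {
		fmt.Println("unmarshal error:", err)
		return
	}

	fmt.Println(upstreamList)
}
```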
@@ -125,8 +125,6 @@ LOOP:
if len(cache) > 0 {
cacheMutex.Lock()
cachecopy := cache
newcache := make([]types.RuntimeAlert, 0)
cache = newcache
cacheMutex.Unlock()
if err := PushAlerts(cachecopy, Client); err != nil {
log.Errorf("while pushing leftovers to api : %s", err)
@@ -337,7 +337,7 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha

for {
select {
- case newStream := <-in:
+ case newStream := <-in: //nolint:govet // copylocks won't matter if the tomb is not initialized
shouldCreate := true
cw.logger.Tracef("received new streams to monitor : %s/%s", newStream.GroupName, newStream.StreamName)

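The nolint added above suppresses govet's copylocks diagnostic, which the removed exclusion "copylocks: assignment copies lock value to newStream:" pointed at: receiving a value from the channel copies it, and when the element type carries a lock (here, presumably through the embedded tomb), that copy is what copylocks reports. A hedged, self-contained sketch with a made-up struct:

```go
package main

import (
	"fmt"
	"sync"
)

// streamConfig stands in for a config type that embeds a lock (e.g. a tomb or mutex).
type streamConfig struct {
	mu   sync.Mutex
	name string
}

func main() {
	in := make(chan streamConfig, 1)
	in <- streamConfig{name: "stream-1"}

	// govet (copylocks): this assignment copies a value that contains a sync.Mutex.
	newStream := <-in
	fmt.Println(newStream.name)
}
```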
@@ -484,7 +484,7 @@ func TestAlertsDeleteAsMachine(t *testing.T) {
alerts, resp, err := client.Alerts.Delete(context.Background(), alert)
require.NoError(t, err)

- expected := &models.DeleteAlertsResponse{""}
+ expected := &models.DeleteAlertsResponse{NbDeleted: ""}
if resp.Response.StatusCode != http.StatusOK {
t.Errorf("Alerts.List returned status: %d, want %d", resp.Response.StatusCode, http.StatusOK)
}
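This fix answers govet's composites check, the one behind the "composite literal uses unkeyed fields" exclusion text in the configuration hunks earlier: for structs defined in another package, unkeyed composite literals are reported because they break silently when fields are added or reordered. A hedged sketch with a local stand-in for models.DeleteAlertsResponse:

```go
package main

import "fmt"

// deleteAlertsResponse stands in for a struct imported from another package.
type deleteAlertsResponse struct {
	NbDeleted string
}

func main() {
	// For an imported struct, govet would report: composite literal uses unkeyed fields.
	unkeyed := deleteAlertsResponse{""}

	// The keyed form keeps compiling even if fields are later added or reordered.
	keyed := deleteAlertsResponse{NbDeleted: ""}

	fmt.Println(unkeyed, keyed)
}
```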
@@ -256,7 +256,7 @@ func (a *apic) CAPIPullIsOld() (bool, error) {
/*only pull community blocklist if it's older than 1h30 */
alerts := a.dbClient.Ent.Alert.Query()
alerts = alerts.Where(alert.HasDecisionsWith(decision.OriginEQ(database.CapiMachineID)))
- alerts = alerts.Where(alert.CreatedAtGTE(time.Now().UTC().Add(-time.Duration(1*time.Hour + 30*time.Minute))))
+ alerts = alerts.Where(alert.CreatedAtGTE(time.Now().UTC().Add(-time.Duration(1*time.Hour + 30*time.Minute)))) //nolint:unconvert
count, err := alerts.Count(a.dbClient.CTX)
if err != nil {
return false, errors.Wrap(err, "while looking for CAPI alert")
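The //nolint:unconvert added above marks an intentional exception: the product of an untyped constant and a time.Duration is already a time.Duration, so from the linter's point of view the surrounding time.Duration(...) wrapper is redundant, even if it reads as documentation. A hedged sketch:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// unconvert reports the outer conversion: the sum is already a time.Duration.
	wrapped := time.Duration(1*time.Hour + 30*time.Minute) //nolint:unconvert
	plain := 1*time.Hour + 30*time.Minute

	fmt.Println(wrapped == plain) // true
}
```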
@@ -102,7 +102,7 @@ func (c *Controller) sendAlertToPluginChannel(alert *models.Alert, profileID uin
RETRY:
for try := 0; try < 3; try++ {
select {
- case c.PluginChannel <- csplugin.ProfileAlert{ProfileID: uint(profileID), Alert: alert}:
+ case c.PluginChannel <- csplugin.ProfileAlert{ProfileID: profileID, Alert: alert}:
log.Debugf("alert sent to Plugin channel")
break RETRY
default:
@@ -132,7 +132,7 @@ func (c *Controller) StreamDecision(gctx *gin.Context) {
// if the blocker just start, return all decisions
if val, ok := gctx.Request.URL.Query()["startup"]; ok {
if val[0] == "true" {
- data, err := c.DBClient.QueryAllDecisionsWithFilters(filters)
+ data, err = c.DBClient.QueryAllDecisionsWithFilters(filters)
if err != nil {
log.Errorf("failed querying decisions: %v", err)
gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
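The one-character change above (":=" to "=") is another shadowing fix: with ":=", the inner block declares fresh data/err variables, so the query result would not reach any data variable used after the block. A hedged sketch of the mechanism with made-up query helpers:

```go
package main

import "fmt"

func queryRecent() ([]string, error) { return []string{"recent"}, nil }
func queryAll() ([]string, error)    { return []string{"all"}, nil }

func main() {
	startup := true

	data, err := queryRecent()
	if startup {
		// With ":=" here, new inner data/err would shadow the outer ones and the
		// "all" result would be thrown away as soon as the block ends.
		data, err = queryAll()
	}
	if err != nil {
		fmt.Println("query failed:", err)
		return
	}

	fmt.Println(data) // [all]
}
```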
@@ -53,7 +53,6 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) {
var scenariosInput []string
var clientMachine *ent.Machine
var machineID string
- var password strfmt.Password

if c.Request.TLS != nil && len(c.Request.TLS.PeerCertificates) > 0 {
if j.TlsAuth == nil {
@@ -124,7 +123,7 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) {
return "", errors.New("input format error")
}
machineID = *loginInput.MachineID
- password = *loginInput.Password
+ password := *loginInput.Password
scenariosInput = loginInput.Scenarios

clientMachine, err = j.DbClient.Ent.Machine.Query().
@@ -68,7 +68,7 @@ func NewConfig(configFile string, disableAgent bool, disableAPI bool) (*Config,

func NewDefaultConfig() *Config {
logLevel := log.InfoLevel
- CommonCfg := CommonCfg{
+ commonCfg := CommonCfg{
Daemonize: false,
PidDir: "/tmp/",
LogMedia: "stdout",
@@ -116,7 +116,7 @@ func NewDefaultConfig() *Config {
}

globalCfg := Config{
- Common: &CommonCfg,
+ Common: &commonCfg,
Prometheus: &prometheus,
Crowdsec: &crowdsecCfg,
Cscli: &cscliCfg,
@@ -495,7 +495,7 @@ func TestBrokerRunTimeThreshold(t *testing.T) {
})
//set groupwait
raw, cfg := readconfig(t, "tests/notifications/dummy.yaml")
- cfg.GroupWait = time.Duration(1 * time.Second)
+ cfg.GroupWait = time.Duration(1 * time.Second) //nolint:unconvert
writeconfig(t, cfg, "tests/notifications/dummy.yaml")
err := pb.Init(&pluginCfg, profiles, &csconfig.ConfigurationPaths{
PluginDir: testPath,
@@ -11,7 +11,7 @@ import (

/*
PluginWatcher is here to allow grouping and threshold features for notification plugins :
- by frequency : it will signal the plugin to deliver notifications at this frequence (watchPluginTicker)
+ by frequency : it will signal the plugin to deliver notifications at this frequency (watchPluginTicker)
by threshold : it will signal the plugin to deliver notifications when the number of alerts for this plugin reaches this threshold (watchPluginAlertCounts)
*/

@@ -267,7 +267,7 @@ func LoadScenarioDump(filepath string) (*BucketResults, error) {
return nil, err
}

- sort.Sort(BucketResults(bucketDump))
+ sort.Sort(bucketDump)

return &bucketDump, nil
}
@@ -161,7 +161,7 @@ func DownloadItem(hub *csconfig.Hub, target Item, overwrite bool) (Item, error)
return target, errors.Wrap(err, fmt.Sprintf("while reading %s", req.URL.String()))
}
h := sha256.New()
- if _, err := h.Write([]byte(body)); err != nil {
+ if _, err := h.Write(body); err != nil {
return target, errors.Wrap(err, fmt.Sprintf("while hashing %s", target.Name))
}
meow := fmt.Sprintf("%x", h.Sum(nil))
@@ -799,7 +799,7 @@ func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert,
func (c *Client) DeleteAlertGraphBatch(alertItems []*ent.Alert) (int, error) {
idList := make([]int, 0)
for _, alert := range alertItems {
- idList = append(idList, int(alert.ID))
+ idList = append(idList, alert.ID)
}

deleted, err := c.Ent.Alert.Delete().
@@ -47,8 +47,7 @@ func JsonExtractUnescape(jsblob string, target ...string) string {
return ""
}
log.Tracef("extract path %+v", target)
- strvalue := string(value)
- return strvalue
+ return value
}

func JsonExtract(jsblob string, target string) string {
@@ -226,7 +226,7 @@ func LeakRoutine(leaky *Leaky) error {
case msg := <-leaky.In:
/*the msg var use is confusing and is redeclared in a different type :/*/
for _, processor := range leaky.BucketConfig.processors {
- msg := processor.OnBucketPour(leaky.BucketConfig)(*msg, leaky)
+ msg = processor.OnBucketPour(leaky.BucketConfig)(*msg, leaky)
// if &msg == nil we stop processing
if msg == nil {
goto End
@@ -342,7 +342,7 @@ func (leaky *Leaky) overflow(ofw *Queue) {
}
}
if leaky.logger.Level >= log.TraceLevel {
- leaky.logger.Tracef("Overflow event: %s", spew.Sdump(types.RuntimeAlert(alert)))
+ leaky.logger.Tracef("Overflow event: %s", spew.Sdump(alert))
}
mt, _ := leaky.Ovflw_ts.MarshalText()
leaky.logger.Tracef("overflow time : %s", mt)
@@ -289,7 +289,7 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc
if BucketPourCache == nil {
BucketPourCache = make(map[string][]types.Event)
}
- if _, ok := BucketPourCache["OK"]; !ok {
+ if _, ok = BucketPourCache["OK"]; !ok {
BucketPourCache["OK"] = make([]types.Event, 0)
}
evt := deepcopy.Copy(parsed)
@@ -222,7 +222,7 @@ func (m *Metabase) Login(username string, password string) error {
if !ok {
return fmt.Errorf("login: bad response type: %+v", successmsg)
}
- if _, ok := resp["id"]; !ok {
+ if _, ok = resp["id"]; !ok {
return fmt.Errorf("login: can't update session id, no id in response: %v", successmsg)
}
id, ok := resp["id"].(string)
@@ -245,10 +245,10 @@ func (m *Metabase) Scan() error {
return nil
}

- func (m *Metabase) ResetPassword(current string, new string) error {
+ func (m *Metabase) ResetPassword(current string, newPassword string) error {
body := map[string]string{
"id": "1",
- "password": new,
+ "password": newPassword,
"old_password": current,
}
_, errormsg, err := m.Client.Do("PUT", routes[resetPasswordEndpoint], body)
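The rename above (new to newPassword) is what the predeclared linter asks for: new is one of Go's predeclared identifiers (the builtin allocation function), and using it as a parameter name shadows the builtin for the whole function body. A hedged sketch with hypothetical names:

```go
package main

import "fmt"

// Before the fix the signature would have read:
//   func resetPassword(current string, new string) error
// and predeclared would report that "new" shadows a predeclared identifier.
func resetPassword(current string, newPassword string) error {
	if current == newPassword {
		return fmt.Errorf("new password must differ from the current one")
	}
	// With a parameter literally named "new", a call such as new(bytes.Buffer)
	// inside this function would no longer refer to the builtin.
	return nil
}

func main() {
	fmt.Println(resetPassword("old-secret", "new-secret")) // <nil>
}
```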
@@ -58,7 +58,7 @@ func ParseDate(in string, p *types.Event, x interface{}) (map[string]string, err
var ret map[string]string = make(map[string]string)
tstr, tbin := GenDateParse(in)
if !tbin.IsZero() {
- ret["MarshaledTime"] = string(tstr)
+ ret["MarshaledTime"] = tstr
return ret, nil
}

@@ -50,7 +50,7 @@ type Node struct {
EnrichFunctions EnricherCtx

/* If the node is actually a leaf, it can have : grok, enrich, statics */
- //pattern_syntax are named grok patterns that are re-utilised over several grok patterns
+ //pattern_syntax are named grok patterns that are re-utilized over several grok patterns
SubGroks yaml.MapSlice `yaml:"pattern_syntax,omitempty"`

//Holds a grok pattern
@@ -314,7 +314,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
}
}
/*todo : check if a node made the state change ?*/
- /* should the childs inherit the on_success behaviour */
+ /* should the childs inherit the on_success behavior */

clog.Tracef("State after nodes : %v", NodeState)

@@ -71,6 +71,7 @@ func SetTargetByName(target string, value string, evt *types.Event) bool {
tmp = reflect.Indirect(tmp)
}
iter = tmp
+ //nolint: gosimple
break
case reflect.Ptr:
tmp := iter.Elem()
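The //nolint: gosimple added in this hunk most likely answers S1023 ("redundant ... statement", as listed in the gosimple exclusions shown earlier): Go switch cases never fall through, so a trailing break is redundant and gosimple asks for its removal unless it is deliberately kept. A hedged sketch:

```go
package main

import "fmt"

func describe(v interface{}) string {
	switch v.(type) {
	case int:
		fmt.Println("inspecting an int")
		// gosimple (S1023): this break is redundant, since cases do not fall through.
		// A //nolint:gosimple comment here would keep it, as in the hunk above.
		break
	case string:
		return "string"
	}
	return "something else"
}

func main() {
	fmt.Println(describe(42))
	fmt.Println(describe("hello"))
}
```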
@@ -65,7 +65,7 @@ func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx, ectx EnricherCtx) (
nodesCount := 0
for {
node := Node{}
- node.OnSuccess = "continue" //default behaviour is to continue
+ node.OnSuccess = "continue" //default behavior is to continue
err = dec.Decode(&node)
if err != nil {
if err == io.EOF {
@@ -31,23 +31,23 @@ func LastAddress(n net.IPNet) net.IP {
}

/*returns a range for any ip or range*/
- func Addr2Ints(any string) (int, int64, int64, int64, int64, error) {
- if strings.Contains(any, "/") {
- _, net, err := net.ParseCIDR(any)
+ func Addr2Ints(anyIP string) (int, int64, int64, int64, int64, error) {
+ if strings.Contains(anyIP, "/") {
+ _, net, err := net.ParseCIDR(anyIP)
if err != nil {
- return -1, 0, 0, 0, 0, errors.Wrapf(err, "while parsing range %s", any)
+ return -1, 0, 0, 0, 0, errors.Wrapf(err, "while parsing range %s", anyIP)
}
return Range2Ints(*net)
}

- ip := net.ParseIP(any)
+ ip := net.ParseIP(anyIP)
if ip == nil {
return -1, 0, 0, 0, 0, fmt.Errorf("invalid address")
}

sz, start, end, err := IP2Ints(ip)
if err != nil {
- return -1, 0, 0, 0, 0, errors.Wrapf(err, "while parsing ip %s", any)
+ return -1, 0, 0, 0, 0, errors.Wrapf(err, "while parsing ip %s", anyIP)
}

return sz, start, end, start, end, nil
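The systematic rename in this last hunk (any to anyIP) again concerns predeclared identifiers: since Go 1.18, any is a predeclared alias for interface{}, so a parameter named any shadows it, which is presumably why the predeclared linter flags it here. A hedged sketch with a hypothetical helper:

```go
package main

import (
	"fmt"
	"strings"
)

// Before the rename the parameter was called "any", which shadows the
// predeclared alias for interface{} introduced in Go 1.18.
func looksLikeRange(anyIP string) bool {
	return strings.Contains(anyIP, "/")
}

func main() {
	fmt.Println(looksLikeRange("192.168.0.0/24")) // true
	fmt.Println(looksLikeRange("192.168.0.1"))    // false
}
```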