
Linter upgrade to v1.30.0 (#526)

* fixed godot linter errors
* reformatted source with gofumpt
* disabled some linters
* fixed nolintlint warnings
* fixed gci warnings
* lint: fixed 'nestif' warnings
* lint: fixed 'exhaustive' warnings
* lint: fixed 'gocritic' warnings
* lint: fixed 'noctx' warnings
* lint: fixed 'wsl' warnings
* lint: fixed 'goerr113' warnings
* lint: fixed 'gosec' warnings
* lint: upgraded linter to 1.30.0
* lint: more 'exhaustive' warnings

Co-authored-by: Nick <nick@kasten.io>
Jarek Kowalski, 5 years ago
commit 9a6dea898b
100 files changed, 386 additions and 349 deletions
  1. .golangci.yml (+9 -1)
  2. cli/cli_progress.go (+4 -2)
  3. cli/command_benchmark_compression.go (+2 -2)
  4. cli/command_benchmark_crypto.go (+2 -2)
  5. cli/command_benchmark_splitters.go (+3 -3)
  6. cli/command_blob_gc.go (+0 -1)
  7. cli/command_cache_clear.go (+2 -4)
  8. cli/command_cache_sync.go (+1 -3)
  9. cli/command_content_list.go (+0 -1)
  10. cli/command_content_verify.go (+0 -1)
  11. cli/command_mount_browse.go (+1 -3)
  12. cli/command_mount_fuse.go (+1 -4)
  13. cli/command_mount_nofuse.go (+1 -3)
  14. cli/command_mount_webdav.go (+1 -3)
  15. cli/command_policy_edit.go (+2 -2)
  16. cli/command_policy_ls.go (+1 -3)
  17. cli/command_policy_set.go (+2 -2)
  18. cli/command_repository_connect.go (+1 -2)
  19. cli/command_repository_connect_from_config.go (+5 -3)
  20. cli/command_repository_create.go (+2 -2)
  21. cli/command_repository_disconnect.go (+1 -3)
  22. cli/command_repository_status.go (+17 -15)
  23. cli/command_repository_upgrade.go (+1 -3)
  24. cli/command_server_cancel.go (+1 -3)
  25. cli/command_server_flush.go (+1 -3)
  26. cli/command_server_pause.go (+1 -3)
  27. cli/command_server_refresh.go (+1 -3)
  28. cli/command_server_resume.go (+1 -3)
  29. cli/command_server_start.go (+2 -4)
  30. cli/command_server_status.go (+1 -3)
  31. cli/command_server_tls.go (+35 -25)
  32. cli/command_server_upload.go (+1 -3)
  33. cli/command_snapshot_list.go (+0 -1)
  34. cli/command_snapshot_migrate.go (+2 -2)
  35. cli/command_snapshot_verify.go (+2 -1)
  36. cli/config.go (+2 -2)
  37. cli/password.go (+1 -3)
  38. cli/profile.go (+1 -1)
  39. cli/profile_disabled.go (+1 -1)
  40. cli/storage_filesystem.go (+2 -2)
  41. cli/storage_sftp.go (+1 -0)
  42. cli/update_check.go (+64 -36)
  43. examples/upload_download/setup_repository.go (+1 -1)
  44. fs/cachefs/cache_test.go (+1 -1)
  45. fs/cachefs/cachefs.go (+5 -3)
  46. fs/entry.go (+4 -4)
  47. fs/entry_test.go (+2 -1)
  48. fs/ignorefs/ignorefs.go (+2 -2)
  49. fs/localfs/local_fs.go (+8 -4)
  50. fs/localfs/local_fs_test.go (+7 -8)
  51. fs/loggingfs/loggingfs.go (+6 -4)
  52. internal/blobtesting/asserts.go (+5 -3)
  53. internal/blobtesting/concurrent.go (+4 -4)
  54. internal/blobtesting/eventually_consistent.go (+1 -1)
  55. internal/blobtesting/faulty.go (+8 -8)
  56. internal/blobtesting/map.go (+5 -4)
  57. internal/blobtesting/verify.go (+1 -1)
  58. internal/buf/pool.go (+1 -1)
  59. internal/buf/pool_metrics.go (+2 -1)
  60. internal/buf/pool_test.go (+1 -1)
  61. internal/diff/diff.go (+5 -4)
  62. internal/editor/editor.go (+4 -3)
  63. internal/faketime/faketime.go (+5 -5)
  64. internal/fshasher/fshasher.go (+1 -1)
  65. internal/fshasher/fshasher_test.go (+10 -10)
  66. internal/fusemount/fusefs.go (+5 -6)
  67. internal/hmac/hmac.go (+2 -1)
  68. internal/iocopy/copy.go (+1 -1)
  69. internal/logfile/logfile.go (+9 -7)
  70. internal/mockfs/mockfs.go (+3 -3)
  71. internal/parallelwork/parallel_work_queue.go (+1 -1)
  72. internal/remoterepoapi/remoterepoapi.go (+2 -2)
  73. internal/repotesting/repotesting.go (+1 -1)
  74. internal/repotesting/repotesting_test.go (+4 -4)
  75. internal/retry/retry_test.go (+1 -3)
  76. internal/scrubber/scrub_sensitive.go (+2 -1)
  77. internal/server/api_content.go (+2 -1)
  78. internal/server/api_manifest.go (+4 -2)
  79. internal/server/api_object_get.go (+2 -1)
  80. internal/server/api_policies.go (+2 -1)
  81. internal/server/htmlui_fallback.go (+18 -17)
  82. internal/server/source_manager.go (+1 -1)
  83. internal/serverapi/serverapi.go (+1 -1)
  84. internal/stats/countsum.go (+2 -2)
  85. internal/stats/countsum_mutex.go (+2 -2)
  86. internal/testlogging/ctx.go (+5 -1)
  87. internal/testutil/retriable_t.go (+8 -8)
  88. internal/testutil/retriable_t_test.go (+1 -1)
  89. internal/throttle/round_tripper_test.go (+1 -1)
  90. internal/units/units.go (+4 -2)
  91. internal/webdavmount/webdavmount.go (+5 -3)
  92. main.go (+1 -2)
  93. repo/api_server_repository.go (+2 -2)
  94. repo/blob/azure/azure_storage.go (+5 -3)
  95. repo/blob/azure/azure_storage_test.go (+3 -3)
  96. repo/blob/b2/b2_storage.go (+3 -3)
  97. repo/blob/b2/b2_storage_test.go (+2 -4)
  98. repo/blob/filesystem/filesystem_storage.go (+7 -9)
  99. repo/blob/filesystem/filesystem_storage_test.go (+1 -3)
  100. repo/blob/gcs/gcs_storage.go (+5 -4)

+ 9 - 1
.golangci.yml

@@ -37,6 +37,8 @@ linters:
     - gochecknoglobals
     - gochecknoinits
     - whitespace
+    - nlreturn
+    - testpackage
 
 run:
   skip-dirs:
@@ -45,16 +47,22 @@ run:
 issues:
   exclude-use-default: false
   exclude-rules:
-    - path: _test\.go|testing
+    - path: _test\.go|testing|test_env
       linters:
       - gomnd
       - gocognit
       - funlen
       - errcheck
       - gosec
+      - nestif
+      - goerr113
     - text: "Magic number: 1e"
       linters:
       - gomnd
+    - text: "unnecessaryDefer"
+      linters: gocritic
+    - text: "filepathJoin"
+      linters: gocritic
     - text: "weak cryptographic primitive"
       linters:
         - gosec
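
For context, a minimal sketch (not taken from this commit, all names hypothetical) of the pattern that gocritic's filepathJoin check reports; the new exclude-rule above silences such reports repository-wide rather than rewriting each call site, and the unnecessaryDefer exclusion works the same way for that check:

package example

import (
	"fmt"
	"path/filepath"
)

func main() {
	// gocritic's filepathJoin check flags Join arguments that already
	// contain a path separator ("sub/dir" below).
	p := filepath.Join("/tmp/kopia", "sub/dir")
	fmt.Println(p)
}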

+ 4 - 2
cli/cli_progress.go

@@ -19,8 +19,10 @@ var (
 	progressUpdateInterval = app.Flag("progress-update-interval", "How ofter to update progress information").Hidden().Default("300ms").Duration()
 )
 
-const spinner = `|/-\`
-const hundredPercent = 100.0
+const (
+	spinner        = `|/-\`
+	hundredPercent = 100.0
+)
 
 type cliProgress struct {
 	snapshotfs.NullUploadProgress
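
The change above is typical gofumpt output: adjacent single-constant declarations are merged into one aligned block. A hypothetical before/after sketch (retryCount and retryDelay are made-up names):

package example

import "time"

// Before gofumpt (two separate declarations):
//
//	const retryCount = 3
//	const retryDelay = 250 * time.Millisecond
//
// After gofumpt (one grouped, aligned block):
const (
	retryCount = 3
	retryDelay = 250 * time.Millisecond
)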

+ 2 - 2
cli/command_benchmark_compression.go

@@ -7,10 +7,10 @@ import (
 	"sort"
 	"time"
 
+	kingpin "gopkg.in/alecthomas/kingpin.v2"
+
 	"github.com/kopia/kopia/internal/units"
 	"github.com/kopia/kopia/repo/compression"
-
-	kingpin "gopkg.in/alecthomas/kingpin.v2"
 )
 
 var (
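
The import reshuffle above (repeated in several files below) is what the gci linter enforces: standard library imports first, third-party modules next, module-local packages last, each group separated by a blank line. A sketch of that grouping; listFirst is a hypothetical helper, only there to use one import from each group:

package example

import (
	"context" // standard library group
	"fmt"

	"github.com/pkg/errors" // third-party group

	"github.com/kopia/kopia/repo/blob" // module-local group
)

// listFirst prints each blob ID via the blob.Storage interface.
func listFirst(ctx context.Context, st blob.Storage) error {
	err := st.ListBlobs(ctx, "", func(bm blob.Metadata) error {
		fmt.Println(bm.BlobID)
		return nil
	})

	return errors.Wrap(err, "error listing blobs")
}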

+ 2 - 2
cli/command_benchmark_crypto.go

@@ -4,12 +4,12 @@ import (
 	"sort"
 	"time"
 
+	kingpin "gopkg.in/alecthomas/kingpin.v2"
+
 	"github.com/kopia/kopia/internal/units"
 	"github.com/kopia/kopia/repo/content"
 	"github.com/kopia/kopia/repo/encryption"
 	"github.com/kopia/kopia/repo/hashing"
-
-	kingpin "gopkg.in/alecthomas/kingpin.v2"
 )
 
 var (

+ 3 - 3
cli/command_benchmark_splitters.go

@@ -5,9 +5,9 @@ import (
 	"sort"
 	"time"
 
-	"github.com/kopia/kopia/repo/splitter"
-
 	kingpin "gopkg.in/alecthomas/kingpin.v2"
+
+	"github.com/kopia/kopia/repo/splitter"
 )
 
 var (
@@ -36,7 +36,7 @@ func runBenchmarkSplitterAction(ctx *kingpin.ParseContext) error {
 	// generate data blocks
 	var dataBlocks [][]byte
 
-	rnd := rand.New(rand.NewSource(*benchmarkSplitterRandSeed))
+	rnd := rand.New(rand.NewSource(*benchmarkSplitterRandSeed)) //nolint:gosec
 
 	for i := 0; i < *benchmarkSplitterBlockCount; i++ {
 		b := make([]byte, *benchmarkSplitterBlockSize)
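
The //nolint:gosec above acknowledges gosec's G404 finding (math/rand is not a cryptographic RNG) while keeping a seeded, reproducible generator for benchmark input. A hedged sketch of both sides of that trade-off, with hypothetical helpers:

package example

import (
	cryptorand "crypto/rand"
	"math/rand"
)

// benchmarkData produces deterministic, non-secret test input, so the
// weaker math/rand generator is acceptable and the finding is suppressed.
func benchmarkData(seed int64, n int) []byte {
	rnd := rand.New(rand.NewSource(seed)) //nolint:gosec
	b := make([]byte, n)
	rnd.Read(b)

	return b
}

// secretBytes must be unpredictable, so it uses crypto/rand instead.
func secretBytes(n int) ([]byte, error) {
	b := make([]byte, n)
	if _, err := cryptorand.Read(b); err != nil {
		return nil, err
	}

	return b, nil
}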

+ 0 - 1
cli/command_blob_gc.go

@@ -25,7 +25,6 @@ func runBlobGarbageCollectCommand(ctx context.Context, rep *repo.DirectRepositor
 	}
 
 	n, err := maintenance.DeleteUnreferencedBlobs(ctx, rep, opts)
-
 	if err != nil {
 		return err
 	}
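
Several hunks in this commit, like the one above, only delete the blank line between a call and its error check; that is the wsl (whitespace linter) rule that an error check must be cuddled with the statement that produced the error. A minimal sketch (readConfig is hypothetical):

package example

import "io/ioutil"

func readConfig(path string) ([]byte, error) {
	// wsl: no blank line between the assignment and its error check.
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}

	return data, nil
}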

+ 2 - 4
cli/command_cache_clear.go

@@ -10,9 +10,7 @@ import (
 	"github.com/kopia/kopia/repo"
 )
 
-var (
-	cacheClearCommand = cacheCommands.Command("clear", "Clears the cache")
-)
+var cacheClearCommand = cacheCommands.Command("clear", "Clears the cache")
 
 func runCacheClearCommand(ctx context.Context, rep *repo.DirectRepository) error {
 	if d := rep.Content.CachingOptions.CacheDirectory; d != "" {
@@ -30,7 +28,7 @@ func runCacheClearCommand(ctx context.Context, rep *repo.DirectRepository) error
 			return err
 		}
 
-		if err := os.MkdirAll(d, 0700); err != nil {
+		if err := os.MkdirAll(d, 0o700); err != nil {
 			return err
 		}
 

+ 1 - 3
cli/command_cache_sync.go

@@ -6,9 +6,7 @@ import (
 	"github.com/kopia/kopia/repo"
 )
 
-var (
-	cacheSyncCommand = cacheCommands.Command("sync", "Synchronizes the metadata cache with blobs in storage")
-)
+var cacheSyncCommand = cacheCommands.Command("sync", "Synchronizes the metadata cache with blobs in storage")
 
 func runCacheSyncCommand(ctx context.Context, rep *repo.DirectRepository) error {
 	return rep.Content.SyncMetadataCache(ctx)

+ 0 - 1
cli/command_content_list.go

@@ -54,7 +54,6 @@ func runContentListCommand(ctx context.Context, rep *repo.DirectRepository) erro
 
 			return nil
 		})
-
 	if err != nil {
 		return errors.Wrap(err, "error iterating")
 	}

+ 0 - 1
cli/command_content_verify.go

@@ -60,7 +60,6 @@ func runContentVerifyCommand(ctx context.Context, rep *repo.DirectRepository) er
 
 		return nil
 	})
-
 	if err != nil {
 		return errors.Wrap(err, "iterate contents")
 	}

+ 1 - 3
cli/command_mount_browse.go

@@ -9,9 +9,7 @@ import (
 	"github.com/skratchdot/open-golang/open"
 )
 
-var (
-	mountBrowser = mountCommand.Flag("browse", "Browse mounted filesystem using the provided method").Default("OS").Enum("NONE", "WEB", "OS")
-)
+var mountBrowser = mountCommand.Flag("browse", "Browse mounted filesystem using the provided method").Default("OS").Enum("NONE", "WEB", "OS")
 
 var mountBrowsers = map[string]func(ctx context.Context, mountPoint, addr string) error{
 	"NONE": nil,

+ 1 - 4
cli/command_mount_fuse.go

@@ -20,9 +20,7 @@ func (r *root) Root() (fusefs.Node, error) {
 	return r.Node, nil
 }
 
-var (
-	mountMode = mountCommand.Flag("mode", "Mount mode").Default("FUSE").Enum("WEBDAV", "FUSE")
-)
+var mountMode = mountCommand.Flag("mode", "Mount mode").Default("FUSE").Enum("WEBDAV", "FUSE")
 
 func mountDirectoryFUSE(ctx context.Context, entry fs.Directory, mountPoint string) error {
 	rootNode := fusemount.NewDirectoryNode(entry)
@@ -34,7 +32,6 @@ func mountDirectoryFUSE(ctx context.Context, entry fs.Directory, mountPoint stri
 		fuse.Subtype("kopia"),
 		fuse.VolumeName("Kopia"),
 	)
-
 	if err != nil {
 		return err
 	}

+ 1 - 3
cli/command_mount_nofuse.go

@@ -10,9 +10,7 @@ import (
 	"github.com/kopia/kopia/fs"
 )
 
-var (
-	mountMode = mountCommand.Flag("mode", "Mount mode").Default("WEBDAV").Enum("WEBDAV")
-)
+var mountMode = mountCommand.Flag("mode", "Mount mode").Default("WEBDAV").Enum("WEBDAV")
 
 func mountDirectoryFUSE(ctx context.Context, entry fs.Directory, mountPoint string) error {
 	return errors.New("FUSE is not supported")

+ 1 - 3
cli/command_mount_webdav.go

@@ -12,9 +12,7 @@ import (
 	"github.com/kopia/kopia/internal/webdavmount"
 )
 
-var (
-	traceWebDAVServer = mountCommand.Flag("trace-webdav", "Enable tracing on WebDAV server").Bool()
-)
+var traceWebDAVServer = mountCommand.Flag("trace-webdav", "Enable tracing on WebDAV server").Bool()
 
 func webdavServerLogger(r *http.Request, err error) {
 	var maybeRange string

+ 2 - 2
cli/command_policy_edit.go

@@ -66,7 +66,7 @@ func editPolicy(ctx context.Context, rep repo.Repository) error {
 
 	for _, target := range targets {
 		original, err := policy.GetDefinedPolicy(ctx, rep, target)
-		if err == policy.ErrPolicyNotFound {
+		if errors.Is(err, policy.ErrPolicyNotFound) {
 			original = &policy.Policy{}
 		}
 
@@ -99,7 +99,7 @@ func editPolicy(ctx context.Context, rep repo.Repository) error {
 
 		var shouldSave string
 
-		fmt.Scanf("%v", &shouldSave) //nolint:errcheck
+		fmt.Scanf("%v", &shouldSave)
 
 		if strings.HasPrefix(strings.ToLower(shouldSave), "y") {
 			if err := policy.SetPolicy(ctx, rep, target, updated); err != nil {
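
Comparisons such as err == policy.ErrPolicyNotFound are replaced with errors.Is throughout this commit so that wrapped errors still match; the related goerr113 rule additionally steers code toward package-level sentinel errors rather than ad-hoc errors constructed at call sites. A hedged sketch with hypothetical names (ErrNotFound, lookup, isNotFound):

package example

import "github.com/pkg/errors"

// ErrNotFound is a package-level sentinel, the form goerr113 prefers.
var ErrNotFound = errors.New("not found")

func lookup(key string) error {
	// Wrapping adds context but hides the sentinel from a == comparison.
	return errors.Wrap(ErrNotFound, "lookup "+key)
}

func isNotFound(err error) bool {
	// errors.Is walks the wrap chain, so this still matches the sentinel.
	return errors.Is(err, ErrNotFound)
}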

+ 1 - 3
cli/command_policy_ls.go

@@ -9,9 +9,7 @@ import (
 	"github.com/kopia/kopia/snapshot/policy"
 )
 
-var (
-	policyListCommand = policyCommands.Command("list", "List policies.").Alias("ls")
-)
+var policyListCommand = policyCommands.Command("list", "List policies.").Alias("ls")
 
 func init() {
 	policyListCommand.Action(repositoryAction(listPolicies))

+ 2 - 2
cli/command_policy_set.go

@@ -20,7 +20,7 @@ var (
 	policySetTargets = policySetCommand.Arg("target", "Target of a policy ('global','user@host','@host') or a path").Strings()
 	policySetGlobal  = policySetCommand.Flag("global", "Set global policy").Bool()
 
-	// Frequency
+	// Frequency.
 	policySetInterval   = policySetCommand.Flag("snapshot-interval", "Interval between snapshots").DurationList()
 	policySetTimesOfDay = policySetCommand.Flag("snapshot-time", "Times of day when to take snapshot (HH:mm)").Strings()
 
@@ -84,7 +84,7 @@ func setPolicy(ctx context.Context, rep repo.Repository) error {
 		p, err := policy.GetDefinedPolicy(ctx, rep, target)
 
 		switch {
-		case err == policy.ErrPolicyNotFound:
+		case errors.Is(err, policy.ErrPolicyNotFound):
 			p = &policy.Policy{}
 		case err != nil:
 			return errors.Wrap(err, "could not get defined policy")

+ 1 - 2
cli/command_repository_connect.go

@@ -5,12 +5,11 @@ import (
 	"time"
 
 	"github.com/pkg/errors"
+	"gopkg.in/alecthomas/kingpin.v2"
 
 	"github.com/kopia/kopia/repo"
 	"github.com/kopia/kopia/repo/blob"
 	"github.com/kopia/kopia/repo/content"
-
-	"gopkg.in/alecthomas/kingpin.v2"
 )
 
 var (

+ 5 - 3
cli/command_repository_connect_from_config.go

@@ -11,8 +11,10 @@ import (
 	"github.com/kopia/kopia/repo/blob"
 )
 
-var connectFromConfigFile string
-var connectFromConfigToken string
+var (
+	connectFromConfigFile  string
+	connectFromConfigToken string
+)
 
 func connectToStorageFromConfig(ctx context.Context, isNew bool) (blob.Storage, error) {
 	if isNew {
@@ -37,7 +39,7 @@ func connectToStorageFromConfigFile(ctx context.Context) (blob.Storage, error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "unable to open config")
 	}
-	defer f.Close() //nolint:errcheck
+	defer f.Close() //nolint:errcheck,gosec
 
 	if err := cfg.Load(f); err != nil {
 		return nil, errors.Wrap(err, "unable to load config")

+ 2 - 2
cli/command_repository_create.go

@@ -44,12 +44,12 @@ func newRepositoryOptionsFromFlags() *repo.NewRepositoryOptions {
 }
 
 func ensureEmpty(ctx context.Context, s blob.Storage) error {
-	hasDataError := errors.New("has data")
+	hasDataError := errors.Errorf("has data")
 
 	err := s.ListBlobs(ctx, "", func(cb blob.Metadata) error {
 		return hasDataError
 	})
-	if err == hasDataError {
+	if err == hasDataError { //nolint:goerr113
 		return errors.New("found existing data in storage location")
 	}
 

+ 1 - 3
cli/command_repository_disconnect.go

@@ -6,9 +6,7 @@ import (
 	"github.com/kopia/kopia/repo"
 )
 
-var (
-	disconnectCommand = repositoryCommands.Command("disconnect", "Disconnect from a repository.")
-)
+var disconnectCommand = repositoryCommands.Command("disconnect", "Disconnect from a repository.")
 
 func init() {
 	disconnectCommand.Action(noRepositoryAction(runDisconnectCommand))

+ 17 - 15
cli/command_repository_status.go

@@ -42,28 +42,30 @@ func runStatusCommand(ctx context.Context, rep *repo.DirectRepository) error {
 	fmt.Printf("Format version:      %v\n", rep.Content.Format.Version)
 	fmt.Printf("Max pack length:     %v\n", units.BytesStringBase2(int64(rep.Content.Format.MaxPackSize)))
 
-	if *statusReconnectToken {
-		pass := ""
+	if !*statusReconnectToken {
+		return nil
+	}
 
-		if *statusReconnectTokenIncludePassword {
-			var err error
+	pass := ""
 
-			pass, err = getPasswordFromFlags(ctx, false, true)
-			if err != nil {
-				return errors.Wrap(err, "getting password")
-			}
-		}
+	if *statusReconnectTokenIncludePassword {
+		var err error
 
-		tok, err := rep.Token(pass)
+		pass, err = getPasswordFromFlags(ctx, false, true)
 		if err != nil {
-			return err
+			return errors.Wrap(err, "getting password")
 		}
+	}
 
-		fmt.Printf("\nTo reconnect to the repository use:\n\n$ kopia repository connect from-config --token %v\n\n", tok)
+	tok, err := rep.Token(pass)
+	if err != nil {
+		return err
+	}
 
-		if pass != "" {
-			fmt.Printf("NOTICE: The token printed above can be trivially decoded to reveal the repository password. Do not store it in an unsecured place.\n")
-		}
+	fmt.Printf("\nTo reconnect to the repository use:\n\n$ kopia repository connect from-config --token %v\n\n", tok)
+
+	if pass != "" {
+		fmt.Printf("NOTICE: The token printed above can be trivially decoded to reveal the repository password. Do not store it in an unsecured place.\n")
 	}
 
 	return nil
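
The restructuring above is the usual fix for a 'nestif' finding: invert the outer condition and return early, so the interesting work is no longer buried inside nested if blocks. A small sketch of the same guard-clause shape (printToken and its arguments are hypothetical):

package example

import (
	"fmt"

	"github.com/pkg/errors"
)

func printToken(enabled bool, token func() (string, error)) error {
	if !enabled {
		return nil // guard clause replaces the enclosing if block
	}

	tok, err := token()
	if err != nil {
		return errors.Wrap(err, "getting token")
	}

	fmt.Println(tok)

	return nil
}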

+ 1 - 3
cli/command_repository_upgrade.go

@@ -6,9 +6,7 @@ import (
 	"github.com/kopia/kopia/repo"
 )
 
-var (
-	upgradeCommand = repositoryCommands.Command("upgrade", "Upgrade repository format.")
-)
+var upgradeCommand = repositoryCommands.Command("upgrade", "Upgrade repository format.")
 
 func runUpgradeCommand(ctx context.Context, rep *repo.DirectRepository) error {
 	return rep.Upgrade(ctx)

+ 1 - 3
cli/command_server_cancel.go

@@ -7,9 +7,7 @@ import (
 	"github.com/kopia/kopia/internal/serverapi"
 )
 
-var (
-	serverCancelUploadCommand = serverCommands.Command("cancel", "Cancels in-progress uploads for one or more sources")
-)
+var serverCancelUploadCommand = serverCommands.Command("cancel", "Cancels in-progress uploads for one or more sources")
 
 func init() {
 	serverCancelUploadCommand.Action(serverAction(runServerCancelUpload))

+ 1 - 3
cli/command_server_flush.go

@@ -7,9 +7,7 @@ import (
 	"github.com/kopia/kopia/internal/serverapi"
 )
 
-var (
-	serverFlushCommand = serverCommands.Command("flush", "Flush the state of Kopia server to persistent storage, etc.")
-)
+var serverFlushCommand = serverCommands.Command("flush", "Flush the state of Kopia server to persistent storage, etc.")
 
 func init() {
 	serverFlushCommand.Action(serverAction(runServerFlush))

+ 1 - 3
cli/command_server_pause.go

@@ -7,9 +7,7 @@ import (
 	"github.com/kopia/kopia/internal/serverapi"
 )
 
-var (
-	serverPauseCommand = serverCommands.Command("pause", "Pause the scheduled snapshots for one or more sources")
-)
+var serverPauseCommand = serverCommands.Command("pause", "Pause the scheduled snapshots for one or more sources")
 
 func init() {
 	serverPauseCommand.Action(serverAction(runServerPause))

+ 1 - 3
cli/command_server_refresh.go

@@ -7,9 +7,7 @@ import (
 	"github.com/kopia/kopia/internal/serverapi"
 )
 
-var (
-	serverRefreshCommand = serverCommands.Command("refresh", "Refresh the cache in Kopia server to observe new sources, etc.")
-)
+var serverRefreshCommand = serverCommands.Command("refresh", "Refresh the cache in Kopia server to observe new sources, etc.")
 
 func init() {
 	serverRefreshCommand.Action(serverAction(runServerRefresh))

+ 1 - 3
cli/command_server_resume.go

@@ -7,9 +7,7 @@ import (
 	"github.com/kopia/kopia/internal/serverapi"
 )
 
-var (
-	serverResumeCommand = serverCommands.Command("resume", "Resume the scheduled snapshots for one or more sources")
-)
+var serverResumeCommand = serverCommands.Command("resume", "Resume the scheduled snapshots for one or more sources")
 
 func init() {
 	serverResumeCommand.Action(serverAction(runServerResume))

+ 2 - 4
cli/command_server_start.go

@@ -16,11 +16,10 @@ import (
 	"strings"
 	"time"
 
-	htpasswd "github.com/tg123/go-htpasswd"
-
 	"contrib.go.opencensus.io/exporter/prometheus"
 	"github.com/pkg/errors"
 	prom "github.com/prometheus/client_golang/prometheus"
+	htpasswd "github.com/tg123/go-htpasswd"
 
 	"github.com/kopia/kopia/internal/server"
 	"github.com/kopia/kopia/repo"
@@ -105,7 +104,7 @@ func runServer(ctx context.Context, rep repo.Repository) error {
 	httpServer.Handler = handler
 
 	err = startServerWithOptionalTLS(ctx, httpServer)
-	if err != http.ErrServerClosed {
+	if !errors.Is(err, http.ErrServerClosed) {
 		return err
 	}
 
@@ -125,7 +124,6 @@ func initPrometheus(mux *http.ServeMux) error {
 	pe, err := prometheus.NewExporter(prometheus.Options{
 		Registry: reg,
 	})
-
 	if err != nil {
 		return errors.Wrap(err, "unable to initialize prometheus exporter")
 	}

+ 1 - 3
cli/command_server_status.go

@@ -8,9 +8,7 @@ import (
 	"github.com/kopia/kopia/internal/serverapi"
 )
 
-var (
-	serverStatusCommand = serverCommands.Command("status", "Status of Kopia server")
-)
+var serverStatusCommand = serverCommands.Command("status", "Status of Kopia server")
 
 func init() {
 	serverStatusCommand.Action(serverAction(runServerStatus))

+ 35 - 25
cli/command_server_tls.go

@@ -88,7 +88,7 @@ func writePrivateKeyToFile(fname string, priv *rsa.PrivateKey) error {
 	if err != nil {
 		return err
 	}
-	defer f.Close() //nolint:errcheck
+	defer f.Close() //nolint:errcheck,gosec
 
 	privBytes, err := x509.MarshalPKCS8PrivateKey(priv)
 	if err != nil {
@@ -107,7 +107,7 @@ func writeCertificateToFile(fname string, cert *x509.Certificate) error {
 	if err != nil {
 		return err
 	}
-	defer f.Close() //nolint:errcheck
+	defer f.Close() //nolint:errcheck,gosec
 
 	if err := pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil {
 		return errors.Wrap(err, "Failed to write data")
@@ -128,36 +128,45 @@ func startServerWithOptionalTLS(ctx context.Context, httpServer *http.Server) er
 	return startServerWithOptionalTLSAndListener(ctx, httpServer, l)
 }
 
-func startServerWithOptionalTLSAndListener(ctx context.Context, httpServer *http.Server, listener net.Listener) error {
-	// generate and save to PEM files
-	if *serverStartTLSGenerateCert && *serverStartTLSCertFile != "" && *serverStartTLSKeyFile != "" {
-		if _, err := os.Stat(*serverStartTLSCertFile); err == nil {
-			return errors.Errorf("TLS cert file already exists: %q", *serverStartTLSCertFile)
-		}
+func maybeGenerateTLS(ctx context.Context) error {
+	if !*serverStartTLSGenerateCert || *serverStartTLSCertFile == "" || *serverStartTLSKeyFile == "" {
+		return nil
+	}
 
-		if _, err := os.Stat(*serverStartTLSKeyFile); err == nil {
-			return errors.Errorf("TLS key file already exists: %q", *serverStartTLSKeyFile)
-		}
+	if _, err := os.Stat(*serverStartTLSCertFile); err == nil {
+		return errors.Errorf("TLS cert file already exists: %q", *serverStartTLSCertFile)
+	}
 
-		cert, key, err := generateServerCertificate(ctx)
-		if err != nil {
-			return errors.Wrap(err, "unable to generate server cert")
-		}
+	if _, err := os.Stat(*serverStartTLSKeyFile); err == nil {
+		return errors.Errorf("TLS key file already exists: %q", *serverStartTLSKeyFile)
+	}
 
-		fingerprint := sha256.Sum256(cert.Raw)
-		fmt.Fprintf(os.Stderr, "SERVER CERT SHA256: %v\n", hex.EncodeToString(fingerprint[:]))
+	cert, key, err := generateServerCertificate(ctx)
+	if err != nil {
+		return errors.Wrap(err, "unable to generate server cert")
+	}
 
-		log(ctx).Infof("writing TLS certificate to %v", *serverStartTLSCertFile)
+	fingerprint := sha256.Sum256(cert.Raw)
+	fmt.Fprintf(os.Stderr, "SERVER CERT SHA256: %v\n", hex.EncodeToString(fingerprint[:]))
 
-		if err := writeCertificateToFile(*serverStartTLSCertFile, cert); err != nil {
-			return errors.Wrap(err, "unable to write private key")
-		}
+	log(ctx).Infof("writing TLS certificate to %v", *serverStartTLSCertFile)
+
+	if err := writeCertificateToFile(*serverStartTLSCertFile, cert); err != nil {
+		return errors.Wrap(err, "unable to write private key")
+	}
 
-		log(ctx).Infof("writing TLS private key to %v", *serverStartTLSKeyFile)
+	log(ctx).Infof("writing TLS private key to %v", *serverStartTLSKeyFile)
 
-		if err := writePrivateKeyToFile(*serverStartTLSKeyFile, key); err != nil {
-			return errors.Wrap(err, "unable to write private key")
-		}
+	if err := writePrivateKeyToFile(*serverStartTLSKeyFile, key); err != nil {
+		return errors.Wrap(err, "unable to write private key")
+	}
+
+	return nil
+}
+
+func startServerWithOptionalTLSAndListener(ctx context.Context, httpServer *http.Server, listener net.Listener) error {
+	if err := maybeGenerateTLS(ctx); err != nil {
+		return err
 	}
 
 	switch {
@@ -176,6 +185,7 @@ func startServerWithOptionalTLSAndListener(ctx context.Context, httpServer *http
 		}
 
 		httpServer.TLSConfig = &tls.Config{
+			MinVersion: tls.VersionTLS13,
 			Certificates: []tls.Certificate{
 				{
 					Certificate: [][]byte{cert.Raw},
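
Besides extracting maybeGenerateTLS, the hunk above sets MinVersion on the tls.Config, which addresses gosec's warning about TLS configurations without an explicit minimum protocol version. A minimal sketch of the same setting (newTLSConfig is hypothetical):

package example

import "crypto/tls"

func newTLSConfig(cert tls.Certificate) *tls.Config {
	return &tls.Config{
		MinVersion:   tls.VersionTLS13, // explicit minimum, as in the diff above
		Certificates: []tls.Certificate{cert},
	}
}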

+ 1 - 3
cli/command_server_upload.go

@@ -8,9 +8,7 @@ import (
 	"github.com/kopia/kopia/internal/serverapi"
 )
 
-var (
-	serverStartUploadCommand = serverCommands.Command("upload", "Trigger upload for one or more sources")
-)
+var serverStartUploadCommand = serverCommands.Command("upload", "Trigger upload for one or more sources")
 
 func init() {
 	serverStartUploadCommand.Action(serverAction(runServerStartUpload))

+ 0 - 1
cli/command_snapshot_list.go

@@ -146,7 +146,6 @@ func outputManifestGroups(ctx context.Context, rep repo.Repository, manifests []
 	return nil
 }
 
-//nolint:gocyclo,funlen
 func outputManifestFromSingleSource(ctx context.Context, rep repo.Repository, manifests []*snapshot.Manifest, parts []string) error {
 	var (
 		count             int

+ 2 - 2
cli/command_snapshot_migrate.go

@@ -158,7 +158,7 @@ func migrateAllPolicies(ctx context.Context, sourceRepo, destRepo repo.Repositor
 
 func migrateSinglePolicy(ctx context.Context, sourceRepo, destRepo repo.Repository, si snapshot.SourceInfo) error {
 	pol, err := policy.GetDefinedPolicy(ctx, sourceRepo, si)
-	if err == policy.ErrPolicyNotFound {
+	if errors.Is(err, policy.ErrPolicyNotFound) {
 		return nil
 	}
 
@@ -173,7 +173,7 @@ func migrateSinglePolicy(ctx context.Context, sourceRepo, destRepo repo.Reposito
 			// already have destination policy
 			return nil
 		}
-	} else if err != policy.ErrPolicyNotFound {
+	} else if !errors.Is(err, policy.ErrPolicyNotFound) {
 		return errors.Wrapf(err, "unable to migrate policy for %v", si)
 	}
 

+ 2 - 1
cli/command_snapshot_verify.go

@@ -150,7 +150,8 @@ func (v *verifier) doVerifyObject(ctx context.Context, oid object.ID, path strin
 		v.reportError(ctx, path, errors.Wrapf(err, "error verifying %v", oid))
 	}
 
-	if rand.Intn(100) < *verifyCommandFilesPercent { //nolint:gomnd
+	//nolint:gomnd,gosec
+	if rand.Intn(100) < *verifyCommandFilesPercent {
 		if err := v.readEntireObject(ctx, oid, path); err != nil {
 			v.reportError(ctx, path, errors.Wrapf(err, "error reading object %v", oid))
 		}

+ 2 - 2
cli/config.go

@@ -29,11 +29,11 @@ var (
 )
 
 func printStderr(msg string, args ...interface{}) {
-	fmt.Fprintf(os.Stderr, msg, args...) //nolint:errcheck
+	fmt.Fprintf(os.Stderr, msg, args...)
 }
 
 func printStdout(msg string, args ...interface{}) {
-	fmt.Fprintf(os.Stdout, msg, args...) //nolint:errcheck
+	fmt.Fprintf(os.Stdout, msg, args...)
 }
 
 func onCtrlC(f func()) {

+ 1 - 3
cli/password.go

@@ -11,9 +11,7 @@ import (
 	"github.com/kopia/kopia/repo"
 )
 
-var (
-	password = app.Flag("password", "Repository password.").Envar("KOPIA_PASSWORD").Short('p').String()
-)
+var password = app.Flag("password", "Repository password.").Envar("KOPIA_PASSWORD").Short('p').String()
 
 func askForNewRepositoryPassword() (string, error) {
 	for {

+ 1 - 1
cli/profile.go

@@ -12,7 +12,7 @@ var (
 	profileMutex    = app.Flag("profile-mutex", "Enable mutex profiling").Hidden().Bool()
 )
 
-// withProfiling runs the given callback with profiling enabled, configured according to command line flags
+// withProfiling runs the given callback with profiling enabled, configured according to command line flags.
 func withProfiling(callback func() error) error {
 	if *profileDir != "" {
 		pp := profile.ProfilePath(*profileDir)

+ 1 - 1
cli/profile_disabled.go

@@ -2,7 +2,7 @@
 
 package cli
 
-// withProfiling runs the given callback with profiling enabled, configured according to command line flags
+// withProfiling runs the given callback with profiling enabled, configured according to command line flags.
 func withProfiling(callback func() error) error {
 	return callback()
 }

+ 2 - 2
cli/storage_filesystem.go

@@ -14,8 +14,8 @@ import (
 var options filesystem.Options
 
 const (
-	defaultFileMode = 0600
-	defaultDirMode  = 0700
+	defaultFileMode = 0o600
+	defaultDirMode  = 0o700
 )
 
 var (
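
The 0600/0700 constants become 0o600/0o700 here and in several other files: gofumpt prefers the explicit 0o prefix for octal literals, and the numeric value is unchanged. A tiny sketch (ensureDir is hypothetical):

package example

import "os"

func ensureDir(dir string) error {
	// 0o700 == 0700 == 448; only the spelling of the literal changes.
	return os.MkdirAll(dir, 0o700)
}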

+ 1 - 0
cli/storage_sftp.go

@@ -36,6 +36,7 @@ func init() {
 		func(ctx context.Context, isNew bool) (blob.Storage, error) {
 			sftpo := options
 
+			// nolint:nestif
 			if embedCredentials {
 				if sftpo.KeyData == "" {
 					d, err := ioutil.ReadFile(sftpo.Keyfile)

+ 64 - 36
cli/update_check.go

@@ -20,9 +20,12 @@ import (
 	"github.com/kopia/kopia/repo"
 )
 
-const checkForUpdatesEnvar = "KOPIA_CHECK_FOR_UPDATES"
+const (
+	checkForUpdatesEnvar = "KOPIA_CHECK_FOR_UPDATES"
+	githubTimeout        = 10 * time.Second
+)
 
-// hidden flags to control auto-update behavior
+// hidden flags to control auto-update behavior.
 var (
 	initialUpdateCheckDelay       = app.Flag("initial-update-check-delay", "Initial delay before first time update check").Default("24h").Hidden().Envar("KOPIA_INITIAL_UPDATE_CHECK_DELAY").Duration()
 	updateCheckInterval           = app.Flag("update-check-interval", "Interval between update checks").Default("168h").Hidden().Envar("KOPIA_UPDATE_CHECK_INTERVAL").Duration()
@@ -81,7 +84,7 @@ func getUpdateState() (*updateState, error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "unable to open update state file")
 	}
-	defer f.Close() //nolint:errcheck
+	defer f.Close() //nolint:errcheck,gosec
 
 	us := &updateState{}
 	if err := json.NewDecoder(f).Decode(us); err != nil {
@@ -111,8 +114,16 @@ func maybeInitializeUpdateCheck(ctx context.Context) {
 }
 
 // getLatestReleaseNameFromGitHub gets the name of the release marked 'latest' on GitHub.
-func getLatestReleaseNameFromGitHub() (string, error) {
-	resp, err := http.DefaultClient.Get(latestReleaseGitHubURL)
+func getLatestReleaseNameFromGitHub(ctx context.Context) (string, error) {
+	ctx, cancel := context.WithTimeout(ctx, githubTimeout)
+	defer cancel()
+
+	req, err := http.NewRequestWithContext(ctx, "GET", latestReleaseGitHubURL, nil)
+	if err != nil {
+		return "", errors.Wrap(err, "unable to get latest release from github")
+	}
+
+	resp, err := http.DefaultClient.Do(req)
 	if err != nil {
 		return "", errors.Wrap(err, "unable to get latest release from github")
 	}
@@ -134,10 +145,16 @@ func getLatestReleaseNameFromGitHub() (string, error) {
 }
 
 // verifyGitHubReleaseIsComplete downloads checksum file to verify that the release is complete.
-func verifyGitHubReleaseIsComplete(releaseName string) error {
-	u := fmt.Sprintf(checksumsURL, releaseName)
+func verifyGitHubReleaseIsComplete(ctx context.Context, releaseName string) error {
+	ctx, cancel := context.WithTimeout(ctx, githubTimeout)
+	defer cancel()
+
+	req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf(checksumsURL, releaseName), nil)
+	if err != nil {
+		return errors.Wrap(err, "unable to download releases checksum")
+	}
 
-	resp, err := http.DefaultClient.Get(u)
+	resp, err := http.DefaultClient.Do(req)
 	if err != nil {
 		return errors.Wrap(err, "unable to download releases checksum")
 	}
@@ -164,34 +181,8 @@ func maybeCheckForUpdates(ctx context.Context) (string, error) {
 		return "", err
 	}
 
-	if time.Now().After(us.NextCheckTime) {
-		log(ctx).Debugf("time for next update check has been reached")
-		// before we check for update, write update state file again, so if this fails
-		// we won't bother GitHub for a while
-		us.NextCheckTime = time.Now().Add(*updateCheckInterval)
-		if err = writeUpdateState(us); err != nil {
-			return "", errors.Wrap(err, "unable to write update state")
-		}
-
-		newAvailableVersion, err := getLatestReleaseNameFromGitHub()
-		if err != nil {
-			return "", errors.Wrap(err, "update to get latest release from GitHub")
-		}
-
-		log(ctx).Debugf("latest version on github: %v previous %v", newAvailableVersion, us.AvailableVersion)
-
-		// we got updated version from GitHub, write it in a state file again
-		if newAvailableVersion != us.AvailableVersion {
-			if err = verifyGitHubReleaseIsComplete(newAvailableVersion); err != nil {
-				return "", errors.Wrap(err, "unable to validate GitHub release")
-			}
-
-			us.AvailableVersion = newAvailableVersion
-
-			if err := writeUpdateState(us); err != nil {
-				return "", errors.Wrap(err, "unable to write update state")
-			}
-		}
+	if err := maybeCheckGithub(ctx, us); err != nil {
+		return "", errors.Wrap(err, "error checking github")
 	}
 
 	log(ctx).Debugf("build version %v, available %v", ensureVPrefix(repo.BuildVersion), ensureVPrefix(us.AvailableVersion))
@@ -214,6 +205,43 @@ func maybeCheckForUpdates(ctx context.Context) (string, error) {
 	return "", nil
 }
 
+func maybeCheckGithub(ctx context.Context, us *updateState) error {
+	if !time.Now().After(us.NextCheckTime) {
+		return nil
+	}
+
+	log(ctx).Debugf("time for next update check has been reached")
+
+	// before we check for update, write update state file again, so if this fails
+	// we won't bother GitHub for a while
+	us.NextCheckTime = time.Now().Add(*updateCheckInterval)
+	if err := writeUpdateState(us); err != nil {
+		return errors.Wrap(err, "unable to write update state")
+	}
+
+	newAvailableVersion, err := getLatestReleaseNameFromGitHub(ctx)
+	if err != nil {
+		return errors.Wrap(err, "update to get latest release from GitHub")
+	}
+
+	log(ctx).Debugf("latest version on github: %v previous %v", newAvailableVersion, us.AvailableVersion)
+
+	// we got updated version from GitHub, write it in a state file again
+	if newAvailableVersion != us.AvailableVersion {
+		if err = verifyGitHubReleaseIsComplete(ctx, newAvailableVersion); err != nil {
+			return errors.Wrap(err, "unable to validate GitHub release")
+		}
+
+		us.AvailableVersion = newAvailableVersion
+
+		if err := writeUpdateState(us); err != nil {
+			return errors.Wrap(err, "unable to write update state")
+		}
+	}
+
+	return nil
+}
+
 // maybePrintUpdateNotification prints notification about available version.
 func maybePrintUpdateNotification(ctx context.Context) {
 	updatedVersion, err := maybeCheckForUpdates(ctx)
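
The update_check.go changes above are the noctx fix: instead of calling http.DefaultClient.Get, the code builds a request with http.NewRequestWithContext (bounded here by a timeout) and sends it with Do, so the call honors cancellation. A condensed sketch of the same pattern (fetch is a hypothetical helper):

package example

import (
	"context"
	"io/ioutil"
	"net/http"
	"time"

	"github.com/pkg/errors"
)

func fetch(ctx context.Context, url string) ([]byte, error) {
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
	if err != nil {
		return nil, errors.Wrap(err, "unable to create request")
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, errors.Wrap(err, "request failed")
	}
	defer resp.Body.Close() //nolint:errcheck

	return ioutil.ReadAll(resp.Body)
}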

+ 1 - 1
examples/upload_download/setup_repository.go

@@ -23,7 +23,7 @@ const (
 )
 
 func setupRepositoryAndConnect(ctx context.Context, password string) error {
-	if err := os.MkdirAll(storageDir, 0700); err != nil {
+	if err := os.MkdirAll(storageDir, 0o700); err != nil {
 		return errors.Wrap(err, "unable to create directory")
 	}
 

+ 1 - 1
fs/cachefs/cache_test.go

@@ -234,7 +234,7 @@ func TestCache(t *testing.T) {
 	cv.verifyCacheOrdering(t, id6)
 }
 
-// Simple test for getEntries() locking/unlocking. Related to PRs #130 and #132
+// Simple test for getEntries() locking/unlocking. Related to PRs #130 and #132.
 func TestCacheGetEntriesLocking(t *testing.T) {
 	ctx := testlogging.Context(t)
 	c := NewCache(&Options{

+ 5 - 3
fs/cachefs/cachefs.go

@@ -75,6 +75,8 @@ func wrapWithContext(e fs.Entry, opts *cacheContext) fs.Entry {
 	}
 }
 
-var _ fs.Directory = &directory{}
-var _ fs.File = &file{}
-var _ fs.Symlink = &symlink{}
+var (
+	_ fs.Directory = &directory{}
+	_ fs.File      = &file{}
+	_ fs.Symlink   = &symlink{}
+)
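
The var _ fs.Directory = &directory{} lines are compile-time interface assertions; gofumpt merges adjacent ones into a single var block, as shown above. A standalone sketch of the idiom (nopWriter is hypothetical):

package example

import "io"

type nopWriter struct{}

func (nopWriter) Write(p []byte) (int, error) { return len(p), nil }

// Fails to compile if nopWriter ever stops satisfying io.Writer.
var _ io.Writer = nopWriter{}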

+ 4 - 4
fs/entry.go

@@ -9,13 +9,13 @@ import (
 	"time"
 )
 
-// Entry represents a filesystem entry, which can be Directory, File, or Symlink
+// Entry represents a filesystem entry, which can be Directory, File, or Symlink.
 type Entry interface {
 	os.FileInfo
 	Owner() OwnerInfo
 }
 
-// OwnerInfo describes owner of a filesystem entry
+// OwnerInfo describes owner of a filesystem entry.
 type OwnerInfo struct {
 	UserID  uint32
 	GroupID uint32
@@ -50,7 +50,7 @@ type Directory interface {
 var ErrEntryNotFound = errors.New("entry not found")
 
 // ReadDirAndFindChild reads all entries from a directory and returns one by name.
-// This is a convenience function that may be helpful in implementations of Directory.Child()
+// This is a convenience function that may be helpful in implementations of Directory.Child().
 func ReadDirAndFindChild(ctx context.Context, d Directory, name string) (Entry, error) {
 	children, err := d.Readdir(ctx)
 	if err != nil {
@@ -68,7 +68,7 @@ func ReadDirAndFindChild(ctx context.Context, d Directory, name string) (Entry,
 // MaxFailedEntriesPerDirectorySummary is the maximum number of failed entries per directory summary.
 const MaxFailedEntriesPerDirectorySummary = 10
 
-// EntryWithError describes error encountered when processing an entry
+// EntryWithError describes error encountered when processing an entry.
 type EntryWithError struct {
 	EntryPath string `json:"path"`
 	Error     string `json:"error"`

+ 2 - 1
fs/entry_test.go

@@ -141,7 +141,8 @@ func TestEntriesUpdate(t *testing.T) {
 				testEntry{n: "cc"},
 				testEntry{n: "dd"},
 			},
-		}, {
+		},
+		{
 			desc: "insert after last first",
 			base: Entries{
 				testEntry{n: "aa"},

+ 2 - 2
fs/ignorefs/ignorefs.go

@@ -85,7 +85,7 @@ func (d *ignoreDirectory) Readdir(ctx context.Context) (fs.Entries, error) {
 }
 
 func (d *ignoreDirectory) buildContext(ctx context.Context, entries fs.Entries) (*ignoreContext, error) {
-	var effectiveDotIgnoreFiles = d.parentContext.dotIgnoreFiles
+	effectiveDotIgnoreFiles := d.parentContext.dotIgnoreFiles
 
 	pol := d.policyTree.DefinedPolicy()
 	if pol != nil {
@@ -231,7 +231,7 @@ func parseIgnoreFile(ctx context.Context, baseDir string, file fs.File) ([]ignor
 	return matchers, nil
 }
 
-// Option modifies the behavior of ignorefs
+// Option modifies the behavior of ignorefs.
 type Option func(parentContext *ignoreContext)
 
 // New returns a fs.Directory that wraps another fs.Directory and hides files specified in the ignore dotfiles.

+ 8 - 4
fs/localfs/local_fs.go

@@ -133,7 +133,7 @@ func (fsd *filesystemDirectory) Readdir(ctx context.Context) (fs.Entries, error)
 	if direrr != nil {
 		return nil, direrr
 	}
-	defer f.Close() //nolint:errcheck
+	defer f.Close() //nolint:errcheck,gosec
 
 	// start feeding directory entry names to namesCh
 	namesCh := make(chan string, dirListingPrefetch)
@@ -259,6 +259,7 @@ func NewEntry(path string) (fs.Entry, error) {
 		return nil, err
 	}
 
+	// nolint:exhaustive
 	switch fi.Mode() & os.ModeType {
 	case os.ModeDir:
 		return &filesystemDirectory{newEntry(fi, filepath.Dir(path))}, nil
@@ -289,6 +290,7 @@ func Directory(path string) (fs.Directory, error) {
 }
 
 func entryFromChildFileInfo(fi os.FileInfo, parentDir string) (fs.Entry, error) {
+	// nolint:exhaustive
 	switch fi.Mode() & os.ModeType {
 	case os.ModeDir:
 		return &filesystemDirectory{newEntry(fi, parentDir)}, nil
@@ -304,6 +306,8 @@ func entryFromChildFileInfo(fi os.FileInfo, parentDir string) (fs.Entry, error)
 	}
 }
 
-var _ fs.Directory = &filesystemDirectory{}
-var _ fs.File = &filesystemFile{}
-var _ fs.Symlink = &filesystemSymlink{}
+var (
+	_ fs.Directory = &filesystemDirectory{}
+	_ fs.File      = &filesystemFile{}
+	_ fs.Symlink   = &filesystemSymlink{}
+)
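
The // nolint:exhaustive annotations added above sit on switches over fi.Mode() & os.ModeType, which intentionally handle only a few mode bits; the exhaustive linter can otherwise ask for a case per os.FileMode constant. A sketch of the same shape (kind is hypothetical):

package example

import (
	"fmt"
	"os"
)

func kind(fi os.FileInfo) string {
	// nolint:exhaustive
	switch fi.Mode() & os.ModeType {
	case os.ModeDir:
		return "directory"
	case os.ModeSymlink:
		return "symlink"
	default:
		return fmt.Sprintf("other (%v)", fi.Mode())
	}
}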

+ 7 - 8
fs/localfs/local_fs_test.go

@@ -5,15 +5,14 @@ import (
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"testing"
 	"time"
 
 	"github.com/kopia/kopia/fs"
 	"github.com/kopia/kopia/internal/testlogging"
-
-	"testing"
 )
 
-//nolint:gocyclo,gocognit
+//nolint:gocyclo
 func TestFiles(t *testing.T) {
 	ctx := testlogging.Context(t)
 
@@ -51,12 +50,12 @@ func TestFiles(t *testing.T) {
 	}
 
 	// Now list a directory with 3 files.
-	assertNoError(t, ioutil.WriteFile(filepath.Join(tmp, "f3"), []byte{1, 2, 3}, 0777))
-	assertNoError(t, ioutil.WriteFile(filepath.Join(tmp, "f2"), []byte{1, 2, 3, 4}, 0777))
-	assertNoError(t, ioutil.WriteFile(filepath.Join(tmp, "f1"), []byte{1, 2, 3, 4, 5}, 0777))
+	assertNoError(t, ioutil.WriteFile(filepath.Join(tmp, "f3"), []byte{1, 2, 3}, 0o777))
+	assertNoError(t, ioutil.WriteFile(filepath.Join(tmp, "f2"), []byte{1, 2, 3, 4}, 0o777))
+	assertNoError(t, ioutil.WriteFile(filepath.Join(tmp, "f1"), []byte{1, 2, 3, 4, 5}, 0o777))
 
-	assertNoError(t, os.Mkdir(filepath.Join(tmp, "z"), 0777))
-	assertNoError(t, os.Mkdir(filepath.Join(tmp, "y"), 0777))
+	assertNoError(t, os.Mkdir(filepath.Join(tmp, "z"), 0o777))
+	assertNoError(t, os.Mkdir(filepath.Join(tmp, "y"), 0o777))
 
 	dir, err = Directory(tmp)
 	if err != nil {

+ 6 - 4
fs/loggingfs/loggingfs.go

@@ -92,7 +92,7 @@ func applyOptions(printf func(msg string, args ...interface{}), opts []Option) *
 	return o
 }
 
-// Output is an option that causes all output to be sent to a given function instead of log.Printf()
+// Output is an option that causes all output to be sent to a given function instead of log.Printf().
 func Output(outputFunc func(fmt string, args ...interface{})) Option {
 	return func(o *loggingOptions) {
 		o.printf = outputFunc
@@ -106,6 +106,8 @@ func Prefix(prefix string) Option {
 	}
 }
 
-var _ fs.Directory = &loggingDirectory{}
-var _ fs.File = &loggingFile{}
-var _ fs.Symlink = &loggingSymlink{}
+var (
+	_ fs.Directory = &loggingDirectory{}
+	_ fs.File      = &loggingFile{}
+	_ fs.Symlink   = &loggingSymlink{}
+)

+ 5 - 3
internal/blobtesting/asserts.go

@@ -7,6 +7,8 @@ import (
 	"sort"
 	"time"
 
+	"github.com/pkg/errors"
+
 	"github.com/kopia/kopia/repo/blob"
 )
 
@@ -31,7 +33,7 @@ func AssertGetBlob(ctx context.Context, t testingT, s blob.Storage, blobID blob.
 		t.Errorf("GetBlob(%v) returned %x, but expected %x", blobID, b, expected)
 	}
 
-	half := int64(len(expected) / 2) //nolint:gomnd
+	half := int64(len(expected) / 2)
 	if half == 0 {
 		return
 	}
@@ -73,7 +75,7 @@ func AssertGetBlob(ctx context.Context, t testingT, s blob.Storage, blobID blob.
 	AssertInvalidOffsetLength(ctx, t, s, blobID, int64(len(expected)+1), 3)
 }
 
-// AssertInvalidOffsetLength verifies that the given combination of (offset,length) fails on GetBlob()
+// AssertInvalidOffsetLength verifies that the given combination of (offset,length) fails on GetBlob().
 func AssertInvalidOffsetLength(ctx context.Context, t testingT, s blob.Storage, blobID blob.ID, offset, length int64) {
 	if _, err := s.GetBlob(ctx, blobID, offset, length); err == nil {
 		t.Errorf("GetBlob(%v,%v,%v) did not return error for invalid offset/length", blobID, offset, length)
@@ -85,7 +87,7 @@ func AssertGetBlobNotFound(ctx context.Context, t testingT, s blob.Storage, blob
 	t.Helper()
 
 	b, err := s.GetBlob(ctx, blobID, 0, -1)
-	if err != blob.ErrBlobNotFound || b != nil {
+	if !errors.Is(err, blob.ErrBlobNotFound) || b != nil {
 		t.Errorf("GetBlob(%v) returned %v, %v but expected ErrNotFound", blobID, b, err)
 	}
 }

+ 4 - 4
internal/blobtesting/concurrent.go

@@ -15,7 +15,7 @@ import (
 	"github.com/kopia/kopia/repo/blob"
 )
 
-// ConcurrentAccessOptions encapsulates parameters for VerifyConcurrentAccess
+// ConcurrentAccessOptions encapsulates parameters for VerifyConcurrentAccess.
 type ConcurrentAccessOptions struct {
 	NumBlobs int // number of shared blos in the pool
 
@@ -40,7 +40,7 @@ func VerifyConcurrentAccess(t testingT, st blob.Storage, options ConcurrentAcces
 
 	for i := 0; i < options.NumBlobs; i++ {
 		blobIDBytes := make([]byte, 32)
-		cryptorand.Read(blobIDBytes) // nolint:errcheck
+		cryptorand.Read(blobIDBytes)
 		blobs = append(blobs, blob.ID(hex.EncodeToString(blobIDBytes)))
 	}
 
@@ -58,7 +58,7 @@ func VerifyConcurrentAccess(t testingT, st blob.Storage, options ConcurrentAcces
 				offset := int64(0)
 				length := int64(-1)
 
-				if rand.Intn(100) < options.RangeGetPercentage { //nolint:gomnd
+				if rand.Intn(100) < options.RangeGetPercentage {
 					offset = 10
 					length = 3
 				}
@@ -130,7 +130,7 @@ func VerifyConcurrentAccess(t testingT, st blob.Storage, options ConcurrentAcces
 			for i := 0; i < options.Iterations; i++ {
 				blobID := randomBlobID()
 				prefix := blobID[0:rand.Intn(len(blobID))]
-				if rand.Intn(100) < options.NonExistentListPrefixPercentage { //nolint:gomnd
+				if rand.Intn(100) < options.NonExistentListPrefixPercentage {
 					prefix = "zzz"
 				}
 

+ 1 - 1
internal/blobtesting/eventually_consistent.go

@@ -121,7 +121,7 @@ func (s *eventuallyConsistentStorage) GetBlob(ctx context.Context, id blob.ID, o
 	// fetch from the underlying storage.
 	v, err := s.realStorage.GetBlob(ctx, id, offset, length)
 	if err != nil {
-		if err == blob.ErrBlobNotFound {
+		if errors.Is(err, blob.ErrBlobNotFound) {
 			c.put(id, nil)
 		}
 

+ 8 - 8
internal/blobtesting/faulty.go

@@ -28,7 +28,7 @@ type FaultyStorage struct {
 	mu sync.Mutex
 }
 
-// GetBlob implements blob.Storage
+// GetBlob implements blob.Storage.
 func (s *FaultyStorage) GetBlob(ctx context.Context, id blob.ID, offset, length int64) ([]byte, error) {
 	if err := s.getNextFault(ctx, "GetBlob", id, offset, length); err != nil {
 		return nil, err
@@ -37,7 +37,7 @@ func (s *FaultyStorage) GetBlob(ctx context.Context, id blob.ID, offset, length
 	return s.Base.GetBlob(ctx, id, offset, length)
 }
 
-// GetMetadata implements blob.Storage
+// GetMetadata implements blob.Storage.
 func (s *FaultyStorage) GetMetadata(ctx context.Context, id blob.ID) (blob.Metadata, error) {
 	if err := s.getNextFault(ctx, "GetMetadata", id); err != nil {
 		return blob.Metadata{}, err
@@ -46,7 +46,7 @@ func (s *FaultyStorage) GetMetadata(ctx context.Context, id blob.ID) (blob.Metad
 	return s.Base.GetMetadata(ctx, id)
 }
 
-// PutBlob implements blob.Storage
+// PutBlob implements blob.Storage.
 func (s *FaultyStorage) PutBlob(ctx context.Context, id blob.ID, data blob.Bytes) error {
 	if err := s.getNextFault(ctx, "PutBlob", id); err != nil {
 		return err
@@ -55,7 +55,7 @@ func (s *FaultyStorage) PutBlob(ctx context.Context, id blob.ID, data blob.Bytes
 	return s.Base.PutBlob(ctx, id, data)
 }
 
-// DeleteBlob implements blob.Storage
+// DeleteBlob implements blob.Storage.
 func (s *FaultyStorage) DeleteBlob(ctx context.Context, id blob.ID) error {
 	if err := s.getNextFault(ctx, "DeleteBlob", id); err != nil {
 		return err
@@ -64,7 +64,7 @@ func (s *FaultyStorage) DeleteBlob(ctx context.Context, id blob.ID) error {
 	return s.Base.DeleteBlob(ctx, id)
 }
 
-// ListBlobs implements blob.Storage
+// ListBlobs implements blob.Storage.
 func (s *FaultyStorage) ListBlobs(ctx context.Context, prefix blob.ID, callback func(blob.Metadata) error) error {
 	if err := s.getNextFault(ctx, "ListBlobs", prefix); err != nil {
 		return err
@@ -78,7 +78,7 @@ func (s *FaultyStorage) ListBlobs(ctx context.Context, prefix blob.ID, callback
 	})
 }
 
-// Close implements blob.Storage
+// Close implements blob.Storage.
 func (s *FaultyStorage) Close(ctx context.Context) error {
 	if err := s.getNextFault(ctx, "Close"); err != nil {
 		return err
@@ -87,12 +87,12 @@ func (s *FaultyStorage) Close(ctx context.Context) error {
 	return s.Base.Close(ctx)
 }
 
-// ConnectionInfo implements blob.Storage
+// ConnectionInfo implements blob.Storage.
 func (s *FaultyStorage) ConnectionInfo() blob.ConnectionInfo {
 	return s.Base.ConnectionInfo()
 }
 
-// DisplayName implements blob.Storage
+// DisplayName implements blob.Storage.
 func (s *FaultyStorage) DisplayName() string {
 	return s.Base.DisplayName()
 }

+ 5 - 4
internal/blobtesting/map.go

@@ -3,12 +3,13 @@ package blobtesting
 import (
 	"bytes"
 	"context"
-	"errors"
 	"sort"
 	"strings"
 	"sync"
 	"time"
 
+	"github.com/pkg/errors"
+
 	"github.com/kopia/kopia/repo/blob"
 )
 
@@ -35,12 +36,12 @@ func (s *mapStorage) GetBlob(ctx context.Context, id blob.ID, offset, length int
 		}
 
 		if int(offset) > len(data) || offset < 0 {
-			return nil, errors.New("invalid offset")
+			return nil, errors.Errorf("invalid offset: %v", offset)
 		}
 
 		data = data[offset:]
 		if int(length) > len(data) {
-			return nil, errors.New("invalid length")
+			return nil, errors.Errorf("invalid length: %v", length)
 		}
 
 		return data[0:length], nil
@@ -73,7 +74,7 @@ func (s *mapStorage) PutBlob(ctx context.Context, id blob.ID, data blob.Bytes) e
 
 	var b bytes.Buffer
 
-	data.WriteTo(&b) //nolint:errcheck
+	data.WriteTo(&b)
 
 	s.data[id] = b.Bytes()
 

+ 1 - 1
internal/blobtesting/verify.go

@@ -63,7 +63,7 @@ func VerifyStorage(ctx context.Context, t testingT, r blob.Storage) {
 }
 
 // AssertConnectionInfoRoundTrips verifies that the ConnectionInfo returned by a given storage can be used to create
-// equivalent storage
+// equivalent storage.
 func AssertConnectionInfoRoundTrips(ctx context.Context, t testingT, s blob.Storage) {
 	t.Helper()
 

+ 1 - 1
internal/buf/pool.go

@@ -155,7 +155,7 @@ func NewPool(ctx context.Context, segmentSize int, poolID string) *Pool {
 	return p
 }
 
-// Close closes the pool
+// Close closes the pool.
 func (p *Pool) Close() {
 	close(p.closed)
 }

+ 2 - 1
internal/buf/pool_metrics.go

@@ -8,7 +8,7 @@ import (
 
 var tagKeyPool = tag.MustNewKey("pool")
 
-// buffer pool metrics
+// buffer pool metrics.
 var (
 	metricPoolAllocatedBuffers = stats.Int64(
 		"kopia/bufferpool/allocated_buffers",
@@ -62,6 +62,7 @@ func aggregateByPool(m stats.Measure, agg *view.Aggregation) *view.View {
 		TagKeys:     []tag.Key{tagKeyPool},
 	}
 }
+
 func init() {
 	if err := view.Register(
 		aggregateByPool(metricPoolAllocatedBytes, view.LastValue()),

+ 1 - 1
internal/buf/pool_test.go

@@ -13,7 +13,7 @@ func TestPool(t *testing.T) {
 	ctx := context.Background()
 
 	// 20 buffers of 1 MB each
-	var a = NewPool(ctx, 1000000, "testing-pool")
+	a := NewPool(ctx, 1000000, "testing-pool")
 	defer a.Close()
 
 	a.AddSegments(20)

+ 5 - 4
internal/diff/diff.go

@@ -63,7 +63,7 @@ func (c *Comparer) compareDirectories(ctx context.Context, dir1, dir2 fs.Directo
 	return c.compareDirectoryEntries(ctx, entries1, entries2, parent)
 }
 
-// nolint:gocyclo,gocognit
+// nolint:gocyclo
 func (c *Comparer) compareEntry(ctx context.Context, e1, e2 fs.Entry, path string) error {
 	// see if we have the same object IDs, which implies identical objects, thanks to content-addressable-storage
 	if h1, ok := e1.(object.HasObjectID); ok {
@@ -265,7 +265,7 @@ func (c *Comparer) compareFiles(ctx context.Context, f1, f2 fs.File, fname strin
 }
 
 func downloadFile(ctx context.Context, f fs.File, fname string) error {
-	if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil {
+	if err := os.MkdirAll(filepath.Dir(fname), 0o700); err != nil {
 		return err
 	}
 
@@ -279,7 +279,8 @@ func downloadFile(ctx context.Context, f fs.File, fname string) error {
 	if err != nil {
 		return err
 	}
-	defer dst.Close() //nolint:errcheck
+
+	defer dst.Close() //nolint:errcheck,gosec
 
 	_, err = iocopy.Copy(dst, src)
 
@@ -287,7 +288,7 @@ func downloadFile(ctx context.Context, f fs.File, fname string) error {
 }
 
 func (c *Comparer) output(msg string, args ...interface{}) {
-	fmt.Fprintf(c.out, msg, args...) //nolint:errcheck
+	fmt.Fprintf(c.out, msg, args...)
 }
 
 // NewComparer creates a comparer for a given repository that will output the results to a given writer.

+ 4 - 3
internal/editor/editor.go

@@ -4,7 +4,6 @@ package editor
 import (
 	"bufio"
 	"context"
-	"errors"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -13,6 +12,8 @@ import (
 	"runtime"
 	"strings"
 
+	"github.com/pkg/errors"
+
 	"github.com/kopia/kopia/repo/logging"
 )
 
@@ -30,7 +31,7 @@ func EditLoop(ctx context.Context, fname, initial string, parse func(updated str
 	tmpFile := filepath.Join(tmpDir, fname)
 	defer os.RemoveAll(tmpDir) //nolint:errcheck
 
-	if err := ioutil.WriteFile(tmpFile, []byte(initial), 0600); err != nil {
+	if err := ioutil.WriteFile(tmpFile, []byte(initial), 0o600); err != nil {
 		return err
 	}
 
@@ -67,7 +68,7 @@ func readAndStripComments(fname string) (string, error) {
 	if err != nil {
 		return "", err
 	}
-	defer f.Close() //nolint:errcheck
+	defer f.Close() //nolint:errcheck,gosec
 
 	var result []string
 

+ 5 - 5
internal/faketime/faketime.go

@@ -7,7 +7,7 @@ import (
 	"time"
 )
 
-// Frozen returns a function that always returns t
+// Frozen returns a function that always returns t.
 func Frozen(t time.Time) func() time.Time {
 	return func() time.Time {
 		return t
@@ -17,7 +17,7 @@ func Frozen(t time.Time) func() time.Time {
 // AutoAdvance returns a time source function that returns a time equal to
 // 't + ((n - 1) * dt)' wheren n is the number of serialized invocations of
 // the returned function. The returned function will generate a time series of
-// the form [t, t+dt, t+2dt, t+3dt, ...]
+// the form [t, t+dt, t+2dt, t+3dt, ...].
 func AutoAdvance(t time.Time, dt time.Duration) func() time.Time {
 	var mu sync.Mutex
 
@@ -39,12 +39,12 @@ type TimeAdvance struct {
 	base  time.Time
 }
 
-// NewTimeAdvance creates a TimeAdvance with the given start time
+// NewTimeAdvance creates a TimeAdvance with the given start time.
 func NewTimeAdvance(start time.Time) *TimeAdvance {
 	return &TimeAdvance{base: start}
 }
 
-// NowFunc returns a time provider function for t
+// NowFunc returns a time provider function for t.
 func (t *TimeAdvance) NowFunc() func() time.Time {
 	return func() time.Time {
 		dt := atomic.LoadInt64(&t.delta)
@@ -54,7 +54,7 @@ func (t *TimeAdvance) NowFunc() func() time.Time {
 }
 
 // Advance advances t by dt, such that the next call to t.NowFunc()() returns
-// current t + dt
+// current t + dt.
 func (t *TimeAdvance) Advance(dt time.Duration) time.Time {
 	advance := atomic.AddInt64(&t.delta, int64(dt))
 

+ 1 - 1
internal/fshasher/fshasher.go

@@ -18,7 +18,7 @@ import (
 
 var log = logging.GetContextLoggerFunc("kopia/internal/fshasher")
 
-// Hash computes a recursive hash of e using the given hasher h
+// Hash computes a recursive hash of e using the given hasher h.
 func Hash(ctx context.Context, e fs.Entry) ([]byte, error) {
 	h, err := blake2s.New256(nil)
 	if err != nil {

+ 10 - 10
internal/fshasher/fshasher_test.go

@@ -14,18 +14,18 @@ func TestHash(t *testing.T) {
 	const expectDifferentHashes = "Expected different hashes, got the same"
 
 	root := mockfs.NewDirectory()
-	root.AddFile("file1", []byte("foo-bar"), 0444)
+	root.AddFile("file1", []byte("foo-bar"), 0o444)
 
-	d1 := root.AddDir("dir1", 0755)
-	d1.AddFile("d1-f1", []byte("d1-f1-content"), 0644)
+	d1 := root.AddDir("dir1", 0o755)
+	d1.AddFile("d1-f1", []byte("d1-f1-content"), 0o644)
 
 	ensure := require.New(t)
 	ctx := testlogging.Context(t)
 	h1, err := Hash(ctx, root)
 	ensure.NoError(err)
 
-	d2 := root.AddDir("dir2", 0755)
-	d2.AddFile("d1-f1", []byte("d1-f1-content"), 0644)
+	d2 := root.AddDir("dir2", 0o755)
+	d2.AddFile("d1-f1", []byte("d1-f1-content"), 0o644)
 
 	h2, err := Hash(ctx, root)
 	ensure.NoError(err)
@@ -40,23 +40,23 @@ func TestHash(t *testing.T) {
 	ensure.Equal(hd1, hd2, "Expected same hashes, got the different ones")
 
 	// Add an entry to d2
-	d2.AddFile("f2", []byte("f2-content"), 0444)
+	d2.AddFile("f2", []byte("f2-content"), 0o444)
 	hd2, err = Hash(ctx, d2)
 	ensure.NoError(err)
 	ensure.NotEqual(hd1, hd2, expectDifferentHashes)
 
 	// Test different permission attributes for the top directory
 	// d3 is the same as d1, but with different permissions
-	d3 := root.AddDir("dir3", 0700)
-	d3.AddFile("d1-f1", []byte("d1-f1-content"), 0644)
+	d3 := root.AddDir("dir3", 0o700)
+	d3.AddFile("d1-f1", []byte("d1-f1-content"), 0o644)
 	hd3, err := Hash(ctx, d3)
 	ensure.NoError(err)
 	ensure.NotEqual(hd3, hd1, expectDifferentHashes)
 
 	// Test different permission attributes for file
 	// d4 is the same as d2, but with different permissions in d1-f1
-	d4 := root.AddDir("dir4", 0700)
-	d4.AddFile("d1-f1", []byte("d1-f1-content"), 0644)
+	d4 := root.AddDir("dir4", 0o700)
+	d4.AddFile("d1-f1", []byte("d1-f1-content"), 0o644)
 	hd4, err := Hash(ctx, d4)
 	ensure.NoError(err)
 	ensure.NotEqual(hd4, hd1, expectDifferentHashes)

+ 5 - 6
internal/fusemount/fusefs.go

@@ -9,14 +9,12 @@ import (
 	"io/ioutil"
 	"os"
 
-	"github.com/pkg/errors"
-
-	"github.com/kopia/kopia/fs"
-
 	"bazil.org/fuse"
 	fusefs "bazil.org/fuse/fs"
-
+	"github.com/pkg/errors"
 	"golang.org/x/net/context"
+
+	"github.com/kopia/kopia/fs"
 )
 
 type fuseNode struct {
@@ -87,6 +85,7 @@ func (dir *fuseDirectoryNode) ReadDirAll(ctx context.Context) ([]fuse.Dirent, er
 			Name: e.Name(),
 		}
 
+		// nolint:exhaustive
 		switch e.Mode() & os.ModeType {
 		case os.ModeDir:
 			dirent.Type = fuse.DT_Dir
@@ -127,7 +126,7 @@ func newDirectoryNode(dir fs.Directory) fusefs.Node {
 	return &fuseDirectoryNode{fuseNode{dir}}
 }
 
-// NewDirectoryNode returns FUSE Node for a given fs.Directory
+// NewDirectoryNode returns FUSE Node for a given fs.Directory.
 func NewDirectoryNode(dir fs.Directory) fusefs.Node {
 	return newDirectoryNode(dir)
 }

+ 2 - 1
internal/hmac/hmac.go

@@ -4,7 +4,8 @@ package hmac
 import (
 	"crypto/hmac"
 	"crypto/sha256"
-	"errors"
+
+	"github.com/pkg/errors"
 )
 
 // Append computes HMAC-SHA256 checksum for a given block of bytes and appends it.

+ 1 - 1
internal/iocopy/copy.go

@@ -16,7 +16,7 @@ var bufferPool = sync.Pool{
 	},
 }
 
-// Copy is equivalent to io.Copy()
+// Copy is equivalent to io.Copy().
 func Copy(dst io.Writer, src io.Reader) (int64, error) {
 	bufPtr := bufferPool.Get().(*[]byte)
 

+ 9 - 7
internal/logfile/logfile.go

@@ -3,7 +3,6 @@ package logfile
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"io/ioutil"
 	"math"
@@ -15,6 +14,7 @@ import (
 	"time"
 
 	logging "github.com/op/go-logging"
+	"github.com/pkg/errors"
 	"gopkg.in/alecthomas/kingpin.v2"
 
 	"github.com/kopia/kopia/cli"
@@ -40,8 +40,10 @@ var (
 
 var log = repologging.GetContextLoggerFunc("kopia")
 
-const logFileNamePrefix = "kopia-"
-const logFileNameSuffix = ".log"
+const (
+	logFileNamePrefix = "kopia-"
+	logFileNameSuffix = ".log"
+)
 
 // Initialize is invoked as part of command execution to create log file just before it's needed.
 func Initialize(ctx *kingpin.ParseContext) error {
@@ -74,8 +76,8 @@ func Initialize(ctx *kingpin.ParseContext) error {
 		logFileDir := filepath.Dir(logFileName)
 		logFileBaseName := filepath.Base(logFileName)
 
-		if err := os.MkdirAll(logFileDir, 0700); err != nil {
-			fmt.Fprintln(os.Stderr, "Unable to create logs directory:", err) // nolint:errcheck
+		if err := os.MkdirAll(logFileDir, 0o700); err != nil {
+			fmt.Fprintln(os.Stderr, "Unable to create logs directory:", err)
 		}
 
 		logBackends = append(
@@ -126,7 +128,7 @@ func sweepLogDir(ctx context.Context, dirname string, maxCount int, maxAge time.
 		return entries[i].ModTime().After(entries[j].ModTime())
 	})
 
-	var cnt = 0
+	cnt := 0
 
 	for _, e := range entries {
 		if !strings.HasPrefix(e.Name(), logFileNamePrefix) {
@@ -180,7 +182,7 @@ func (w *onDemandBackend) Log(level logging.Level, depth int, rec *logging.Recor
 		lf := filepath.Join(w.logDir, w.logFileBaseName)
 		f, err := os.Create(lf)
 		if err != nil {
-			fmt.Fprintf(os.Stderr, "unable to open log file: %v\n", err) //nolint:errcheck
+			fmt.Fprintf(os.Stderr, "unable to open log file: %v\n", err)
 			return
 		}
 

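Two mechanical cleanups meet in this file: adjacent single-constant declarations are folded into one parenthesized block (the style gofumpt prefers), and //nolint:errcheck comments that no longer suppress an active warning are dropped. A compilable sketch of the grouped form, reusing the constants from the hunk above:

package main

import "fmt"

// One grouped block instead of two back-to-back const declarations.
const (
	logFileNamePrefix = "kopia-"
	logFileNameSuffix = ".log"
)

func main() {
	fmt.Println(logFileNamePrefix + "20200701-120000" + logFileNameSuffix) // kopia-20200701-120000.log
}
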
+ 3 - 3
internal/mockfs/mockfs.go

@@ -13,7 +13,7 @@ import (
 	"github.com/kopia/kopia/fs"
 )
 
-// ReaderSeekerCloser implements io.Reader, io.Seeker and io.Closer
+// ReaderSeekerCloser implements io.Reader, io.Seeker and io.Closer.
 type ReaderSeekerCloser interface {
 	io.Reader
 	io.Seeker
@@ -64,7 +64,7 @@ func (e entry) Owner() fs.OwnerInfo {
 	return e.owner
 }
 
-// Directory is mock in-memory implementation of fs.Directory
+// Directory is mock in-memory implementation of fs.Directory.
 type Directory struct {
 	entry
 
@@ -246,7 +246,7 @@ func NewDirectory() *Directory {
 	return &Directory{
 		entry: entry{
 			name: "<root>",
-			mode: 0777 | os.ModeDir, // nolint:gomnd
+			mode: 0o777 | os.ModeDir, // nolint:gomnd
 		},
 	}
 }

+ 1 - 1
internal/parallelwork/parallel_work_queue.go

@@ -24,7 +24,7 @@ type Queue struct {
 	ProgressCallback func(enqueued, active, completed int64)
 }
 
-// CallbackFunc is a callback function
+// CallbackFunc is a callback function.
 type CallbackFunc func() error
 
 // EnqueueFront adds the work to the front of the queue.

+ 2 - 2
internal/remoterepoapi/remoterepoapi.go

@@ -9,7 +9,7 @@ import (
 )
 
 // Parameters encapsulates all parameters for repository.
-// returned by /api/v1/repo/parameters
+// returned by /api/v1/repo/parameters.
 type Parameters struct {
 	HashFunction string `json:"hash"`
 	HMACSecret   []byte `json:"hmacSecret"`
@@ -23,7 +23,7 @@ func (p *Parameters) GetHashFunction() string { return p.HashFunction }
 // GetHMACSecret returns the HMAC secret for the remote repository.
 func (p *Parameters) GetHMACSecret() []byte { return p.HMACSecret }
 
-// ManifestWithMetadata represents manifest payload and metadata
+// ManifestWithMetadata represents manifest payload and metadata.
 type ManifestWithMetadata struct {
 	Payload  json.RawMessage         `json:"payload"`
 	Metadata *manifest.EntryMetadata `json:"metadata"`

+ 1 - 1
internal/repotesting/repotesting.go

@@ -86,7 +86,7 @@ func (e *Environment) Setup(t *testing.T, opts ...func(*repo.NewRepositoryOption
 	return e
 }
 
-// Close closes testing environment
+// Close closes testing environment.
 func (e *Environment) Close(ctx context.Context, t *testing.T) {
 	if err := e.Repository.Close(ctx); err != nil {
 		t.Fatalf("unable to close: %v", err)

+ 4 - 4
internal/repotesting/repotesting_test.go

@@ -61,15 +61,15 @@ func TestTimeFuncWiring(t *testing.T) {
 
 	// verify wiring for the manifest layer
 	nt = ft.Advance(3 * time.Minute)
+
 	labels := map[string]string{"l1": "v1", "l2": "v2", "type": "my-manifest"}
-	mid, err := r.Manifests.Put(ctx, labels, "manifest content")
 
+	mid, err := r.Manifests.Put(ctx, labels, "manifest content")
 	if err != nil {
 		t.Fatal("failed to put manifest:", err)
 	}
 
 	meta, err := r.Manifests.GetMetadata(ctx, mid)
-
 	if err != nil {
 		t.Fatal("failed to get manifest metadata:", err)
 	}
@@ -78,7 +78,7 @@ func TestTimeFuncWiring(t *testing.T) {
 		t.Errorf("manifest time does not match, got %v, want %v", got, want)
 	}
 
-	const defaultPermissions = 0777
+	const defaultPermissions = 0o777
 
 	// verify wiring for the snapshot layer
 	sourceDir := mockfs.NewDirectory()
@@ -87,8 +87,8 @@ func TestTimeFuncWiring(t *testing.T) {
 	nt = ft.Advance(1 * time.Hour)
 	u := snapshotfs.NewUploader(r)
 	policyTree := policy.BuildTree(nil, policy.DefaultPolicy)
-	s1, err := u.Upload(ctx, sourceDir, policyTree, snapshot.SourceInfo{})
 
+	s1, err := u.Upload(ctx, sourceDir, policyTree, snapshot.SourceInfo{})
 	if err != nil {
 		t.Fatal("failed to create snapshot:", err)
 	}

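The blank-line shuffling in this test matches the layout the wsl linter accepts: an assignment whose error is checked immediately must sit directly above its if statement, with a blank line separating it from unrelated preceding code. A small self-contained sketch of that shape; the parsing example is illustrative only:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	input := "42"

	// The assignment cuddles with the if that checks its error,
	// and a blank line separates it from the statement above.
	n, err := strconv.Atoi(input)
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}

	fmt.Println("parsed:", n)
}
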
+ 1 - 3
internal/retry/retry_test.go

@@ -9,9 +9,7 @@ import (
 	"github.com/kopia/kopia/internal/testlogging"
 )
 
-var (
-	errRetriable = errors.New("retriable")
-)
+var errRetriable = errors.New("retriable")
 
 func isRetriable(e error) bool {
 	return e == errRetriable

+ 2 - 1
internal/scrubber/scrub_sensitive.go

@@ -7,8 +7,9 @@ import (
 )
 
 // ScrubSensitiveData returns a copy of a given value with sensitive fields scrubbed.
-// Fields are marked as sensitive with truct field tag `kopia:"sensitive"`
+// Fields are marked as sensitive with struct field tag `kopia:"sensitive"`.
 func ScrubSensitiveData(v reflect.Value) reflect.Value {
+	// nolint:exhaustive
 	switch v.Kind() {
 	case reflect.Ptr:
 		return ScrubSensitiveData(v.Elem()).Addr()

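The // nolint:exhaustive directives added here and in later hunks mark switches that deliberately handle only a subset of an enum-like type; the exhaustive linter would otherwise ask for a case per declared value. A sketch of such a switch over reflect.Kind, with illustrative names rather than code from the commit:

package main

import (
	"fmt"
	"reflect"
)

// describe cares about two kinds only; the default covers everything else,
// so the exhaustiveness warning is silenced on purpose.
// nolint:exhaustive
func describe(v reflect.Value) string {
	switch v.Kind() {
	case reflect.Ptr:
		return "pointer"
	case reflect.Struct:
		return "struct"
	default:
		return "other"
	}
}

func main() {
	fmt.Println(describe(reflect.ValueOf(&struct{}{}))) // pointer
	fmt.Println(describe(reflect.ValueOf(7)))           // other
}
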
+ 2 - 1
internal/server/api_content.go

@@ -2,6 +2,7 @@ package server
 
 import (
 	"context"
+	"errors"
 	"io/ioutil"
 	"net/http"
 
@@ -21,7 +22,7 @@ func (s *Server) handleContentGet(ctx context.Context, r *http.Request) (interfa
 	cid := content.ID(mux.Vars(r)["contentID"])
 
 	data, err := dr.Content.GetContent(ctx, cid)
-	if err == content.ErrContentNotFound {
+	if errors.Is(err, content.ErrContentNotFound) {
 		return nil, notFoundError("content not found")
 	}
 

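The handler change above replaces a direct == comparison against a sentinel error with errors.Is, which also matches the sentinel when it arrives wrapped. A standalone illustration of the difference; the sentinel and lookup function are invented:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

func lookup() error {
	// Wrapping with %w keeps the sentinel in the error chain.
	return fmt.Errorf("lookup: %w", errNotFound)
}

func main() {
	err := lookup()

	fmt.Println(err == errNotFound)          // false: the wrapper is a different value
	fmt.Println(errors.Is(err, errNotFound)) // true: errors.Is walks the chain
}
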
+ 4 - 2
internal/server/api_manifest.go

@@ -3,6 +3,7 @@ package server
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"net/http"
 
 	"github.com/gorilla/mux"
@@ -21,7 +22,7 @@ func (s *Server) handleManifestGet(ctx context.Context, r *http.Request) (interf
 	var data json.RawMessage
 
 	md, err := s.rep.GetManifest(ctx, mid, &data)
-	if err == manifest.ErrNotFound {
+	if errors.Is(err, manifest.ErrNotFound) {
 		return nil, notFoundError("manifest not found")
 	}
 
@@ -43,7 +44,7 @@ func (s *Server) handleManifestDelete(ctx context.Context, r *http.Request) (int
 	mid := manifest.ID(mux.Vars(r)["manifestID"])
 
 	err := s.rep.DeleteManifest(ctx, mid)
-	if err == manifest.ErrNotFound {
+	if errors.Is(err, manifest.ErrNotFound) {
 		return nil, notFoundError("manifest not found")
 	}
 
@@ -53,6 +54,7 @@ func (s *Server) handleManifestDelete(ctx context.Context, r *http.Request) (int
 
 	return &serverapi.Empty{}, nil
 }
+
 func (s *Server) handleManifestList(ctx context.Context, r *http.Request) (interface{}, *apiError) {
 	// password already validated by a wrapper, no need to check here.
 	userAtHost, _, _ := r.BasicAuth()

+ 2 - 1
internal/server/api_object_get.go

@@ -1,6 +1,7 @@
 package server
 
 import (
+	"errors"
 	"net/http"
 	"time"
 
@@ -19,7 +20,7 @@ func (s *Server) handleObjectGet(w http.ResponseWriter, r *http.Request) {
 	}
 
 	obj, err := s.rep.OpenObject(r.Context(), oid)
-	if err == object.ErrObjectNotFound {
+	if errors.Is(err, object.ErrObjectNotFound) {
 		http.Error(w, "object not found", http.StatusNotFound)
 		return
 	}

+ 2 - 1
internal/server/api_policies.go

@@ -3,6 +3,7 @@ package server
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"net/http"
 	"net/url"
 
@@ -51,7 +52,7 @@ func getPolicyTargetFromURL(u *url.URL) snapshot.SourceInfo {
 
 func (s *Server) handlePolicyGet(ctx context.Context, r *http.Request) (interface{}, *apiError) {
 	pol, err := policy.GetDefinedPolicy(ctx, s.rep, getPolicyTargetFromURL(r.URL))
-	if err == policy.ErrPolicyNotFound {
+	if errors.Is(err, policy.ErrPolicyNotFound) {
 		return nil, requestError(serverapi.ErrorNotFound, "policy not found")
 	}
 

+ 18 - 17
internal/server/htmlui_fallback.go

@@ -51,32 +51,32 @@ type bindataFileInfo struct {
 	modTime time.Time
 }
 
-// Name return file name
+// Name return file name.
 func (fi bindataFileInfo) Name() string {
 	return fi.name
 }
 
-// Size return file size
+// Size return file size.
 func (fi bindataFileInfo) Size() int64 {
 	return fi.size
 }
 
-// Mode return file mode
+// Mode return file mode.
 func (fi bindataFileInfo) Mode() os.FileMode {
 	return fi.mode
 }
 
-// Mode return file modify time
+// Mode return file modify time.
 func (fi bindataFileInfo) ModTime() time.Time {
 	return fi.modTime
 }
 
-// IsDir return file whether a directory
+// IsDir return file whether a directory.
 func (fi bindataFileInfo) IsDir() bool {
 	return fi.mode&os.ModeDir != 0
 }
 
-// Sys return file is sys mode
+// Sys return file is sys mode.
 func (fi bindataFileInfo) Sys() interface{} {
 	return nil
 }
@@ -90,7 +90,7 @@ type assetFile struct {
 
 type assetOperator struct{}
 
-// Open implement http.FileSystem interface
+// Open implement http.FileSystem interface.
 func (f *assetOperator) Open(name string) (http.File, error) {
 	var err error
 	if len(name) > 0 && name[0] == '/' {
@@ -124,12 +124,12 @@ func (f *assetOperator) Open(name string) (http.File, error) {
 	}
 }
 
-// Close no need do anything
+// Close no need do anything.
 func (f *assetFile) Close() error {
 	return nil
 }
 
-// Readdir read dir's children file info
+// Readdir read dir's children file info.
 func (f *assetFile) Readdir(count int) ([]os.FileInfo, error) {
 	if len(f.childInfos) == 0 {
 		return nil, os.ErrNotExist
@@ -145,7 +145,7 @@ func (f *assetFile) Readdir(count int) ([]os.FileInfo, error) {
 	return f.childInfos[offset : offset+count], nil
 }
 
-// Stat read file info from asset item
+// Stat read file info from asset item.
 func (f *assetFile) Stat() (os.FileInfo, error) {
 	if len(f.childInfos) != 0 {
 		return newDirFileInfo(f.name), nil
@@ -153,16 +153,17 @@ func (f *assetFile) Stat() (os.FileInfo, error) {
 	return AssetInfo(f.name)
 }
 
-// newDirFileInfo return default dir file info
+// newDirFileInfo return default dir file info.
 func newDirFileInfo(name string) os.FileInfo {
 	return &bindataFileInfo{
 		name:    name,
 		size:    0,
 		mode:    os.FileMode(2147484068), // equal os.FileMode(0644)|os.ModeDir
-		modTime: time.Time{}}
+		modTime: time.Time{},
+	}
 }
 
-// AssetFile return a http.FileSystem instance that data backend by asset
+// AssetFile return a http.FileSystem instance that data backend by asset.
 func AssetFile() http.FileSystem {
 	return &assetOperator{}
 }
@@ -283,10 +284,10 @@ type bintree struct {
 }
 
 var _bintree = &bintree{nil, map[string]*bintree{
-	"index.html": &bintree{indexHtml, map[string]*bintree{}},
+	"index.html": {indexHtml, map[string]*bintree{}},
 }}
 
-// RestoreAsset restores an asset under the given directory
+// RestoreAsset restores an asset under the given directory.
 func RestoreAsset(dir, name string) error {
 	data, err := Asset(name)
 	if err != nil {
@@ -296,7 +297,7 @@ func RestoreAsset(dir, name string) error {
 	if err != nil {
 		return err
 	}
-	err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
+	err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0o755))
 	if err != nil {
 		return err
 	}
@@ -311,7 +312,7 @@ func RestoreAsset(dir, name string) error {
 	return nil
 }
 
-// RestoreAssets restores an asset under the given directory recursively
+// RestoreAssets restores an asset under the given directory recursively.
 func RestoreAssets(dir, name string) error {
 	children, err := AssetDir(name)
 	// File

+ 1 - 1
internal/server/source_manager.go

@@ -24,7 +24,7 @@ const (
 // - READY - waiting for next snapshot
 // - PAUSED - inactive
 // - FAILED - inactive
-// - UPLOADING - uploading a snapshot
+// - UPLOADING - uploading a snapshot.
 type sourceManager struct {
 	snapshotfs.NullUploadProgress
 

+ 1 - 1
internal/serverapi/serverapi.go

@@ -93,7 +93,7 @@ type MultipleSourceActionResponse struct {
 	Sources map[string]SourceActionResponse `json:"sources"`
 }
 
-// CreateRepositoryRequest contains request to create a repository in a given storage
+// CreateRepositoryRequest contains request to create a repository in a given storage.
 type CreateRepositoryRequest struct {
 	ConnectRepositoryRequest
 	NewRepositoryOptions repo.NewRepositoryOptions `json:"options"`

+ 2 - 2
internal/stats/countsum.go

@@ -6,14 +6,14 @@ package stats
 
 import "sync/atomic"
 
-// CountSum holds sum and count values
+// CountSum holds sum and count values.
 type CountSum struct {
 	sum   int64
 	count uint32
 }
 
 // Add adds size to s and returns approximate values for the current count
-// and total bytes
+// and total bytes.
 func (s *CountSum) Add(size int64) (count uint32, sum int64) {
 	return atomic.AddUint32(&s.count, 1), atomic.AddInt64(&s.sum, size)
 }

+ 2 - 2
internal/stats/countsum_mutex.go

@@ -8,7 +8,7 @@ import (
 	"sync"
 )
 
-// CountSum holds sum and count values
+// CountSum holds sum and count values.
 type CountSum struct {
 	mu    sync.Mutex
 	sum   int64
@@ -16,7 +16,7 @@ type CountSum struct {
 }
 
 // Add adds size to s and returns approximate values for the current count
-// and total bytes
+// and total bytes.
 func (s *CountSum) Add(size int64) (count uint32, sum int64) {
 	s.mu.Lock()
 	defer s.mu.Unlock()

+ 5 - 1
internal/testlogging/ctx.go

@@ -17,7 +17,7 @@ type testingT interface {
 // Level specifies log level.
 type Level int
 
-// log levels
+// log levels.
 const (
 	LevelDebug Level = iota
 	LevelInfo
@@ -40,6 +40,7 @@ func (l *testLogger) Debugf(msg string, args ...interface{}) {
 	l.t.Helper()
 	l.t.Logf(l.prefix+msg, args...)
 }
+
 func (l *testLogger) Infof(msg string, args ...interface{}) {
 	if l.minLevel > LevelInfo {
 		return
@@ -48,6 +49,7 @@ func (l *testLogger) Infof(msg string, args ...interface{}) {
 	l.t.Helper()
 	l.t.Logf(l.prefix+msg, args...)
 }
+
 func (l *testLogger) Warningf(msg string, args ...interface{}) {
 	if l.minLevel > LevelWarning {
 		return
@@ -56,6 +58,7 @@ func (l *testLogger) Warningf(msg string, args ...interface{}) {
 	l.t.Helper()
 	l.t.Logf(l.prefix+"warning: "+msg, args...)
 }
+
 func (l *testLogger) Errorf(msg string, args ...interface{}) {
 	if l.minLevel > LevelError {
 		return
@@ -64,6 +67,7 @@ func (l *testLogger) Errorf(msg string, args ...interface{}) {
 	l.t.Helper()
 	l.t.Errorf(l.prefix+msg, args...)
 }
+
 func (l *testLogger) Fatalf(msg string, args ...interface{}) {
 	if l.minLevel > LevelFatal {
 		return

+ 8 - 8
internal/testutil/retriable_t.go

@@ -30,56 +30,56 @@ func (t *RetriableT) maybeSuppressAndSkip(cnt int32) {
 	}
 }
 
-// Fail wraps testing.T.Fail()
+// Fail wraps testing.T.Fail().
 func (t *RetriableT) Fail() {
 	t.T.Helper()
 	t.maybeSuppressAndSkip(1)
 	t.T.Fail()
 }
 
-// FailNow wraps testing.T.FailNow()
+// FailNow wraps testing.T.FailNow().
 func (t *RetriableT) FailNow() {
 	t.T.Helper()
 	t.maybeSuppressAndSkip(1)
 	t.T.FailNow()
 }
 
-// Error wraps testing.T.Error()
+// Error wraps testing.T.Error().
 func (t *RetriableT) Error(args ...interface{}) {
 	t.T.Helper()
 	t.maybeSuppressAndSkip(1)
 	t.T.Error(args...)
 }
 
-// Errorf wraps testing.T.Errorf()
+// Errorf wraps testing.T.Errorf().
 func (t *RetriableT) Errorf(msg string, args ...interface{}) {
 	t.T.Helper()
 	t.maybeSuppressAndSkip(1)
 	t.T.Errorf(msg, args...)
 }
 
-// Fatal wraps testing.T.Fatal()
+// Fatal wraps testing.T.Fatal().
 func (t *RetriableT) Fatal(args ...interface{}) {
 	t.T.Helper()
 	t.maybeSuppressAndSkip(1)
 	t.T.Fatal(args...)
 }
 
-// Fatalf wraps testing.T.Fatalf()
+// Fatalf wraps testing.T.Fatalf().
 func (t *RetriableT) Fatalf(msg string, args ...interface{}) {
 	t.T.Helper()
 	t.maybeSuppressAndSkip(1)
 	t.T.Fatalf(msg, args...)
 }
 
-// Skip wraps testing.T.Skip()
+// Skip wraps testing.T.Skip().
 func (t *RetriableT) Skip(args ...interface{}) {
 	t.T.Helper()
 	t.maybeSuppressAndSkip(0)
 	t.T.Skip(args...)
 }
 
-// Skipf wraps testing.T.Skipf()
+// Skipf wraps testing.T.Skipf().
 func (t *RetriableT) Skipf(msg string, args ...interface{}) {
 	t.T.Helper()
 	t.maybeSuppressAndSkip(0)

+ 1 - 1
internal/testutil/retriable_t_test.go

@@ -6,7 +6,7 @@ import (
 	"github.com/kopia/kopia/internal/testutil"
 )
 
-// different ways a test can fail
+// different ways a test can fail.
 var cases = map[string]func(r *testutil.RetriableT){
 	"Fail":    func(r *testutil.RetriableT) { r.Fail() },
 	"FailNow": func(r *testutil.RetriableT) { r.FailNow() },

+ 1 - 1
internal/throttle/round_tripper_test.go

@@ -41,7 +41,7 @@ func (fp *fakePool) AddReader(r io.ReadCloser) (io.ReadCloser, error) {
 	return r, nil
 }
 
-//nolint:gocyclo,gocognit
+//nolint:gocyclo
 func TestRoundTripper(t *testing.T) {
 	downloadBody := ioutil.NopCloser(bytes.NewReader([]byte("data1")))
 	uploadBody := ioutil.NopCloser(bytes.NewReader([]byte("data1")))

+ 4 - 2
internal/units/units.go

@@ -6,8 +6,10 @@ import (
 	"strings"
 )
 
-var base10UnitPrefixes = []string{"", "K", "M", "G", "T"}
-var base2UnitPrefixes = []string{"", "Ki", "Mi", "Gi", "Ti"}
+var (
+	base10UnitPrefixes = []string{"", "K", "M", "G", "T"}
+	base2UnitPrefixes  = []string{"", "Ki", "Mi", "Gi", "Ti"}
+)
 
 func niceNumber(f float64) string {
 	return strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.1f", f), "0"), ".")

+ 5 - 3
internal/webdavmount/webdavmount.go

@@ -16,9 +16,11 @@ import (
 
 var log = logging.GetContextLoggerFunc("kopia/webdavmount")
 
-var _ os.FileInfo = webdavFileInfo{}
-var _ webdav.File = (*webdavFile)(nil)
-var _ webdav.File = (*webdavDir)(nil)
+var (
+	_ os.FileInfo = webdavFileInfo{}
+	_ webdav.File = (*webdavFile)(nil)
+	_ webdav.File = (*webdavDir)(nil)
+)
 
 type webdavFile struct {
 	ctx   context.Context

+ 1 - 2
main.go

@@ -12,14 +12,13 @@ package main
 import (
 	"os"
 
+	gologging "github.com/op/go-logging"
 	"gopkg.in/alecthomas/kingpin.v2"
 
 	"github.com/kopia/kopia/cli"
 	"github.com/kopia/kopia/internal/logfile"
 	"github.com/kopia/kopia/repo"
 	"github.com/kopia/kopia/repo/logging"
-
-	gologging "github.com/op/go-logging"
 )
 
 const usageTemplate = `{{define "FormatCommand"}}\

+ 2 - 2
repo/api_server_repository.go

@@ -231,11 +231,11 @@ func ConnectAPIServer(ctx context.Context, configFile string, si *APIServerInfo,
 		return err
 	}
 
-	if err = os.MkdirAll(filepath.Dir(configFile), 0700); err != nil {
+	if err = os.MkdirAll(filepath.Dir(configFile), 0o700); err != nil {
 		return errors.Wrap(err, "unable to create config directory")
 	}
 
-	if err = ioutil.WriteFile(configFile, d, 0600); err != nil {
+	if err = ioutil.WriteFile(configFile, d, 0o600); err != nil {
 		return errors.Wrap(err, "unable to write config file")
 	}
 

+ 5 - 3
repo/blob/azure/azure_storage.go

@@ -105,6 +105,7 @@ func isRetriableError(err error) bool {
 	}
 
 	// https://pkg.go.dev/gocloud.dev/gcerrors?tab=doc#ErrorCode
+	// nolint:exhaustive
 	switch gcerrors.Code(err) {
 	case gcerrors.Internal:
 		return true
@@ -116,6 +117,7 @@ func isRetriableError(err error) bool {
 }
 
 func translateError(err error) error {
+	// nolint:exhaustive
 	switch gcerrors.Code(err) {
 	case gcerrors.OK:
 		return nil
@@ -155,7 +157,7 @@ func (az *azStorage) PutBlob(ctx context.Context, b blob.ID, data blob.Bytes) er
 	return translateError(writer.Close())
 }
 
-// DeleteBlob deletes azure blob from container with given ID
+// DeleteBlob deletes azure blob from container with given ID.
 func (az *azStorage) DeleteBlob(ctx context.Context, b blob.ID) error {
 	attempt := func() (interface{}, error) {
 		return nil, az.bucket.Delete(ctx, az.getObjectNameString(b))
@@ -164,7 +166,7 @@ func (az *azStorage) DeleteBlob(ctx context.Context, b blob.ID) error {
 	err = translateError(err)
 
 	// don't return error if blob is already deleted
-	if err == blob.ErrBlobNotFound {
+	if errors.Is(err, blob.ErrBlobNotFound) {
 		return nil
 	}
 
@@ -175,7 +177,7 @@ func (az *azStorage) getObjectNameString(b blob.ID) string {
 	return az.Prefix + string(b)
 }
 
-// ListBlobs list azure blobs with given prefix
+// ListBlobs list azure blobs with given prefix.
 func (az *azStorage) ListBlobs(ctx context.Context, prefix blob.ID, callback func(blob.Metadata) error) error {
 	// create list iterator
 	li := az.bucket.List(&gblob.ListOptions{Prefix: az.getObjectNameString(prefix)})

+ 3 - 3
repo/blob/azure/azure_storage_test.go

@@ -72,16 +72,16 @@ func TestAzureStorage(t *testing.T) {
 	createContainer(t, container, storageAccount, storageKey)
 
 	data := make([]byte, 8)
-	rand.Read(data) //nolint:errcheck
+	rand.Read(data)
 
 	ctx := context.Background()
+
 	st, err := azure.New(ctx, &azure.Options{
 		Container:      container,
 		StorageAccount: storageAccount,
 		StorageKey:     storageKey,
 		Prefix:         fmt.Sprintf("test-%v-%x-", time.Now().Unix(), data),
 	})
-
 	if err != nil {
 		t.Fatalf("unable to connect to Azure: %v", err)
 	}
@@ -113,12 +113,12 @@ func TestAzureStorageInvalidBlob(t *testing.T) {
 	storageKey := getEnvOrSkip(t, testStorageKeyEnv)
 
 	ctx := context.Background()
+
 	st, err := azure.New(ctx, &azure.Options{
 		Container:      container,
 		StorageAccount: storageAccount,
 		StorageKey:     storageKey,
 	})
-
 	if err != nil {
 		t.Fatalf("unable to connect to Azure container: %v", err)
 	}

+ 3 - 3
repo/blob/b2/b2_storage.go

@@ -187,7 +187,7 @@ func (s *b2Storage) DeleteBlob(ctx context.Context, id blob.ID) error {
 	_, err := s.bucket.HideFile(fileName)
 	err = translateError(err)
 
-	if err == blob.ErrBlobNotFound {
+	if errors.Is(err, blob.ErrBlobNotFound) {
 		// Deleting failed because it already is deleted? Fine.
 		return nil
 	}
@@ -261,7 +261,7 @@ func toBandwidth(bytesPerSecond int) iothrottler.Bandwidth {
 	return iothrottler.Bandwidth(bytesPerSecond) * iothrottler.BytesPerSecond
 }
 
-// New creates new B2-backed storage with specified options:
+// New creates new B2-backed storage with specified options.
 func New(ctx context.Context, opt *Options) (blob.Storage, error) {
 	if opt.BucketName == "" {
 		return nil, errors.New("bucket name must be specified")
@@ -281,7 +281,7 @@ func New(ctx context.Context, opt *Options) (blob.Storage, error) {
 	}
 
 	if bucket == nil {
-		return nil, fmt.Errorf("bucket not found: %s", opt.BucketName)
+		return nil, errors.Errorf("bucket not found: %s", opt.BucketName)
 	}
 
 	return &b2Storage{

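Replacing fmt.Errorf with errors.Errorf keeps error construction on github.com/pkg/errors, consistent with the rest of the commit, and attaches a stack trace to the resulting error. A minimal sketch, with an invented bucket name:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func findBucket(name string) error {
	// errors.Errorf formats like fmt.Errorf but records a stack trace as well.
	return errors.Errorf("bucket not found: %s", name)
}

func main() {
	fmt.Println(findBucket("my-bucket")) // bucket not found: my-bucket
}
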
+ 2 - 4
repo/blob/b2/b2_storage_test.go

@@ -35,9 +35,8 @@ func TestB2Storage(t *testing.T) {
 	keyID := getEnvOrSkip(t, testKeyIDEnv)
 	key := getEnvOrSkip(t, testKeyEnv)
 	testutil.Retry(t, func(t *testutil.RetriableT) {
-
 		data := make([]byte, 8)
-		rand.Read(data) //nolint:errcheck
+		rand.Read(data)
 
 		ctx := context.Background()
 		st, err := b2.New(ctx, &b2.Options{
@@ -46,7 +45,6 @@ func TestB2Storage(t *testing.T) {
 			Key:        key,
 			Prefix:     fmt.Sprintf("test-%v-%x-", time.Now().Unix(), data),
 		})
-
 		if err != nil {
 			t.Fatalf("unable to build b2 storage: %v", err)
 		}
@@ -79,12 +77,12 @@ func TestB2StorageInvalidBlob(t *testing.T) {
 	key := getEnvOrSkip(t, testKeyEnv)
 
 	ctx := context.Background()
+
 	st, err := b2.New(ctx, &b2.Options{
 		BucketName: bucket,
 		KeyID:      keyID,
 		Key:        key,
 	})
-
 	if err != nil {
 		t.Fatalf("unable to build b2 storage: %v", err)
 	}

+ 7 - 9
repo/blob/filesystem/filesystem_storage.go

@@ -26,8 +26,8 @@ const (
 	fsStorageType        = "filesystem"
 	fsStorageChunkSuffix = ".f"
 
-	fsDefaultFileMode os.FileMode = 0600
-	fsDefaultDirMode  os.FileMode = 0700
+	fsDefaultFileMode os.FileMode = 0o600
+	fsDefaultDirMode  os.FileMode = 0o700
 )
 
 var fsDefaultShards = []int{3, 3}
@@ -67,7 +67,7 @@ func isRetriable(err error) bool {
 		return true
 	}
 
-	return err == errRetriableInvalidLength
+	return errors.Is(err, errRetriableInvalidLength)
 }
 
 func (fs *fsImpl) GetBlobFromPath(ctx context.Context, dirPath, path string, offset, length int64) ([]byte, error) {
@@ -77,7 +77,7 @@ func (fs *fsImpl) GetBlobFromPath(ctx context.Context, dirPath, path string, off
 			return nil, err
 		}
 
-		defer f.Close() //nolint:errcheck
+		defer f.Close() //nolint:errcheck,gosec
 
 		if length < 0 {
 			return ioutil.ReadAll(f)
@@ -108,7 +108,6 @@ func (fs *fsImpl) GetBlobFromPath(ctx context.Context, dirPath, path string, off
 
 		return b, nil
 	}, isRetriable)
-
 	if err != nil {
 		if os.IsNotExist(err) {
 			return nil, blob.ErrBlobNotFound
@@ -121,7 +120,7 @@ func (fs *fsImpl) GetBlobFromPath(ctx context.Context, dirPath, path string, off
 }
 
 func (fs *fsImpl) GetMetadataFromPath(ctx context.Context, dirPath, path string) (blob.Metadata, error) {
-	fi, err := os.Stat(path) //nolint:gosec
+	fi, err := os.Stat(path)
 	if err != nil {
 		return blob.Metadata{}, err
 	}
@@ -193,13 +192,13 @@ func (fs *fsImpl) PutBlobInPath(ctx context.Context, dirPath, path string, data
 func (fs *fsImpl) createTempFileAndDir(tempFile string) (*os.File, error) {
 	flags := os.O_CREATE | os.O_WRONLY | os.O_EXCL
 
-	f, err := os.OpenFile(tempFile, flags, fs.fileMode())
+	f, err := os.OpenFile(tempFile, flags, fs.fileMode()) //nolint:gosec
 	if os.IsNotExist(err) {
 		if err = os.MkdirAll(filepath.Dir(tempFile), fs.dirMode()); err != nil {
 			return nil, errors.Wrap(err, "cannot create directory")
 		}
 
-		return os.OpenFile(tempFile, flags, fs.fileMode())
+		return os.OpenFile(tempFile, flags, fs.fileMode()) //nolint:gosec
 	}
 
 	return f, err
@@ -221,7 +220,6 @@ func (fs *fsImpl) ReadDir(ctx context.Context, dirname string) ([]os.FileInfo, e
 		v, err := ioutil.ReadDir(dirname)
 		return v, err
 	}, isRetriable)
-
 	if err != nil {
 		return nil, err
 	}

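The //nolint:gosec annotations moved onto the os.OpenFile calls above appear to address gosec's file-inclusion check, which flags opening a path held in a variable; here the path is assembled by the storage layer rather than taken from untrusted input. A simplified sketch of the pattern, with an invented helper and paths:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func createTemp(dir, name string) (*os.File, error) {
	path := filepath.Join(dir, name)

	// The path is derived from trusted configuration, not user input,
	// so the "potential file inclusion via variable" finding is suppressed.
	return os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0o600) //nolint:gosec
}

func main() {
	f, err := createTemp(os.TempDir(), "kopia-example.tmp")
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}

	fmt.Println("created", f.Name())
	f.Close()           //nolint:errcheck
	os.Remove(f.Name()) //nolint:errcheck
}
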
+ 1 - 3
repo/blob/filesystem/filesystem_storage_test.go

@@ -8,11 +8,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/kopia/kopia/repo/blob"
-
 	"github.com/kopia/kopia/internal/blobtesting"
 	"github.com/kopia/kopia/internal/gather"
 	"github.com/kopia/kopia/internal/testlogging"
+	"github.com/kopia/kopia/repo/blob"
 )
 
 func TestFileStorage(t *testing.T) {
@@ -106,7 +105,6 @@ func TestFileStorageConcurrency(t *testing.T) {
 	st, err := New(ctx, &Options{
 		Path: path,
 	})
-
 	if err != nil {
 		t.Fatal(err)
 	}

+ 5 - 4
repo/blob/gcs/gcs_storage.go

@@ -8,6 +8,7 @@ import (
 	"io/ioutil"
 	"time"
 
+	gcsclient "cloud.google.com/go/storage"
 	"github.com/efarrer/iothrottler"
 	"github.com/pkg/errors"
 	"golang.org/x/oauth2"
@@ -20,8 +21,6 @@ import (
 	"github.com/kopia/kopia/internal/retry"
 	"github.com/kopia/kopia/internal/throttle"
 	"github.com/kopia/kopia/repo/blob"
-
-	gcsclient "cloud.google.com/go/storage"
 )
 
 const (
@@ -89,6 +88,7 @@ func (gcs *gcsStorage) GetMetadata(ctx context.Context, b blob.ID) (blob.Metadat
 
 	return v.(blob.Metadata), nil
 }
+
 func exponentialBackoff(ctx context.Context, desc string, att retry.AttemptFunc) (interface{}, error) {
 	return retry.WithExponentialBackoff(ctx, desc, att, isRetriableError)
 }
@@ -120,6 +120,7 @@ func translateError(err error) error {
 		return errors.Wrap(err, "unexpected GCS error")
 	}
 }
+
 func (gcs *gcsStorage) PutBlob(ctx context.Context, b blob.ID, data blob.Bytes) error {
 	ctx, cancel := context.WithCancel(ctx)
 
@@ -166,7 +167,7 @@ func (gcs *gcsStorage) DeleteBlob(ctx context.Context, b blob.ID) error {
 	_, err := exponentialBackoff(ctx, fmt.Sprintf("DeleteBlob(%q)", b), attempt)
 	err = translateError(err)
 
-	if err == blob.ErrBlobNotFound {
+	if errors.Is(err, blob.ErrBlobNotFound) {
 		return nil
 	}
 
@@ -195,7 +196,7 @@ func (gcs *gcsStorage) ListBlobs(ctx context.Context, prefix blob.ID, callback f
 		oa, err = lst.Next()
 	}
 
-	if err != iterator.Done {
+	if !errors.Is(err, iterator.Done) {
 		return err
 	}
 

Some files were not shown because too many files changed in this diff