Browse Source

lint: upgrade to 1.22.2 and make lint issues a build failure

fixed or silenced linter warnings, mostly due to magic numeric constants
Jarek Kowalski 5 years ago
parent
commit
ac70a38101
85 changed files with 398 additions and 232 deletions
  1. +27 -11  .golangci.yml
  2. +3 -2  Makefile
  3. +7 -2  cli/cli_progress.go
  4. +1 -1  cli/command_benchmark_compression.go
  5. +4 -1  cli/command_benchmark_splitters.go
  6. +6 -1  cli/command_content_rewrite.go
  7. +2 -2  cli/command_mount_browse.go
  8. +1 -0  cli/command_mount_webdav.go
  9. +8 -0  cli/command_policy_set.go
  10. +1 -1  cli/command_repository_connect.go
  11. +1 -1  cli/command_repository_connect_from_config.go
  12. +7 -7  cli/command_server_start.go
  13. +1 -1  cli/command_snapshot_create.go
  14. +7 -7  cli/command_snapshot_estimate.go
  15. +1 -1  cli/command_snapshot_verify.go
  16. +5 -4  fs/cachefs/cache.go
  17. +1 -0  fs/cachefs/cache_test.go
  18. +6 -2  fs/localfs/copy.go
  19. +16 -4  fs/localfs/local_fs.go
  20. +1 -0  internal/blobtesting/map.go
  21. +2 -1  internal/diff/diff.go
  22. +3 -3  internal/editor/editor.go
  23. +5 -4  internal/fshasher/fshasher.go
  24. +2 -0  internal/ignore/ignore.go
  25. +8 -5  internal/mockfs/mockfs.go
  26. +4 -2  internal/parallelwork/parallel_work_queue.go
  27. +1 -0  internal/scrubber/scrub_sensitive.go
  28. +3 -0  internal/server/server.go
  29. +10 -5  internal/server/source_manager.go
  30. +2 -2  internal/serverapi/client.go
  31. +1 -0  internal/serverapi/serverapi.go
  32. +1 -0  internal/throttle/round_tripper.go
  33. +1 -0  internal/units/units.go
  34. +1 -0  internal/webdavmount/webdavmount.go
  35. +1 -1  repo/blob/filesystem/filesystem_storage.go
  36. +7 -6  repo/blob/gcs/gcs_storage.go
  37. +3 -1  repo/blob/logging/logging_storage.go
  38. +3 -2  repo/blob/s3/s3_storage.go
  39. +12 -6  repo/blob/sftp/sftp_storage.go
  40. +11 -1  repo/blob/sharded/sharded.go
  41. +13 -0  repo/compression/compressor.go
  42. +4 -4  repo/compression/compressor_gzip.go
  43. +4 -4  repo/compression/compressor_pgzip.go
  44. +5 -5  repo/compression/compressor_s2.go
  45. +3 -3  repo/compression/compressor_zstd.go
  46. +19 -26  repo/content/block_manager_compaction.go
  47. +13 -6  repo/content/builder.go
  48. +5 -3  repo/content/content_formatter.go
  49. +5 -2  repo/content/content_id_to_bytes.go
  50. +2 -2  repo/content/content_index_recovery.go
  51. +2 -1  repo/content/content_manager_iterate.go
  52. +1 -1  repo/content/content_manager_lock_free.go
  53. +16 -0  repo/content/content_manager_test.go
  54. +3 -3  repo/content/format.go
  55. +3 -7  repo/content/index.go
  56. +3 -1  repo/content/merged.go
  57. +4 -1  repo/crypto_key_derivation.go
  58. +18 -10  repo/format_block.go
  59. +10 -4  repo/initialize.go
  60. +1 -1  repo/local_config.go
  61. +3 -1  repo/manifest/manifest_manager.go
  62. +3 -5  repo/object/object_manager.go
  63. +1 -1  repo/object/object_reader.go
  64. +14 -14  repo/object/object_splitter.go
  65. +7 -2  repo/object/object_writer.go
  66. +1 -1  repo/object/objectid.go
  67. +3 -3  repo/object/splitter_buzhash32.go
  68. +2 -3  repo/object/splitter_rabinkarp64.go
  69. +4 -2  repo/open.go
  70. +4 -4  site/cli2md/cli2md.go
  71. +1 -0  snapshot/gc/gc.go
  72. +7 -2  snapshot/manager.go
  73. +1 -0  snapshot/policy/compression_policy.go
  74. +1 -1  snapshot/policy/files_policy.go
  75. +2 -0  snapshot/policy/policy_tree.go
  76. +7 -7  snapshot/policy/retention_policy.go
  77. +1 -1  snapshot/snapshotfs/repofs.go
  78. +3 -1  snapshot/snapshotfs/snapshot_tree_walker.go
  79. +1 -1  snapshot/snapshotfs/source_snapshots.go
  80. +6 -3  snapshot/snapshotfs/upload.go
  81. +0 -1  tests/end_to_end_test/end_to_end.go
  82. +2 -0  tests/end_to_end_test/end_to_end_test.go
  83. +0 -3  tests/repository_stress_test/repository_stress.go
  84. +0 -3  tests/stress_test/stress.go
  85. +3 -2  tools/tools.mk

+ 27 - 11
.golangci.yml

@@ -1,18 +1,11 @@
 linters-settings:
   govet:
     check-shadowing: true
-    settings:
-      printf:
-        funcs:
-          - (github.com/op/go-logging/Logger).Infof
-          - (github.com/op/go-logging/Logger).Warningf
-          - (github.com/op/go-logging/Logger).Errorf
-          - (github.com/op/go-logging/Logger).Fatalf
   funlen:
     lines: 100
     statements: 60
-  golint:
-    min-confidence: 0
+  gocognit:
+    min-complexity: 40
   gocyclo:
     min-complexity: 15
   maligned:
@@ -28,11 +21,14 @@ linters-settings:
     local-prefixes: github.com/kopia/kopia
   gocritic:
     enabled-tags:
+      - diagnostic
       - performance
       - style
+      - opinionated
       - experimental
     disabled-checks:
       - wrapperFunc
+      - whyNoLint
 linters:
   enable-all: true
   disable:
@@ -47,7 +43,18 @@ run:
     - test/testdata_etc
 
 issues:
+  exclude-use-default: false
   exclude-rules:
+    - path: _test\.go|testing
+      linters:
+      - gomnd
+      - gocognit
+      - funlen
+      - errcheck
+      - gosec
+    - text: "Magic number: 1e"
+      linters:
+      - gomnd
     - text: "weak cryptographic primitive"
       linters:
         - gosec
@@ -56,10 +63,19 @@ issues:
         - dupl
     - text: "Line contains TODO"
       linters:
-        - godox
+        - godox 
+    - text: ".*Magic number\\: [01],"
+      linters:
+        - gomnd
+    - text: "Errors unhandled"
+      linters:
+        - gosec
+    - path: cli
+      linters:
+      - gochecknoglobals
 
 # golangci.com configuration
 # https://github.com/golangci/golangci/wiki/Configuration
 service:
-  golangci-lint-version: 1.21.x # use the fixed version to not introduce new linters unexpectedly
+  golangci-lint-version: 1.22.2 # use the fixed version to not introduce new linters unexpectedly
 

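Most of the churn in this commit follows one pattern: magic numbers flagged by the newly enabled gomnd linter are either promoted to named constants or annotated inline. A minimal sketch of both forms (names here are hypothetical, not from the commit):

    // Promote the literal to a named constant so intent is documented once.
    const maxRetries = 3

    func retry(f func() error) error {
        var err error
        for i := 0; i < maxRetries; i++ {
            if err = f(); err == nil {
                return nil
            }
        }
        return err
    }

    // Where the literal is self-explanatory, silence the warning inline:
    //   megabits := float64(n) * 8 / 1000000 //nolint:gomnd
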
+ 3 - 2
Makefile

@@ -29,6 +29,9 @@ play:
 	go run cmd/playground/main.go
 
 lint: $(LINTER_TOOL)
+	$(LINTER_TOOL) --deadline 180s run
+
+lint-and-log: $(LINTER_TOOL)
 	$(LINTER_TOOL) --deadline 180s run | tee .linterr.txt
 
 vet:
@@ -177,5 +180,3 @@ travis-create-long-term-repository:
 	echo Not creating long-term repository.
 
 endif
-
-

+ 7 - 2
cli/cli_progress.go

@@ -10,6 +10,8 @@ import (
 	"github.com/kopia/kopia/snapshot"
 )
 
+const maxUnshortenedPath = 60
+
 type singleProgress struct {
 	desc      string
 	startTime time.Time
@@ -114,11 +116,14 @@ func (mp *multiProgress) UploadFinished() {
 }
 
 func shortenPath(s string) string {
-	if len(s) < 60 {
+	if len(s) < maxUnshortenedPath {
 		return s
 	}
 
-	return s[0:30] + "..." + s[len(s)-27:]
+	p1 := maxUnshortenedPath / 2 //nolint:gomnd
+	p2 := p1 - 3                 //nolint:gomnd
+
+	return s[0:p1] + "..." + s[len(s)-p2:]
 }
 
 var cliProgress = &multiProgress{}

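The rewritten shortenPath keeps half of the 60-character budget at the front and the remainder, minus the three-character ellipsis, at the back. A standalone sketch of the same truncation:

    // shorten(s, 60) keeps the first 30 and last 27 characters;
    // head + "..." + tail is exactly max characters long.
    func shorten(s string, max int) string {
        if len(s) < max {
            return s
        }

        head := max / 2
        tail := head - 3 // leave room for "..."

        return s[:head] + "..." + s[len(s)-tail:]
    }
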
+ 1 - 1
cli/command_benchmark_compression.go

@@ -104,7 +104,7 @@ func init() {
 
 func hashOf(b []byte) uint64 {
 	h := fnv.New64a()
-	h.Write(b) // nolint:errcheck
+	h.Write(b) //nolint:errcheck
 
 	return h.Sum64()
 }

+ 4 - 1
cli/command_benchmark_splitters.go

@@ -40,7 +40,10 @@ func runBenchmarkSplitterAction(ctx *kingpin.ParseContext) error {
 
 	for i := 0; i < *benchmarkSplitterBlockCount; i++ {
 		b := make([]byte, *benchmarkSplitterBlockSize)
-		rnd.Read(b)
+		if _, err := rnd.Read(b); err != nil {
+			return err
+		}
+
 		dataBlocks = append(dataBlocks, b)
 	}
 

+ 6 - 1
cli/command_content_rewrite.go

@@ -23,6 +23,8 @@ var (
 	contentRewriteDryRun        = contentRewriteCommand.Flag("dry-run", "Do not actually rewrite, only print what would happen").Short('n').Bool()
 )
 
+const shortPackThresholdPercent = 60 // blocks below 60% of max block size are considered to be 'short'
+
 type contentInfoOrError struct {
 	content.Info
 	err error
@@ -51,6 +53,7 @@ func runContentRewriteCommand(ctx context.Context, rep *repo.Repository) error {
 					mu.Lock()
 					failedCount++
 					mu.Unlock()
+
 					return
 				}
 
@@ -63,9 +66,11 @@ func runContentRewriteCommand(ctx context.Context, rep *repo.Repository) error {
 				mu.Lock()
 				totalBytes += int64(c.Length)
 				mu.Unlock()
+
 				if *contentRewriteDryRun {
 					continue
 				}
+
 				if err := rep.Content.RewriteContent(ctx, c.ID); err != nil {
 					log.Warningf("unable to rewrite content %q: %v", c.ID, err)
 					mu.Lock()
@@ -98,7 +103,7 @@ func getContentToRewrite(ctx context.Context, rep *repo.Repository) <-chan conte
 
 		// add all content IDs from short packs
 		if *contentRewriteShortPacks {
-			threshold := int64(rep.Content.Format.MaxPackSize * 6 / 10)
+			threshold := int64(rep.Content.Format.MaxPackSize * shortPackThresholdPercent / 100) //nolint:gomnd
 			findContentInShortPacks(rep, ch, threshold)
 		}
 

+ 2 - 2
cli/command_mount_browse.go

@@ -48,7 +48,7 @@ func openInOSBrowser(mountPoint, addr string) error {
 }
 
 func netUSE(mountPoint, addr string) error {
-	c := exec.Command("net", "use", mountPoint, addr)
+	c := exec.Command("net", "use", mountPoint, addr) // nolint:gosec
 	c.Stdout = os.Stdout
 	c.Stderr = os.Stderr
 	c.Stdin = os.Stdin
@@ -60,7 +60,7 @@ func netUSE(mountPoint, addr string) error {
 	startWebBrowser("x:\\")
 	waitForCtrlC()
 
-	c = exec.Command("net", "use", mountPoint, "/d")
+	c = exec.Command("net", "use", mountPoint, "/d") // nolint:gosec
 	c.Stdout = os.Stdout
 	c.Stderr = os.Stderr
 	c.Stdin = os.Stdin

+ 1 - 0
cli/command_mount_webdav.go

@@ -56,6 +56,7 @@ func mountDirectoryWebDAV(entry fs.Directory, mountPoint string) error {
 	go func() {
 		defer wg.Done()
 		printStderr("Server listening at http://%v/ Press Ctrl-C to shut down.\n", s.Addr)
+
 		if err := s.ListenAndServe(); err != nil {
 			log.Warningf("server shut down with error: %v", err)
 		}

+ 8 - 0
cli/command_policy_set.go

@@ -132,6 +132,7 @@ func setPolicyFromFlags(p *policy.Policy, changeCount *int) error {
 func setFilesPolicyFromFlags(fp *policy.FilesPolicy, changeCount *int) {
 	if *policySetClearDotIgnore {
 		*changeCount++
+
 		printStderr(" - removing all rules for dot-ignore files\n")
 
 		fp.DotIgnoreFiles = nil
@@ -177,6 +178,7 @@ func setSchedulingPolicyFromFlags(sp *policy.SchedulingPolicy, changeCount *int)
 	// It's not really a list, just optional value.
 	for _, interval := range *policySetInterval {
 		*changeCount++
+
 		sp.SetInterval(interval)
 		printStderr(" - setting snapshot interval to %v\n", sp.Interval())
 
@@ -271,6 +273,7 @@ func addRemoveDedupeAndSort(desc string, base, add, remove []string, changeCount
 
 	for _, b := range add {
 		*changeCount++
+
 		printStderr(" - adding %v to %v\n", b, desc)
 
 		entries[b] = true
@@ -278,6 +281,7 @@ func addRemoveDedupeAndSort(desc string, base, add, remove []string, changeCount
 
 	for _, b := range remove {
 		*changeCount++
+
 		printStderr(" - removing %v from %v\n", b, desc)
 		delete(entries, b)
 	}
@@ -300,6 +304,7 @@ func applyPolicyNumber(desc string, val **int, str string, changeCount *int) err
 
 	if str == inheritPolicyString || str == "default" {
 		*changeCount++
+
 		printStderr(" - resetting %v to a default value inherited from parent.\n", desc)
 
 		*val = nil
@@ -314,6 +319,7 @@ func applyPolicyNumber(desc string, val **int, str string, changeCount *int) err
 
 	i := int(v)
 	*changeCount++
+
 	printStderr(" - setting %v to %v.\n", desc, i)
 	*val = &i
 
@@ -328,6 +334,7 @@ func applyPolicyNumber64(desc string, val *int64, str string, changeCount *int)
 
 	if str == inheritPolicyString || str == "default" {
 		*changeCount++
+
 		printStderr(" - resetting %v to a default value inherited from parent.\n", desc)
 
 		*val = 0
@@ -341,6 +348,7 @@ func applyPolicyNumber64(desc string, val *int64, str string, changeCount *int)
 	}
 
 	*changeCount++
+
 	printStderr(" - setting %v to %v.\n", desc, v)
 	*val = v
 

+ 1 - 1
cli/command_repository_connect.go

@@ -36,7 +36,7 @@ func connectOptions() repo.ConnectOptions {
 	return repo.ConnectOptions{
 		CachingOptions: content.CachingOptions{
 			CacheDirectory:          connectCacheDirectory,
-			MaxCacheSizeBytes:       connectMaxCacheSizeMB << 20,
+			MaxCacheSizeBytes:       connectMaxCacheSizeMB << 20, //nolint:gomnd
 			MaxListCacheDurationSec: int(connectMaxListCacheDuration.Seconds()),
 		},
 	}

+ 1 - 1
cli/command_repository_connect_from_config.go

@@ -33,7 +33,7 @@ func connectToStorageFromConfig(ctx context.Context, isNew bool) (blob.Storage,
 func connectToStorageFromConfigFile(ctx context.Context) (blob.Storage, error) {
 	var cfg repo.LocalConfig
 
-	f, err := os.Open(connectFromConfigFile)
+	f, err := os.Open(connectFromConfigFile) //nolint:gosec
 	if err != nil {
 		return nil, errors.Wrap(err, "unable to open config")
 	}

+ 7 - 7
cli/command_server_start.go

@@ -6,7 +6,6 @@ import (
 	"net/http"
 	"net/url"
 	"strings"
-	"time"
 
 	"github.com/pkg/errors"
 
@@ -17,11 +16,12 @@ import (
 var (
 	serverAddress = serverCommands.Flag("address", "Server address").Default("127.0.0.1:51515").String()
 
-	serverStartCommand  = serverCommands.Command("start", "Start Kopia server").Default()
-	serverStartHTMLPath = serverStartCommand.Flag("html", "Serve the provided HTML at the root URL").ExistingDir()
-	serverStartUI       = serverStartCommand.Flag("ui", "Start the server with HTML UI (EXPERIMENTAL)").Bool()
-	serverStartUsername = serverStartCommand.Flag("server-username", "HTTP server username (basic auth)").Envar("KOPIA_SERVER_USERNAME").Default("kopia").String()
-	serverStartPassword = serverStartCommand.Flag("server-password", "Require HTTP server password (basic auth)").Envar("KOPIA_SERVER_PASSWORD").String()
+	serverStartCommand         = serverCommands.Command("start", "Start Kopia server").Default()
+	serverStartHTMLPath        = serverStartCommand.Flag("html", "Serve the provided HTML at the root URL").ExistingDir()
+	serverStartUI              = serverStartCommand.Flag("ui", "Start the server with HTML UI (EXPERIMENTAL)").Bool()
+	serverStartUsername        = serverStartCommand.Flag("server-username", "HTTP server username (basic auth)").Envar("KOPIA_SERVER_USERNAME").Default("kopia").String()
+	serverStartPassword        = serverStartCommand.Flag("server-password", "Require HTTP server password (basic auth)").Envar("KOPIA_SERVER_PASSWORD").String()
+	serverStartRefreshInterval = serverStartCommand.Flag("refresh-interval", "Frequency for refreshing repository status").Default("10s").Duration()
 )
 
 func init() {
@@ -35,7 +35,7 @@ func runServer(ctx context.Context, rep *repo.Repository) error {
 		return errors.Wrap(err, "unable to initialize server")
 	}
 
-	go rep.RefreshPeriodically(ctx, 10*time.Second)
+	go rep.RefreshPeriodically(ctx, *serverStartRefreshInterval)
 
 	rootURL := "http://" + *serverAddress
 	log.Infof("starting server on %v", rootURL)

+ 1 - 1
cli/command_snapshot_create.go

@@ -46,7 +46,7 @@ func runBackupCommand(ctx context.Context, rep *repo.Repository) error {
 	}
 
 	u := snapshotfs.NewUploader(rep)
-	u.MaxUploadBytes = *snapshotCreateCheckpointUploadLimitMB * 1024 * 1024
+	u.MaxUploadBytes = *snapshotCreateCheckpointUploadLimitMB << 20 //nolint:gomnd
 	u.ForceHashPercentage = *snapshotCreateForceHash
 	u.ParallelUploads = *snapshotCreateParallelUploads
 	onCtrlC(u.Cancel)

+ 7 - 7
cli/command_snapshot_estimate.go

@@ -8,14 +8,12 @@ import (
 
 	"github.com/pkg/errors"
 
-	"github.com/kopia/kopia/fs/ignorefs"
-	"github.com/kopia/kopia/snapshot/policy"
-
-	"github.com/kopia/kopia/repo"
-
 	"github.com/kopia/kopia/fs"
+	"github.com/kopia/kopia/fs/ignorefs"
 	"github.com/kopia/kopia/internal/units"
+	"github.com/kopia/kopia/repo"
 	"github.com/kopia/kopia/snapshot"
+	"github.com/kopia/kopia/snapshot/policy"
 )
 
 var (
@@ -26,6 +24,8 @@ var (
 	snapshotEstimateUploadSpeed = snapshotEstimate.Flag("upload-speed", "Upload speed to use for estimation").Default("10").PlaceHolder("mbit/s").Float64()
 )
 
+const maxExamplesPerBucket = 10
+
 type bucket struct {
 	MinSize   int64    `json:"minSize"`
 	Count     int      `json:"count"`
@@ -37,7 +37,7 @@ func (b *bucket) add(fname string, size int64) {
 	b.Count++
 	b.TotalSize += size
 
-	if len(b.Examples) < 10 {
+	if len(b.Examples) < maxExamplesPerBucket {
 		b.Examples = append(b.Examples, fmt.Sprintf("%v - %v", fname, units.BytesStringBase10(size)))
 	}
 }
@@ -114,7 +114,7 @@ func runSnapshotEstimateCommand(ctx context.Context, rep *repo.Repository) error
 	fmt.Printf("Snapshot excludes %v directories and %v files with total size %v\n", stats.ExcludedDirCount, stats.ExcludedFileCount, units.BytesStringBase10(stats.ExcludedTotalFileSize))
 	showBuckets(eb)
 
-	megabits := float64(stats.TotalFileSize) * 8 / 1000000
+	megabits := float64(stats.TotalFileSize) * 8 / 1000000 //nolint:gomnd
 	seconds := megabits / *snapshotEstimateUploadSpeed
 
 	fmt.Println()

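The estimate itself is simple arithmetic: total bytes are converted to megabits and divided by the assumed link speed. Extracted into a self-contained helper (hypothetical name):

    // estimateSeconds returns the expected upload time for totalBytes
    // over a link of uploadSpeedMbps megabits per second.
    func estimateSeconds(totalBytes int64, uploadSpeedMbps float64) float64 {
        megabits := float64(totalBytes) * 8 / 1e6
        return megabits / uploadSpeedMbps
    }

    // For example, 1 GB at 10 Mbit/s: estimateSeconds(1e9, 10) == 800 seconds.
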
+ 1 - 1
cli/command_snapshot_verify.go

@@ -151,7 +151,7 @@ func (v *verifier) doVerifyObject(ctx context.Context, oid object.ID, path strin
 		v.reportError(path, errors.Wrapf(err, "error verifying %v", oid))
 	}
 
-	if rand.Intn(100) < *verifyCommandFilesPercent {
+	if rand.Intn(100) < *verifyCommandFilesPercent { //nolint:gomnd
 		if err := v.readEntireObject(ctx, oid, path); err != nil {
 			v.reportError(path, errors.Wrapf(err, "error reading object %v", oid))
 		}

+ 5 - 4
fs/cachefs/cache.go

@@ -12,6 +12,8 @@ import (
 
 var log = kopialogging.Logger("kopia/cachefs")
 
+const dirCacheExpiration = 24 * time.Hour
+
 type cacheEntry struct {
 	id   string
 	prev *cacheEntry
@@ -82,9 +84,8 @@ type Loader func(ctx context.Context) (fs.Entries, error)
 func (c *Cache) Readdir(ctx context.Context, d fs.Directory) (fs.Entries, error) {
 	if h, ok := d.(object.HasObjectID); ok {
 		cacheID := string(h.ObjectID())
-		cacheExpiration := 24 * time.Hour
 
-		return c.getEntries(ctx, cacheID, cacheExpiration, d.Readdir)
+		return c.getEntries(ctx, cacheID, dirCacheExpiration, d.Readdir)
 	}
 
 	return d.Readdir(ctx)
@@ -171,8 +172,8 @@ type Options struct {
 }
 
 var defaultOptions = &Options{
-	MaxCachedDirectories: 1000,
-	MaxCachedEntries:     100000,
+	MaxCachedDirectories: 1000,   //nolint:gomnd
+	MaxCachedEntries:     100000, //nolint:gomnd
 }
 
 // NewCache creates filesystem cache.

+ 1 - 0
fs/cachefs/cache_test.go

@@ -25,6 +25,7 @@ type cacheSource struct {
 func (cs *cacheSource) get(id string) func(ctx context.Context) (fs.Entries, error) {
 	return func(context.Context) (fs.Entries, error) {
 		cs.callCounter[id]++
+
 		d, ok := cs.data[id]
 		if !ok {
 			return nil, errors.New("no such id")

+ 6 - 2
fs/localfs/copy.go

@@ -37,6 +37,7 @@ func Copy(ctx context.Context, targetPath string, e fs.Entry, opt CopyOptions) e
 	}
 
 	c := copier{CopyOptions: opt}
+
 	return c.copyEntry(ctx, e, targetPath)
 }
 
@@ -136,7 +137,9 @@ func (c *copier) createDirectory(path string) error {
 				return errors.Errorf("non-empty directory already exists, not overwriting it: %q", path)
 			}
 		}
+
 		log.Debug("Not creating already existing directory: ", path)
+
 		return nil
 	default:
 		return errors.Errorf("unable to create directory, %q already exists and it is not a directory", path)
@@ -150,6 +153,7 @@ func (c *copier) copyFileContent(ctx context.Context, targetPath string, f fs.Fi
 		if !c.OverwriteFiles {
 			return errors.Errorf("unable to create %q, it already exists", targetPath)
 		}
+
 		log.Debug("Overwriting existing file: ", targetPath)
 	default:
 		return errors.Wrap(err, "failed to stat "+targetPath)
@@ -167,12 +171,12 @@ func (c *copier) copyFileContent(ctx context.Context, targetPath string, f fs.Fi
 }
 
 func isEmptyDirectory(name string) (bool, error) {
-	f, err := os.Open(name)
+	f, err := os.Open(name) //nolint:gosec
 	if err != nil {
 		return false, err
 	}
 
-	defer f.Close()
+	defer f.Close() //nolint:errcheck
 
 	if _, err = f.Readdirnames(1); err == io.EOF {
 		return true, nil

+ 16 - 4
fs/localfs/local_fs.go

@@ -15,6 +15,11 @@ import (
 	"github.com/kopia/kopia/internal/kopialogging"
 )
 
+const (
+	numEntriesToRead   = 100 // number of directory entries to read in one shot
+	dirListingPrefetch = 200 // number of directory items to os.Lstat() in advance
+)
+
 var log = kopialogging.Logger("kopia/localfs")
 
 type sortedEntries fs.Entries
@@ -119,14 +124,14 @@ func (fsd *filesystemDirectory) Child(ctx context.Context, name string) (fs.Entr
 func (fsd *filesystemDirectory) Readdir(ctx context.Context) (fs.Entries, error) {
 	fullPath := fsd.fullPath()
 
-	f, direrr := os.Open(fullPath)
+	f, direrr := os.Open(fullPath) //nolint:gosec
 	if direrr != nil {
 		return nil, direrr
 	}
 	defer f.Close() //nolint:errcheck
 
 	// start feeding directory entry names to namesCh
-	namesCh := make(chan string, 200)
+	namesCh := make(chan string, dirListingPrefetch)
 
 	var namesErr error
 
@@ -137,23 +142,28 @@ func (fsd *filesystemDirectory) Readdir(ctx context.Context) (fs.Entries, error)
 	go func() {
 		defer namesWG.Done()
 		defer close(namesCh)
+
 		for {
-			names, err := f.Readdirnames(100)
+			names, err := f.Readdirnames(numEntriesToRead)
 			for _, name := range names {
 				namesCh <- name
 			}
+
 			if err == nil {
 				continue
 			}
+
 			if err == io.EOF {
 				break
 			}
+
 			namesErr = err
+
 			break
 		}
 	}()
 
-	entriesCh := make(chan fs.Entry, 200)
+	entriesCh := make(chan fs.Entry, dirListingPrefetch)
 
 	var workersWG sync.WaitGroup
 
@@ -171,11 +181,13 @@ func (fsd *filesystemDirectory) Readdir(ctx context.Context) (fs.Entries, error)
 					// lost the race - ignore.
 					continue
 				}
+
 				e, fierr := entryFromChildFileInfo(fi, fullPath)
 				if fierr != nil {
 					log.Warningf("unable to create directory entry %q: %v", fi.Name(), fierr)
 					continue
 				}
+
 				entriesCh <- e
 			}
 		}()

+ 1 - 0
internal/blobtesting/map.go

@@ -11,6 +11,7 @@ import (
 	"github.com/kopia/kopia/repo/blob"
 )
 
+// DataMap is a map of blob IDs to their contents.
 type DataMap map[blob.ID][]byte
 
 type mapStorage struct {

+ 2 - 1
internal/diff/diff.go

@@ -1,3 +1,4 @@
+// Package diff implements helpers for comparing two filesystems.
 package diff
 
 import (
@@ -253,7 +254,7 @@ func (c *Comparer) compareFiles(ctx context.Context, f1, f2 fs.File, fname strin
 	args = append(args, c.DiffArguments...)
 	args = append(args, oldName, newName)
 
-	cmd := exec.CommandContext(ctx, c.DiffCommand, args...)
+	cmd := exec.CommandContext(ctx, c.DiffCommand, args...) // nolint:gosec
 	cmd.Dir = c.tmpDir
 	cmd.Stdout = c.out
 	cmd.Stderr = c.out

+ 3 - 3
internal/editor/editor.go

@@ -53,7 +53,7 @@ func EditLoop(fname, initial string, parse func(updated string) error) error {
 
 		var shouldReopen string
 
-		fmt.Scanf("%s", &shouldReopen) //nolint:errcheck
+		_, _ = fmt.Scanf("%s", &shouldReopen)
 
 		if strings.HasPrefix(strings.ToLower(shouldReopen), "n") {
 			return errors.New("aborted")
@@ -62,7 +62,7 @@ func EditLoop(fname, initial string, parse func(updated string) error) error {
 }
 
 func readAndStripComments(fname string) (string, error) {
-	f, err := os.Open(fname)
+	f, err := os.Open(fname) //nolint:gosec
 	if err != nil {
 		return "", err
 	}
@@ -90,7 +90,7 @@ func editFile(file string) error {
 	args = append(args, editorArgs...)
 	args = append(args, file)
 
-	cmd := exec.Command(editor, args...)
+	cmd := exec.Command(editor, args...) //nolint:gosec
 	cmd.Stderr = os.Stderr
 	cmd.Stdin = os.Stdin
 	cmd.Stdout = os.Stdout

+ 5 - 4
internal/fshasher/fshasher.go

@@ -25,7 +25,7 @@ func Hash(ctx context.Context, e fs.Entry) ([]byte, error) {
 	}
 
 	tw := tar.NewWriter(h)
-	defer tw.Close()
+	defer tw.Close() //nolint:errcheck
 
 	if err := write(ctx, tw, "", e); err != nil {
 		return nil, err
@@ -114,9 +114,10 @@ func writeFile(ctx context.Context, w io.Writer, f fs.File) error {
 	if err != nil {
 		return err
 	}
-	defer r.Close()
 
-	_, err = io.Copy(w, r)
+	if _, err = io.Copy(w, r); err != nil {
+		return err
+	}
 
-	return err
+	return r.Close()
 }

+ 2 - 0
internal/ignore/ignore.go

@@ -65,6 +65,7 @@ func matchBaseDir(baseDir string, m nameMatcher) nameMatcher {
 		}
 
 		path = path[len(baseDir):]
+
 		return m(path)
 	}
 }
@@ -99,6 +100,7 @@ func parseGlobPattern(pattern string) nameMatcher {
 	return func(path string) bool {
 		last := path[strings.LastIndex(path, "/")+1:]
 		ok, _ := filepath.Match(pattern, last)
+
 		return ok
 	}
 }

+ 8 - 5
internal/mockfs/mockfs.go

@@ -173,6 +173,7 @@ func (imd *Directory) FailReaddir(err error) {
 	imd.readdirError = err
 }
 
+// Child gets the named child of a directory.
 func (imd *Directory) Child(ctx context.Context, name string) (fs.Entry, error) {
 	return fs.ReadDirAndFindChild(ctx, imd, name)
 }
@@ -230,16 +231,18 @@ func (imsl *inmemorySymlink) Readlink(ctx context.Context) (string, error) {
 	panic("not implemented yet")
 }
 
-// NewDirectory returns new mock directory.ds
+// NewDirectory returns new mock directory.
 func NewDirectory() *Directory {
 	return &Directory{
 		entry: entry{
 			name: "<root>",
-			mode: 0777 | os.ModeDir,
+			mode: 0777 | os.ModeDir, // nolint:gomnd
 		},
 	}
 }
 
-var _ fs.Directory = &Directory{}
-var _ fs.File = &File{}
-var _ fs.Symlink = &inmemorySymlink{}
+var (
+	_ fs.Directory = &Directory{}
+	_ fs.File      = &File{}
+	_ fs.Symlink   = &inmemorySymlink{}
+)

+ 4 - 2
internal/parallelwork/parallel_work_queue.go

@@ -1,3 +1,4 @@
+// Package parallelwork implements a parallel work queue with a fixed number of workers that concurrently process and add work items to the queue.
 package parallelwork
 
 import (
@@ -60,19 +61,20 @@ func (v *Queue) Process(workers int) error {
 	for i := 0; i < workers; i++ {
 		wg.Add(1)
 
-		go func(workerID int) {
+		go func(_ int) {
 			defer wg.Done()
-			_ = workerID
 
 			for {
 				callback := v.dequeue()
 				if callback == nil {
 					break
 				}
+
 				if err := callback(); err != nil {
 					errors <- err
 					break
 				}
+
 				v.completed()
 			}
 		}(i)

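The queue implements the usual fixed-worker-pool shape: N goroutines drain a shared queue of callbacks and stop on the first error. A generic, self-contained sketch of that shape (not the package's actual API):

    package example

    import "sync"

    func process(tasks []func() error, workers int) error {
        ch := make(chan func() error, len(tasks))
        for _, t := range tasks {
            ch <- t
        }
        close(ch)

        errs := make(chan error, workers)

        var wg sync.WaitGroup
        for i := 0; i < workers; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                for task := range ch {
                    if err := task(); err != nil {
                        errs <- err // buffered, never blocks
                        return      // this worker stops; others drain the queue
                    }
                }
            }()
        }
        wg.Wait()

        select {
        case err := <-errs:
            return err
        default:
            return nil
        }
    }
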
+ 1 - 0
internal/scrubber/scrub_sensitive.go

@@ -1,3 +1,4 @@
+// Package scrubber contains helpers that remove sensitive information from Go structs before it's presented to users.
 package scrubber
 
 import (

+ 3 - 0
internal/server/server.go

@@ -1,3 +1,4 @@
+// Package server implements Kopia API server handlers.
 package server
 
 import (
@@ -62,10 +63,12 @@ func (s *Server) handleAPI(f func(ctx context.Context, r *http.Request) (interfa
 
 		v, err := f(context.Background(), r)
 		log.Debugf("returned %+v", v)
+
 		if err == nil {
 			if err := e.Encode(v); err != nil {
 				log.Warningf("error encoding response: %v", err)
 			}
+
 			return
 		}
 

+ 10 - 5
internal/server/source_manager.go

@@ -12,6 +12,11 @@ import (
 	"github.com/kopia/kopia/snapshot/snapshotfs"
 )
 
+const (
+	statusRefreshInterval = 15 * time.Second // how frequently to refresh source status
+	oneDay                = 24 * time.Hour
+)
+
 // sourceManager manages the state machine of each source
 // Possible states:
 //
@@ -85,7 +90,7 @@ func (s *sourceManager) runLocal(ctx context.Context) {
 			timeBeforeNextSnapshot = time.Until(s.nextSnapshotTime)
 			log.Infof("time to next snapshot %v is %v", s.src, timeBeforeNextSnapshot)
 		} else {
-			timeBeforeNextSnapshot = 24 * time.Hour
+			timeBeforeNextSnapshot = oneDay
 		}
 
 		s.setStatus("WAITING")
@@ -93,7 +98,7 @@ func (s *sourceManager) runLocal(ctx context.Context) {
 		case <-s.closed:
 			return
 
-		case <-time.After(15 * time.Second):
+		case <-time.After(statusRefreshInterval):
 			s.refreshStatus(ctx)
 
 		case <-time.After(timeBeforeNextSnapshot):
@@ -113,7 +118,7 @@ func (s *sourceManager) runRemote(ctx context.Context) {
 		select {
 		case <-s.closed:
 			return
-		case <-time.After(15 * time.Second):
+		case <-time.After(statusRefreshInterval):
 			s.refreshStatus(ctx)
 		}
 	}
@@ -205,7 +210,7 @@ func (s *sourceManager) snapshot(ctx context.Context) {
 }
 
 func (s *sourceManager) findClosestNextSnapshotTime() time.Time {
-	nextSnapshotTime := time.Now().Add(24 * time.Hour)
+	nextSnapshotTime := time.Now().Add(oneDay)
 
 	if s.pol != nil {
 		// compute next snapshot time based on interval
@@ -223,7 +228,7 @@ func (s *sourceManager) findClosestNextSnapshotTime() time.Time {
 			localSnapshotTime := time.Date(nowLocalTime.Year(), nowLocalTime.Month(), nowLocalTime.Day(), tod.Hour, tod.Minute, 0, 0, time.Local)
 
 			if tod.Hour < nowLocalTime.Hour() || (tod.Hour == nowLocalTime.Hour() && tod.Minute < nowLocalTime.Minute()) {
-				localSnapshotTime = localSnapshotTime.Add(24 * time.Hour)
+				localSnapshotTime = localSnapshotTime.Add(oneDay)
 			}
 
 			if localSnapshotTime.Before(nextSnapshotTime) {

+ 2 - 2
internal/serverapi/client.go

@@ -22,7 +22,7 @@ func (c *Client) Get(path string, respPayload interface{}) error {
 	}
 	defer resp.Body.Close() //nolint:errcheck
 
-	if resp.StatusCode != 200 {
+	if resp.StatusCode != http.StatusOK {
 		return errors.Errorf("invalid server response: %v", resp.Status)
 	}
 
@@ -47,7 +47,7 @@ func (c *Client) Post(path string, reqPayload, respPayload interface{}) error {
 	}
 	defer resp.Body.Close() //nolint:errcheck
 
-	if resp.StatusCode != 200 {
+	if resp.StatusCode != http.StatusOK {
 		return errors.Errorf("invalid server response: %v", resp.Status)
 	}
 

+ 1 - 0
internal/serverapi/serverapi.go

@@ -1,3 +1,4 @@
+// Package serverapi contains Go types corresponding to the Kopia server API.
 package serverapi
 
 import (

+ 1 - 0
internal/throttle/round_tripper.go

@@ -1,3 +1,4 @@
+// Package throttle implements helpers for throttling uploads and downloads.
 package throttle
 
 import (

+ 1 - 0
internal/units/units.go

@@ -1,3 +1,4 @@
+// Package units contains helpers to convert sizes to human-readable strings.
 package units
 
 import (

+ 1 - 0
internal/webdavmount/webdavmount.go

@@ -1,3 +1,4 @@
+// Package webdavmount implements webdav filesystem for serving snapshots.
 package webdavmount
 
 import (

+ 1 - 1
repo/blob/filesystem/filesystem_storage.go

@@ -39,7 +39,7 @@ type fsImpl struct {
 }
 
 func (fs *fsImpl) GetBlobFromPath(ctx context.Context, dirPath, path string, offset, length int64) ([]byte, error) {
-	f, err := os.Open(path)
+	f, err := os.Open(path) //nolint:gosec
 	if os.IsNotExist(err) {
 		return nil, blob.ErrBlobNotFound
 	}

+ 7 - 6
repo/blob/gcs/gcs_storage.go

@@ -24,7 +24,8 @@ import (
 )
 
 const (
-	gcsStorageType = "gcs"
+	gcsStorageType  = "gcs"
+	writerChunkSize = 1 << 20
 )
 
 type gcsStorage struct {
@@ -104,7 +105,7 @@ func (gcs *gcsStorage) PutBlob(ctx context.Context, b blob.ID, data []byte) erro
 
 	obj := gcs.bucket.Object(gcs.getObjectNameString(b))
 	writer := obj.NewWriter(ctx)
-	writer.ChunkSize = 1 << 20
+	writer.ChunkSize = writerChunkSize
 	writer.ContentType = "application/x-kopia"
 
 	progressCallback := blob.ProgressCallback(ctx)
@@ -124,7 +125,8 @@ func (gcs *gcsStorage) PutBlob(ctx context.Context, b blob.ID, data []byte) erro
 	if err != nil {
 		// cancel context before closing the writer causes it to abandon the upload.
 		cancel()
-		writer.Close() //nolint:errcheck
+
+		_ = writer.Close() // failing already, ignore the error
 
 		return translateError(err)
 	}
@@ -187,8 +189,7 @@ func (gcs *gcsStorage) ConnectionInfo() blob.ConnectionInfo {
 }
 
 func (gcs *gcsStorage) Close(ctx context.Context) error {
-	gcs.storageClient.Close() //nolint:errcheck
-	return nil
+	return gcs.storageClient.Close()
 }
 
 func toBandwidth(bytesPerSecond int) iothrottler.Bandwidth {
@@ -200,7 +201,7 @@ func toBandwidth(bytesPerSecond int) iothrottler.Bandwidth {
 }
 
 func tokenSourceFromCredentialsFile(ctx context.Context, fn string, scopes ...string) (oauth2.TokenSource, error) {
-	data, err := ioutil.ReadFile(fn)
+	data, err := ioutil.ReadFile(fn) //nolint:gosec
 	if err != nil {
 		return nil, err
 	}

+ 3 - 1
repo/blob/logging/logging_storage.go

@@ -9,6 +9,8 @@ import (
 	"github.com/kopia/kopia/repo/blob"
 )
 
+const maxLoggedBlobLength = 20 // maximum length of the blob to log contents of
+
 var log = repologging.Logger("repo/blob")
 
 type loggingStorage struct {
@@ -22,7 +24,7 @@ func (s *loggingStorage) GetBlob(ctx context.Context, id blob.ID, offset, length
 	result, err := s.base.GetBlob(ctx, id, offset, length)
 	dt := time.Since(t0)
 
-	if len(result) < 20 {
+	if len(result) < maxLoggedBlobLength {
 		s.printf(s.prefix+"GetBlob(%q,%v,%v)=(%#v, %#v) took %v", id, offset, length, result, err, dt)
 	} else {
 		s.printf(s.prefix+"GetBlob(%q,%v,%v)=({%#v bytes}, %#v) took %v", id, offset, length, len(result), err, dt)

+ 3 - 2
repo/blob/s3/s3_storage.go

@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"net/http"
 
 	"github.com/efarrer/iothrottler"
 	"github.com/minio/minio-go"
@@ -92,11 +93,11 @@ func isRetriableError(err error) bool {
 
 func translateError(err error) error {
 	if me, ok := err.(minio.ErrorResponse); ok {
-		if me.StatusCode == 200 {
+		if me.StatusCode == http.StatusOK {
 			return nil
 		}
 
-		if me.StatusCode == 404 {
+		if me.StatusCode == http.StatusNotFound {
 			return blob.ErrBlobNotFound
 		}
 	}

+ 12 - 6
repo/blob/sftp/sftp_storage.go

@@ -1,3 +1,4 @@
+// Package sftp implements blob storage provided over SFTP/SSH.
 package sftp
 
 import (
@@ -52,7 +53,7 @@ func (s *sftpImpl) GetBlobFromPath(ctx context.Context, dirPath, path string, of
 	if err != nil {
 		return nil, err
 	}
-	defer r.Close()
+	defer r.Close() //nolint:errcheck
 
 	// pkg/sftp doesn't have a `ioutil.Readall`, so we WriteTo to a buffer
 	// and either return it all or return the offset/length bytes
@@ -148,8 +149,13 @@ func (s *sftpStorage) ConnectionInfo() blob.ConnectionInfo {
 }
 
 func (s *sftpStorage) Close(ctx context.Context) error {
-	s.Impl.(*sftpImpl).cli.Close()
-	s.Impl.(*sftpImpl).conn.Close()
+	if err := s.Impl.(*sftpImpl).cli.Close(); err != nil {
+		return errors.Wrap(err, "closing SFTP client")
+	}
+
+	if err := s.Impl.(*sftpImpl).conn.Close(); err != nil {
+		return errors.Wrap(err, "closing SFTP connection")
+	}
 
 	return nil
 }
@@ -180,11 +186,11 @@ func hostExists(host string, hosts []string) bool {
 // getHostKey parses OpenSSH known_hosts file for a public key that matches the host
 // The known_hosts file format is documented in the sshd(8) manual page
 func getHostKey(host, knownHosts string) (ssh.PublicKey, error) {
-	file, err := os.Open(knownHosts)
+	file, err := os.Open(knownHosts) //nolint:gosec
 	if err != nil {
 		return nil, err
 	}
-	defer file.Close()
+	defer file.Close() //nolint:errcheck
 
 	var hostKey ssh.PublicKey
 
@@ -207,7 +213,7 @@ func getHostKey(host, knownHosts string) (ssh.PublicKey, error) {
 
 // getSigner parses and returns a signer for the user-entered private key
 func getSigner(path string) (ssh.Signer, error) {
-	buffer, err := ioutil.ReadFile(path)
+	buffer, err := ioutil.ReadFile(path) //nolint:gosec
 	if err != nil {
 		return nil, err
 	}

+ 11 - 1
repo/blob/sharded/sharded.go

@@ -1,3 +1,4 @@
+// Package sharded implements common support for sharded blob providers, such as filesystem or webdav.
 package sharded
 
 import (
@@ -9,6 +10,9 @@ import (
 	"github.com/kopia/kopia/repo/blob"
 )
 
+const minShardedBlobIDLength = 20
+
+// Impl must be implemented by the underlying provider.
 type Impl interface {
 	GetBlobFromPath(ctx context.Context, dirPath, filePath string, offset, length int64) ([]byte, error)
 	PutBlobInPath(ctx context.Context, dirPath, filePath string, data []byte) error
@@ -16,6 +20,7 @@ type Impl interface {
 	ReadDir(ctx context.Context, path string) ([]os.FileInfo, error)
 }
 
+// Storage provides common implementation of sharded storage.
 type Storage struct {
 	Impl Impl
 
@@ -24,6 +29,7 @@ type Storage struct {
 	Shards   []int
 }
 
+// GetBlob implements blob.Storage
 func (s Storage) GetBlob(ctx context.Context, blobID blob.ID, offset, length int64) ([]byte, error) {
 	dirPath, filePath := s.GetShardedPathAndFilePath(blobID)
 	return s.Impl.GetBlobFromPath(ctx, dirPath, filePath, offset, length)
@@ -41,6 +47,7 @@ func (s Storage) makeFileName(blobID blob.ID) string {
 	return string(blobID) + s.Suffix
 }
 
+// ListBlobs implements blob.Storage
 func (s Storage) ListBlobs(ctx context.Context, prefix blob.ID, callback func(blob.Metadata) error) error {
 	var walkDir func(string, string) error
 
@@ -85,12 +92,14 @@ func (s Storage) ListBlobs(ctx context.Context, prefix blob.ID, callback func(bl
 	return walkDir(s.RootPath, "")
 }
 
+// PutBlob implements blob.Storage
 func (s Storage) PutBlob(ctx context.Context, blobID blob.ID, data []byte) error {
 	dirPath, filePath := s.GetShardedPathAndFilePath(blobID)
 
 	return s.Impl.PutBlobInPath(ctx, dirPath, filePath, data)
 }
 
+// DeleteBlob implements blob.Storage
 func (s Storage) DeleteBlob(ctx context.Context, blobID blob.ID) error {
 	dirPath, filePath := s.GetShardedPathAndFilePath(blobID)
 	return s.Impl.DeleteBlobInPath(ctx, dirPath, filePath)
@@ -99,7 +108,7 @@ func (s Storage) DeleteBlob(ctx context.Context, blobID blob.ID) error {
 func (s Storage) getShardDirectory(blobID blob.ID) (string, blob.ID) {
 	shardPath := s.RootPath
 
-	if len(blobID) < 20 {
+	if len(blobID) < minShardedBlobIDLength {
 		return shardPath, blobID
 	}
 
@@ -111,6 +120,7 @@ func (s Storage) getShardDirectory(blobID blob.ID) (string, blob.ID) {
 	return shardPath, blobID
 }
 
+// GetShardedPathAndFilePath returns the path of the shard and file name within the shard for a given blob ID.
 func (s Storage) GetShardedPathAndFilePath(blobID blob.ID) (shardPath, filePath string) {
 	shardPath, blobID = s.getShardDirectory(blobID)
 	filePath = filepath.Join(shardPath, s.makeFileName(blobID))

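For context, sharding peels directory names off the front of the blob ID so that large stores never put every blob into a single directory. A simplified sketch, assuming Shards holds the length of each nested directory name:

    import "path/filepath"

    // shardedPath("root", "abcdef1234", []int{3, 3}) returns
    // ("root/abc/def", "1234").
    func shardedPath(root, blobID string, shards []int) (dir, file string) {
        dir = root
        for _, n := range shards {
            if len(blobID) <= n {
                break
            }
            dir = filepath.Join(dir, blobID[:n])
            blobID = blobID[n:]
        }
        return dir, blobID
    }
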
+ 13 - 0
repo/compression/compressor.go

@@ -4,8 +4,12 @@ package compression
 import (
 	"encoding/binary"
 	"fmt"
+
+	"github.com/pkg/errors"
 )
 
+const compressionHeaderSize = 4
+
 // Name is the name of the compressor to use.
 type Name string
 
@@ -42,3 +46,12 @@ func compressionHeader(id HeaderID) []byte {
 
 	return b
 }
+
+// IDFromHeader retrieves compression ID from content header
+func IDFromHeader(b []byte) (HeaderID, error) {
+	if len(b) < compressionHeaderSize {
+		return 0, errors.Errorf("invalid size: %v", len(b))
+	}
+
+	return HeaderID(binary.BigEndian.Uint32(b[0:compressionHeaderSize])), nil
+}

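The new IDFromHeader is the read-side counterpart of compressionHeader: both treat the first four bytes of compressed content as a big-endian uint32. The round trip, in isolation:

    import (
        "encoding/binary"
        "fmt"
    )

    func header(id uint32) []byte {
        b := make([]byte, 4)
        binary.BigEndian.PutUint32(b, id)
        return b
    }

    func idFromHeader(b []byte) (uint32, error) {
        if len(b) < 4 {
            return 0, fmt.Errorf("invalid size: %v", len(b))
        }
        return binary.BigEndian.Uint32(b[0:4]), nil
    }
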
+ 4 - 4
repo/compression/compressor_gzip.go

@@ -52,19 +52,19 @@ func (c *gzipCompressor) Compress(b []byte) ([]byte, error) {
 }
 
 func (c *gzipCompressor) Decompress(b []byte) ([]byte, error) {
-	if len(b) < 4 {
+	if len(b) < compressionHeaderSize {
 		return nil, errors.Errorf("invalid compression header")
 	}
 
-	if !bytes.Equal(b[0:4], c.header) {
+	if !bytes.Equal(b[0:compressionHeaderSize], c.header) {
 		return nil, errors.Errorf("invalid compression header")
 	}
 
-	r, err := gzip.NewReader(bytes.NewReader(b[4:]))
+	r, err := gzip.NewReader(bytes.NewReader(b[compressionHeaderSize:]))
 	if err != nil {
 		return nil, errors.Wrap(err, "unable to open gzip stream")
 	}
-	defer r.Close()
+	defer r.Close() //nolint:errcheck
 
 	var buf bytes.Buffer
 	if _, err := io.Copy(&buf, r); err != nil {

+ 4 - 4
repo/compression/compressor_pgzip.go

@@ -52,19 +52,19 @@ func (c *pgzipCompressor) Compress(b []byte) ([]byte, error) {
 }
 
 func (c *pgzipCompressor) Decompress(b []byte) ([]byte, error) {
-	if len(b) < 4 {
+	if len(b) < compressionHeaderSize {
 		return nil, errors.Errorf("invalid compression header")
 	}
 
-	if !bytes.Equal(b[0:4], c.header) {
+	if !bytes.Equal(b[0:compressionHeaderSize], c.header) {
 		return nil, errors.Errorf("invalid compression header")
 	}
 
-	r, err := pgzip.NewReader(bytes.NewReader(b[4:]))
+	r, err := pgzip.NewReader(bytes.NewReader(b[compressionHeaderSize:]))
 	if err != nil {
 		return nil, errors.Wrap(err, "unable to open gzip stream")
 	}
-	defer r.Close()
+	defer r.Close() //nolint:errcheck
 
 	var buf bytes.Buffer
 	if _, err := io.Copy(&buf, r); err != nil {

+ 5 - 5
repo/compression/compressor_s2.go

@@ -11,8 +11,8 @@ import (
 func init() {
 	RegisterCompressor("s2-default", newS2Compressor(headerS2Default))
 	RegisterCompressor("s2-better", newS2Compressor(headerS2Better, s2.WriterBetterCompression()))
-	RegisterCompressor("s2-parallel-4", newS2Compressor(headerS2Parallel4, s2.WriterConcurrency(4)))
-	RegisterCompressor("s2-parallel-8", newS2Compressor(headerS2Parallel8, s2.WriterConcurrency(8)))
+	RegisterCompressor("s2-parallel-4", newS2Compressor(headerS2Parallel4, s2.WriterConcurrency(4))) //nolint:gomnd
+	RegisterCompressor("s2-parallel-8", newS2Compressor(headerS2Parallel8, s2.WriterConcurrency(8))) //nolint:gomnd
 }
 
 func newS2Compressor(id HeaderID, opts ...s2.WriterOption) Compressor {
@@ -50,15 +50,15 @@ func (c *s2Compressor) Compress(b []byte) ([]byte, error) {
 }
 
 func (c *s2Compressor) Decompress(b []byte) ([]byte, error) {
-	if len(b) < 4 {
+	if len(b) < compressionHeaderSize {
 		return nil, errors.Errorf("invalid compression header")
 	}
 
-	if !bytes.Equal(b[0:4], c.header) {
+	if !bytes.Equal(b[0:compressionHeaderSize], c.header) {
 		return nil, errors.Errorf("invalid compression header")
 	}
 
-	r := s2.NewReader(bytes.NewReader(b[4:]))
+	r := s2.NewReader(bytes.NewReader(b[compressionHeaderSize:]))
 
 	var buf bytes.Buffer
 	if _, err := io.Copy(&buf, r); err != nil {

+ 3 - 3
repo/compression/compressor_zstd.go

@@ -53,15 +53,15 @@ func (c *zstdCompressor) Compress(b []byte) ([]byte, error) {
 }
 
 func (c *zstdCompressor) Decompress(b []byte) ([]byte, error) {
-	if len(b) < 4 {
+	if len(b) < compressionHeaderSize {
 		return nil, errors.Errorf("invalid compression header")
 	}
 
-	if !bytes.Equal(b[0:4], c.header) {
+	if !bytes.Equal(b[0:compressionHeaderSize], c.header) {
 		return nil, errors.Errorf("invalid compression header")
 	}
 
-	r, err := zstd.NewReader(bytes.NewReader(b[4:]))
+	r, err := zstd.NewReader(bytes.NewReader(b[compressionHeaderSize:]))
 	if err != nil {
 		return nil, errors.Wrap(err, "unable to open zstd stream")
 	}

+ 19 - 26
repo/content/block_manager_compaction.go

@@ -8,9 +8,11 @@ import (
 	"github.com/pkg/errors"
 )
 
+const verySmallContentFraction = 20 // blobs less than 1/verySmallContentFraction of maxPackSize are considered 'very small'
+
 var autoCompactionOptions = CompactOptions{
-	MinSmallBlobs: 4 * parallelFetches,
-	MaxSmallBlobs: 64,
+	MinSmallBlobs: 4 * parallelFetches, // nolint:gomnd
+	MaxSmallBlobs: 64,                  // nolint:gomnd
 }
 
 // CompactOptions provides options for compaction
@@ -47,50 +49,41 @@ func (bm *Manager) CompactIndexes(ctx context.Context, opt CompactOptions) error
 }
 
 func (bm *Manager) getContentsToCompact(indexBlobs []IndexBlobInfo, opt CompactOptions) []IndexBlobInfo {
-	var nonCompactedContents []IndexBlobInfo
-
-	var totalSizeNonCompactedContents int64
-
-	var verySmallContents []IndexBlobInfo
+	var nonCompactedBlobs, verySmallBlobs, mediumSizedBlobs []IndexBlobInfo
 
-	var totalSizeVerySmallContents int64
-
-	var mediumSizedContents []IndexBlobInfo
-
-	var totalSizeMediumSizedContents int64
+	var totalSizeNonCompactedBlobs, totalSizeVerySmallBlobs, totalSizeMediumSizedBlobs int64
 
 	for _, b := range indexBlobs {
 		if b.Length > int64(bm.maxPackSize) && !opt.AllIndexes {
 			continue
 		}
 
-		nonCompactedContents = append(nonCompactedContents, b)
+		nonCompactedBlobs = append(nonCompactedBlobs, b)
+		totalSizeNonCompactedBlobs += b.Length
 
-		if b.Length < int64(bm.maxPackSize/20) {
-			verySmallContents = append(verySmallContents, b)
-			totalSizeVerySmallContents += b.Length
+		if b.Length < int64(bm.maxPackSize/verySmallContentFraction) {
+			verySmallBlobs = append(verySmallBlobs, b)
+			totalSizeVerySmallBlobs += b.Length
 		} else {
-			mediumSizedContents = append(mediumSizedContents, b)
-			totalSizeMediumSizedContents += b.Length
+			mediumSizedBlobs = append(mediumSizedBlobs, b)
+			totalSizeMediumSizedBlobs += b.Length
 		}
-
-		totalSizeNonCompactedContents += b.Length
 	}
 
-	if len(nonCompactedContents) < opt.MinSmallBlobs {
+	if len(nonCompactedBlobs) < opt.MinSmallBlobs {
 		// current count is below min allowed - nothing to do
 		formatLog.Debugf("no small contents to compact")
 		return nil
 	}
 
-	if len(verySmallContents) > len(nonCompactedContents)/2 && len(mediumSizedContents)+1 < opt.MinSmallBlobs {
-		formatLog.Debugf("compacting %v very small contents", len(verySmallContents))
-		return verySmallContents
+	if len(verySmallBlobs) > len(nonCompactedBlobs)/2 && len(mediumSizedBlobs)+1 < opt.MinSmallBlobs {
+		formatLog.Debugf("compacting %v very small contents", len(verySmallBlobs))
+		return verySmallBlobs
 	}
 
-	formatLog.Debugf("compacting all %v non-compacted contents", len(nonCompactedContents))
+	formatLog.Debugf("compacting all %v non-compacted contents", len(nonCompactedBlobs))
 
-	return nonCompactedContents
+	return nonCompactedBlobs
 }
 
 func (bm *Manager) compactAndDeleteIndexBlobs(ctx context.Context, indexBlobs []IndexBlobInfo, opt CompactOptions) error {

+ 13 - 6
repo/content/builder.go

@@ -11,6 +11,13 @@ import (
 	"github.com/kopia/kopia/repo/blob"
 )
 
+const (
+	packHeaderSize = 8
+	deletedMarker  = 0x80000000
+
+	entryFixedHeaderLength = 20
+)
+
 // packIndexBuilder prepares and writes content index.
 type packIndexBuilder map[ID]*Info
 
@@ -67,7 +74,7 @@ func (b packIndexBuilder) Build(output io.Writer) error {
 	layout := &indexLayout{
 		packBlobIDOffsets: map[blob.ID]uint32{},
 		keyLength:         -1,
-		entryLength:       20,
+		entryLength:       entryFixedHeaderLength,
 		entryCount:        len(allContents),
 	}
 
@@ -77,7 +84,7 @@ func (b packIndexBuilder) Build(output io.Writer) error {
 	extraData := prepareExtraData(allContents, layout)
 
 	// write header
-	header := make([]byte, 8)
+	header := make([]byte, packHeaderSize)
 	header[0] = 1 // version
 	header[1] = byte(layout.keyLength)
 	binary.BigEndian.PutUint16(header[2:4], uint16(layout.entryLength))
@@ -123,7 +130,7 @@ func prepareExtraData(allContents []*Info, layout *indexLayout) []byte {
 		}
 	}
 
-	layout.extraDataOffset = uint32(8 + layout.entryCount*(layout.keyLength+layout.entryLength))
+	layout.extraDataOffset = uint32(packHeaderSize + layout.entryCount*(layout.keyLength+layout.entryLength))
 
 	return extraData
 }
@@ -154,7 +161,7 @@ func formatEntry(entry []byte, it *Info, layout *indexLayout) error {
 	entryPackFileOffset := entry[8:12]
 	entryPackedOffset := entry[12:16]
 	entryPackedLength := entry[16:20]
-	timestampAndFlags := uint64(it.TimestampSeconds) << 16
+	timestampAndFlags := uint64(it.TimestampSeconds) << 16 // nolint:gomnd
 
 	if len(it.PackBlobID) == 0 {
 		return errors.Errorf("empty pack content ID for %v", it.ID)
@@ -163,13 +170,13 @@ func formatEntry(entry []byte, it *Info, layout *indexLayout) error {
 	binary.BigEndian.PutUint32(entryPackFileOffset, layout.extraDataOffset+layout.packBlobIDOffsets[it.PackBlobID])
 
 	if it.Deleted {
-		binary.BigEndian.PutUint32(entryPackedOffset, it.PackOffset|0x80000000)
+		binary.BigEndian.PutUint32(entryPackedOffset, it.PackOffset|deletedMarker)
 	} else {
 		binary.BigEndian.PutUint32(entryPackedOffset, it.PackOffset)
 	}
 
 	binary.BigEndian.PutUint32(entryPackedLength, it.Length)
-	timestampAndFlags |= uint64(it.FormatVersion) << 8
+	timestampAndFlags |= uint64(it.FormatVersion) << 8 // nolint:gomnd
 	timestampAndFlags |= uint64(len(it.PackBlobID))
 	binary.BigEndian.PutUint64(entryTimestampAndFlags, timestampAndFlags)
 

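The shifts annotated with nolint:gomnd above pack three fields into one uint64: the timestamp in the high 48 bits, the format version in bits 8-15, and the pack blob ID length in the low byte. Equivalent pack/unpack helpers, for illustration:

    func packTimestampAndFlags(timestampSeconds int64, formatVersion byte, packBlobIDLength int) uint64 {
        v := uint64(timestampSeconds) << 16
        v |= uint64(formatVersion) << 8
        v |= uint64(packBlobIDLength)
        return v
    }

    func unpackTimestampAndFlags(v uint64) (timestampSeconds int64, formatVersion byte, packBlobIDLength int) {
        return int64(v >> 16), byte(v >> 8), int(v & 0xff)
    }
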
+ 5 - 3
repo/content/content_formatter.go

@@ -151,6 +151,7 @@ func truncatedHMACHashFuncFactory(hf func() hash.Hash, truncate int) HashFuncFac
 		return func(b []byte) []byte {
 			h := hmac.New(hf, o.HMACSecret)
 			h.Write(b) // nolint:errcheck
+
 			return h.Sum(nil)[0:truncate]
 		}, nil
 	}
@@ -167,6 +168,7 @@ func truncatedKeyedHashFuncFactory(hf func(key []byte) (hash.Hash, error), trunc
 		return func(b []byte) []byte {
 			h, _ := hf(o.HMACSecret)
 			h.Write(b) // nolint:errcheck
+
 			return h.Sum(nil)[0:truncate]
 		}, nil
 	}
@@ -244,9 +246,9 @@ func init() {
 	RegisterEncryption("NONE", func(f *FormattingOptions) (Encryptor, error) {
 		return nullEncryptor{}, nil
 	})
-	RegisterEncryption("AES-128-CTR", newCTREncryptorFactory(16, aes.NewCipher))
-	RegisterEncryption("AES-192-CTR", newCTREncryptorFactory(24, aes.NewCipher))
-	RegisterEncryption("AES-256-CTR", newCTREncryptorFactory(32, aes.NewCipher))
+	RegisterEncryption("AES-128-CTR", newCTREncryptorFactory(16, aes.NewCipher)) //nolint:gomnd
+	RegisterEncryption("AES-192-CTR", newCTREncryptorFactory(24, aes.NewCipher)) //nolint:gomnd
+	RegisterEncryption("AES-256-CTR", newCTREncryptorFactory(32, aes.NewCipher)) //nolint:gomnd
 	RegisterEncryption("SALSA20", func(f *FormattingOptions) (Encryptor, error) {
 		var k [32]byte
 		copy(k[:], f.MasterKey[0:32])

+ 5 - 2
repo/content/content_id_to_bytes.go

@@ -4,12 +4,15 @@ import (
 	"encoding/hex"
 )
 
+// unpackedContentIDPrefix is a prefix for all content IDs that are stored unpacked in the index.
+const unpackedContentIDPrefix = 0xff
+
 func bytesToContentID(b []byte) ID {
 	if len(b) == 0 {
 		return ""
 	}
 
-	if b[0] == 0xff {
+	if b[0] == unpackedContentIDPrefix {
 		return ID(b[1:])
 	}
 
@@ -36,7 +39,7 @@ func contentIDToBytes(c ID) []byte {
 
 	b, err := hex.DecodeString(string(c[skip:]))
 	if err != nil {
-		return append([]byte{0xff}, []byte(c)...)
+		return append([]byte{unpackedContentIDPrefix}, []byte(c)...)
 	}
 
 	return append(prefix, b...)

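Content IDs that are valid hex are packed into raw bytes to save index space; anything else is stored verbatim behind the 0xff marker byte. A simplified round trip (the real code also handles a one-character prefix):

    import "encoding/hex"

    func packID(id string) []byte {
        b, err := hex.DecodeString(id)
        if err != nil {
            return append([]byte{0xff}, []byte(id)...) // not hex: store verbatim
        }
        return b
    }

    func unpackID(b []byte) string {
        if len(b) > 0 && b[0] == 0xff {
            return string(b[1:])
        }
        return hex.EncodeToString(b)
    }
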
+ 2 - 2
repo/content/content_index_recovery.go

@@ -61,7 +61,7 @@ func (p *packContentPostamble) toBytes() ([]byte, error) {
 	binary.BigEndian.PutUint32(buf[n:], checksum)
 	n += 4
 
-	if n > 255 {
+	if n > 255 { // nolint:gomnd
 		return nil, errors.Errorf("postamble too long: %v", n)
 	}
 
@@ -81,7 +81,7 @@ func findPostamble(b []byte) *packContentPostamble {
 
 	// length of postamble is the last byte
 	postambleLength := int(b[len(b)-1])
-	if postambleLength < 5 {
+	if postambleLength < 5 { // nolint:gomnd
 		// too short, must be at least 5 bytes (checksum + own length)
 		return nil
 	}

+ 2 - 1
repo/content/content_manager_iterate.go

@@ -64,6 +64,7 @@ func maybeParallelExecutor(parallel int, originalCallback IterateCallback) (Iter
 
 		go func() {
 			defer wg.Done()
+
 			for i := range workch {
 				if err := originalCallback(i); err != nil {
 					select {
@@ -196,7 +197,7 @@ func (bm *Manager) IteratePacks(options IteratePackOptions, callback IteratePack
 // packs shorter than the given threshold.
 func (bm *Manager) IterateContentInShortPacks(threshold int64, callback IterateCallback) error {
 	if threshold <= 0 {
-		threshold = int64(bm.maxPackSize) * 8 / 10
+		threshold = int64(bm.maxPackSize) * 8 / 10 // nolint:gomnd
 	}
 
 	return bm.IteratePacks(

+ 1 - 1
repo/content/content_manager_lock_free.go

@@ -66,7 +66,7 @@ func appendRandomBytes(b []byte, count int) ([]byte, error) {
 }
 
 func (bm *lockFreeManager) loadPackIndexesUnlocked(ctx context.Context) ([]IndexBlobInfo, bool, error) {
-	nextSleepTime := 100 * time.Millisecond
+	nextSleepTime := 100 * time.Millisecond //nolint:gomnd
 
 	for i := 0; i < indexLoadAttempts; i++ {
 		if err := ctx.Err(); err != nil {

+ 16 - 0
repo/content/content_manager_test.go

@@ -483,12 +483,14 @@ func TestParallelWrites(t *testing.T) {
 
 		go func() {
 			defer workersWG.Done()
+
 			for {
 				select {
 				case <-closeWorkers:
 					return
 				case <-time.After(1 * time.Nanosecond):
 					id := writeContentAndVerify(ctx, t, bm, seededRandomData(rand.Int(), 100)) //nolint:gosec
+
 					workerLock.RLock()
 					workerWritten[workerID] = append(workerWritten[workerID], id)
 					workerLock.RUnlock()
@@ -505,6 +507,7 @@ func TestParallelWrites(t *testing.T) {
 
 	go func() {
 		defer flusherWG.Done()
+
 		for {
 			select {
 			case <-closeFlusher:
@@ -515,14 +518,19 @@ func TestParallelWrites(t *testing.T) {
 
 				// capture snapshot of all content IDs while holding a writer lock
 				allWritten := map[ID]bool{}
+
 				workerLock.Lock()
+
 				for _, ww := range workerWritten {
 					for _, id := range ww {
 						allWritten[id] = true
 					}
 				}
+
 				workerLock.Unlock()
+
 				log.Infof("captured %v contents", len(allWritten))
+
 				if err := bm.Flush(ctx); err != nil {
 					t.Errorf("flush error: %v", err)
 				}
@@ -577,10 +585,13 @@ func TestFlushResumesWriters(t *testing.T) {
 
 	go func() {
 		defer writeWG.Done()
+
 		// start a write while flush is ongoing, the write will block on the condition variable
 		time.Sleep(1 * time.Second)
 		log.Infof("write started")
+
 		second = writeContentAndVerify(ctx, t, bm, []byte{3, 4, 5})
+
 		log.Infof("write finished")
 	}()
 
@@ -1159,6 +1170,7 @@ func TestContentReadAliasing(t *testing.T) {
 	}
 
 	contentData2[0]++
+
 	verifyContent(ctx, t, bm, id1, contentData)
 	bm.Flush(ctx)
 	verifyContent(ctx, t, bm, id1, contentData)
@@ -1301,8 +1313,10 @@ func fakeTimeNowWithAutoAdvance(t time.Time, dt time.Duration) func() time.Time
 	return func() time.Time {
 		mu.Lock()
 		defer mu.Unlock()
+
 		ret := t
 		t = t.Add(dt)
+
 		return ret
 	}
 }
@@ -1381,7 +1395,9 @@ func writeContentWithRetriesAndVerify(ctx context.Context, t *testing.T, bm *Man
 	contentID, err := bm.WriteContent(ctx, b, "")
 	for i := 0; err != nil && i < maxRetries; i++ {
 		retryCount++
+
 		log.Warningf("WriteContent failed %v, retrying", err)
+
 		contentID, err = bm.WriteContent(ctx, b, "")
 	}
 

+ 3 - 3
repo/content/format.go

@@ -35,7 +35,7 @@ type entry struct {
 }
 
 func (e *entry) parse(b []byte) error {
-	if len(b) < 20 {
+	if len(b) < entryFixedHeaderLength {
 		return errors.Errorf("invalid entry length: %v", len(b))
 	}
 
@@ -52,11 +52,11 @@ func (e *entry) IsDeleted() bool {
 }
 
 func (e *entry) TimestampSeconds() int64 {
-	return int64(e.timestampAndFlags >> 16)
+	return int64(e.timestampAndFlags >> 16) // nolint:gomnd
 }
 
 func (e *entry) PackedFormatVersion() byte {
-	return byte(e.timestampAndFlags >> 8)
+	return byte(e.timestampAndFlags >> 8) // nolint:gomnd
 }
 
 func (e *entry) PackFileLength() byte {

+ 3 - 7
repo/content/index.go

@@ -68,7 +68,7 @@ func (b *index) Iterate(prefix ID, cb func(Info) error) error {
 	entry := make([]byte, stride)
 
 	for i := startPos; i < b.hdr.entryCount; i++ {
-		n, err := b.readerAt.ReadAt(entry, int64(8+stride*i))
+		n, err := b.readerAt.ReadAt(entry, int64(packHeaderSize+stride*i))
 		if err != nil || n != len(entry) {
 			return errors.Wrap(err, "unable to read from index")
 		}
@@ -103,7 +103,7 @@ func (b *index) findEntryPosition(contentID ID) (int, error) {
 		if readErr != nil {
 			return false
 		}
-		_, err := b.readerAt.ReadAt(entryBuf, int64(8+stride*p))
+		_, err := b.readerAt.ReadAt(entryBuf, int64(packHeaderSize+stride*p))
 		if err != nil {
 			readErr = err
 			return false
@@ -133,7 +133,7 @@ func (b *index) findEntry(contentID ID) ([]byte, error) {
 	}
 
 	entryBuf := make([]byte, stride)
-	if _, err := b.readerAt.ReadAt(entryBuf, int64(8+stride*position)); err != nil {
+	if _, err := b.readerAt.ReadAt(entryBuf, int64(packHeaderSize+stride*position)); err != nil {
 		return nil, err
 	}
 
@@ -164,10 +164,6 @@ func (b *index) GetInfo(contentID ID) (*Info, error) {
 }
 
 func (b *index) entryToInfo(contentID ID, entryData []byte) (Info, error) {
-	if len(entryData) < 20 {
-		return Info{}, errors.Errorf("invalid entry length: %v", len(entryData))
-	}
-
 	var e entry
 	if err := e.parse(entryData); err != nil {
 		return Info{}, err

+ 3 - 1
repo/content/merged.go

@@ -5,6 +5,8 @@ import (
 	"errors"
 )
 
+const iterateParallelism = 16
+
 // mergedIndex is an implementation of Index that transparently merges returns from underlying Indexes.
 type mergedIndex []packIndex
 
@@ -73,7 +75,7 @@ func (h *nextInfoHeap) Pop() interface{} {
 }
 
 func iterateChan(prefix ID, ndx packIndex, done chan bool) <-chan Info {
-	ch := make(chan Info, 16)
+	ch := make(chan Info, iterateParallelism)
 
 	go func() {
 		defer close(ch)

+ 4 - 1
repo/crypto_key_derivation.go

@@ -28,7 +28,10 @@ func (f *formatBlob) deriveMasterKeyFromPassword(password string) ([]byte, error
 func deriveKeyFromMasterKey(masterKey, uniqueID, purpose []byte, length int) []byte {
 	key := make([]byte, length)
 	k := hkdf.New(sha256.New, masterKey, uniqueID, purpose)
-	io.ReadFull(k, key) //nolint:errcheck
+
+	if _, err := io.ReadFull(k, key); err != nil {
+		panic("unable to derive key from master key, this should never happen")
+	}
 
 	return key
 }

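io.ReadFull on an HKDF reader can only fail when the requested length exceeds HKDF's output limit (255 hash lengths), so replacing the silenced error with a panic is sound. The derivation in a self-contained form:

    package example

    import (
        "crypto/sha256"
        "io"

        "golang.org/x/crypto/hkdf"
    )

    // deriveKey expands masterKey into a purpose-specific key using HKDF-SHA256.
    func deriveKey(masterKey, uniqueID, purpose []byte, length int) []byte {
        key := make([]byte, length)
        k := hkdf.New(sha256.New, masterKey, uniqueID, purpose)

        if _, err := io.ReadFull(k, key); err != nil {
            panic(err) // only possible when length exceeds HKDF's output limit
        }

        return key
    }
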
+ 18 - 10
repo/format_block.go

@@ -16,10 +16,12 @@ import (
 	"github.com/kopia/kopia/repo/blob"
 )
 
-const defaultFormatEncryption = "AES256_GCM"
-
 const (
+	defaultFormatEncryption         = "AES256_GCM"
+	lengthOfRecoverBlockLength      = 2 // number of bytes used to store recover block length
 	maxChecksummedFormatBytesLength = 65000
+	maxRecoverChunkLength           = 65536
+	minRecoverableChunkLength       = lengthOfRecoverBlockLength + 2
 	formatBlobChecksumSize          = sha256.Size
 )
 
@@ -95,20 +97,21 @@ func RecoverFormatBlob(ctx context.Context, st blob.Storage, blobID blob.ID, opt
 }
 
 func recoverFormatBlobWithLength(ctx context.Context, st blob.Storage, blobID blob.ID, length int64) ([]byte, error) {
-	chunkLength := int64(65536)
+	chunkLength := int64(maxRecoverChunkLength)
 	if chunkLength > length {
 		chunkLength = length
 	}
 
-	if chunkLength > 4 {
+	if chunkLength > minRecoverableChunkLength {
 		// try prefix
 		prefixChunk, err := st.GetBlob(ctx, blobID, 0, chunkLength)
 		if err != nil {
 			return nil, err
 		}
 
-		if l := int(prefixChunk[0]) + int(prefixChunk[1])<<8; l <= maxChecksummedFormatBytesLength && l+2 < len(prefixChunk) {
-			if b, ok := verifyFormatBlobChecksum(prefixChunk[2 : 2+l]); ok {
+		l := decodeInt16(prefixChunk)
+		if l <= maxChecksummedFormatBytesLength && l+lengthOfRecoverBlockLength < len(prefixChunk) {
+			if b, ok := verifyFormatBlobChecksum(prefixChunk[lengthOfRecoverBlockLength : lengthOfRecoverBlockLength+l]); ok {
 				return b, nil
 			}
 		}
@@ -119,8 +122,9 @@ func recoverFormatBlobWithLength(ctx context.Context, st blob.Storage, blobID bl
 			return nil, err
 		}
 
-		if l := int(suffixChunk[len(suffixChunk)-2]) + int(suffixChunk[len(suffixChunk)-1])<<8; l <= maxChecksummedFormatBytesLength && l+2 < len(suffixChunk) {
-			if b, ok := verifyFormatBlobChecksum(suffixChunk[len(suffixChunk)-2-l : len(suffixChunk)-2]); ok {
+		l = decodeInt16(suffixChunk[len(suffixChunk)-lengthOfRecoverBlockLength:])
+		if l <= maxChecksummedFormatBytesLength && l+lengthOfRecoverBlockLength < len(suffixChunk) {
+			if b, ok := verifyFormatBlobChecksum(suffixChunk[len(suffixChunk)-lengthOfRecoverBlockLength-l : len(suffixChunk)-lengthOfRecoverBlockLength]); ok {
 				return b, nil
 			}
 		}
@@ -129,6 +133,10 @@ func recoverFormatBlobWithLength(ctx context.Context, st blob.Storage, blobID bl
 	return nil, errFormatBlobNotFound
 }
 
+func decodeInt16(b []byte) int {
+	return int(b[0]) + int(b[1])<<8
+}
+
 func verifyFormatBlobChecksum(b []byte) ([]byte, bool) {
 	if len(b) < formatBlobChecksumSize {
 		return nil, false
@@ -264,9 +272,9 @@ func addFormatBlobChecksumAndLength(fb []byte) ([]byte, error) {
 	}
 
 	// return <length><checksummed-bytes><length>
-	result := append([]byte(nil), byte(l), byte(l>>8))
+	result := append([]byte(nil), byte(l), byte(l>>8)) //nolint:gomnd
 	result = append(result, checksummedFormatBytes...)
-	result = append(result, byte(l), byte(l>>8))
+	result = append(result, byte(l), byte(l>>8)) //nolint:gomnd
 
 	return result, nil
 }
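
A quick round-trip of the <length><checksummed-bytes><length> record that decodeInt16 now parses, using the same little-endian 16-bit encoding as addFormatBlobChecksumAndLength (the payload is a placeholder):

package main

import "fmt"

// decodeInt16 mirrors the helper added above: little-endian 16-bit length.
func decodeInt16(b []byte) int {
	return int(b[0]) + int(b[1])<<8
}

func main() {
	payload := []byte("checksummed-format-bytes")
	l := len(payload)

	// <length><checksummed-bytes><length>, as written by addFormatBlobChecksumAndLength
	rec := append([]byte{byte(l), byte(l >> 8)}, payload...)
	rec = append(rec, byte(l), byte(l>>8))

	fmt.Println(decodeInt16(rec))              // 24, from the prefix
	fmt.Println(decodeInt16(rec[len(rec)-2:])) // 24, from the suffix
}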

+ 10 - 4
repo/initialize.go

@@ -18,6 +18,12 @@ var (
 	BuildVersion = "v0-unofficial"
 )
 
+const (
+	hmacSecretLength = 32
+	masterKeyLength  = 32
+	uniqueIDLength   = 32
+)
+
 // NewRepositoryOptions specifies options that apply to newly created repositories.
 // All fields are optional, when not provided, reasonable defaults will be used.
 type NewRepositoryOptions struct {
@@ -66,7 +72,7 @@ func formatBlobFromOptions(opt *NewRepositoryOptions) *formatBlob {
 		Tool:                   "https://github.com/kopia/kopia",
 		BuildInfo:              BuildInfo,
 		KeyDerivationAlgorithm: defaultKeyDerivationAlgorithm,
-		UniqueID:               applyDefaultRandomBytes(opt.UniqueID, 32),
+		UniqueID:               applyDefaultRandomBytes(opt.UniqueID, uniqueIDLength),
 		Version:                "1",
 		EncryptionAlgorithm:    defaultFormatEncryption,
 	}
@@ -84,9 +90,9 @@ func repositoryObjectFormatFromOptions(opt *NewRepositoryOptions) *repositoryObj
 			Version:     1,
 			Hash:        applyDefaultString(opt.BlockFormat.Hash, content.DefaultHash),
 			Encryption:  applyDefaultString(opt.BlockFormat.Encryption, content.DefaultEncryption),
-			HMACSecret:  applyDefaultRandomBytes(opt.BlockFormat.HMACSecret, 32),
-			MasterKey:   applyDefaultRandomBytes(opt.BlockFormat.MasterKey, 32),
-			MaxPackSize: applyDefaultInt(opt.BlockFormat.MaxPackSize, 20<<20), // 20 MB
+			HMACSecret:  applyDefaultRandomBytes(opt.BlockFormat.HMACSecret, hmacSecretLength), //nolint:gomnd
+			MasterKey:   applyDefaultRandomBytes(opt.BlockFormat.MasterKey, masterKeyLength),   //nolint:gomnd
+			MaxPackSize: applyDefaultInt(opt.BlockFormat.MaxPackSize, 20<<20),                  //nolint:gomnd
 		},
 		Format: object.Format{
 			Splitter: applyDefaultString(opt.ObjectFormat.Splitter, object.DefaultSplitter),

+ 1 - 1
repo/local_config.go

@@ -42,7 +42,7 @@ func (lc *LocalConfig) Save(w io.Writer) error {
 
 // loadConfigFromFile reads the local configuration from the specified file.
 func loadConfigFromFile(fileName string) (*LocalConfig, error) {
-	f, err := os.Open(fileName)
+	f, err := os.Open(fileName) //nolint:gosec
 	if err != nil {
 		return nil, err
 	}

+ 3 - 1
repo/manifest/manifest_manager.go

@@ -18,6 +18,8 @@ import (
 	"github.com/kopia/kopia/repo/content"
 )
 
+const manifestLoadParallelism = 8
+
 var log = repologging.Logger("kopia/manifest")
 
 // ErrNotFound is returned when the metadata item is not found.
@@ -299,7 +301,7 @@ func (m *Manager) loadCommittedContentsLocked(ctx context.Context) error {
 
 		err := m.b.IterateContents(content.IterateOptions{
 			Prefix:   ContentPrefix,
-			Parallel: 8,
+			Parallel: manifestLoadParallelism,
 		}, func(ci content.Info) error {
 			man, err := m.loadManifestContent(ctx, ci.ID)
 			if err != nil {

+ 3 - 5
repo/object/object_manager.go

@@ -4,7 +4,6 @@ package object
 import (
 	"bytes"
 	"context"
-	"encoding/binary"
 	"encoding/json"
 	"io"
 
@@ -238,12 +237,11 @@ func (om *Manager) newRawReader(ctx context.Context, objectID ID, assertLength i
 }
 
 func (om *Manager) decompress(b []byte) ([]byte, error) {
-	if len(b) < 4 {
-		return nil, errors.Errorf("invalid compression header")
+	compressorID, err := compression.IDFromHeader(b)
+	if err != nil {
+		return nil, errors.Wrap(err, "invalid compression header")
 	}
 
-	compressorID := compression.HeaderID(binary.BigEndian.Uint32(b[0:4]))
-
 	compressor := compression.ByHeaderID[compressorID]
 	if compressor == nil {
 		return nil, errors.Errorf("unsupported compressor %x", compressorID)
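
compression.IDFromHeader is added elsewhere in this commit (see repo/compression/compressor.go in the file list); judging by the inline code it replaces, it presumably length-checks the buffer and decodes a big-endian uint32. A sketch of that presumed behavior:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

type HeaderID uint32

// idFromHeader sketches what compression.IDFromHeader presumably does,
// matching the inline code removed above.
func idFromHeader(b []byte) (HeaderID, error) {
	if len(b) < 4 {
		return 0, errors.New("invalid compression header")
	}
	return HeaderID(binary.BigEndian.Uint32(b[0:4])), nil
}

func main() {
	id, err := idFromHeader([]byte{0, 0, 0x10, 0x01, 'd', 'a', 't', 'a'})
	fmt.Println(id, err) // 4097 <nil>
}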

+ 1 - 1
repo/object/object_reader.go

@@ -106,7 +106,7 @@ func (r *objectReader) findChunkIndexForOffset(offset int64) (int, error) {
 	right := len(r.seekTable) - 1
 
 	for left <= right {
-		middle := (left + right) / 2
+		middle := (left + right) / 2 //nolint:gomnd
 
 		if offset < r.seekTable[middle].Start {
 			right = middle - 1

+ 14 - 14
repo/object/object_splitter.go

@@ -22,27 +22,27 @@ type SplitterFactory func() Splitter
 
 // splitterFactories is a map of registered splitter factories.
 var splitterFactories = map[string]SplitterFactory{
-	"FIXED-1M": newFixedSplitterFactory(megabytes(1)),
-	"FIXED-2M": newFixedSplitterFactory(megabytes(2)),
-	"FIXED-4M": newFixedSplitterFactory(megabytes(4)),
-	"FIXED-8M": newFixedSplitterFactory(megabytes(8)),
+	"FIXED-1M": newFixedSplitterFactory(megabytes(1)), //nolint:gomnd
+	"FIXED-2M": newFixedSplitterFactory(megabytes(2)), //nolint:gomnd
+	"FIXED-4M": newFixedSplitterFactory(megabytes(4)), //nolint:gomnd
+	"FIXED-8M": newFixedSplitterFactory(megabytes(8)), //nolint:gomnd
 
-	"DYNAMIC-1M-BUZHASH": newBuzHash32SplitterFactory(megabytes(1)),
-	"DYNAMIC-2M-BUZHASH": newBuzHash32SplitterFactory(megabytes(2)),
-	"DYNAMIC-4M-BUZHASH": newBuzHash32SplitterFactory(megabytes(4)),
-	"DYNAMIC-8M-BUZHASH": newBuzHash32SplitterFactory(megabytes(8)),
+	"DYNAMIC-1M-BUZHASH": newBuzHash32SplitterFactory(megabytes(1)), //nolint:gomnd
+	"DYNAMIC-2M-BUZHASH": newBuzHash32SplitterFactory(megabytes(2)), //nolint:gomnd
+	"DYNAMIC-4M-BUZHASH": newBuzHash32SplitterFactory(megabytes(4)), //nolint:gomnd
+	"DYNAMIC-8M-BUZHASH": newBuzHash32SplitterFactory(megabytes(8)), //nolint:gomnd
 
-	"DYNAMIC-1M-RABINKARP": newRabinKarp64SplitterFactory(megabytes(1)),
-	"DYNAMIC-2M-RABINKARP": newRabinKarp64SplitterFactory(megabytes(2)),
-	"DYNAMIC-4M-RABINKARP": newRabinKarp64SplitterFactory(megabytes(4)),
-	"DYNAMIC-8M-RABINKARP": newRabinKarp64SplitterFactory(megabytes(8)),
+	"DYNAMIC-1M-RABINKARP": newRabinKarp64SplitterFactory(megabytes(1)), //nolint:gomnd
+	"DYNAMIC-2M-RABINKARP": newRabinKarp64SplitterFactory(megabytes(2)), //nolint:gomnd
+	"DYNAMIC-4M-RABINKARP": newRabinKarp64SplitterFactory(megabytes(4)), //nolint:gomnd
+	"DYNAMIC-8M-RABINKARP": newRabinKarp64SplitterFactory(megabytes(8)), //nolint:gomnd
 
 	// handle deprecated legacy names to splitters of arbitrary size
-	"FIXED": newFixedSplitterFactory(4 << 20),
+	"FIXED": newFixedSplitterFactory(4 << 20), //nolint:gomnd
 
 	// we don't want to use old DYNAMIC splitter because of its license, so
 	// map this one to arbitrary buzhash32 (different)
-	"DYNAMIC": newBuzHash32SplitterFactory(megabytes(4)),
+	"DYNAMIC": newBuzHash32SplitterFactory(megabytes(4)), //nolint:gomnd
 }
 
 func megabytes(mb int) int {
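
To make the factory names concrete, a self-contained sketch of the simplest family, a fixed-size splitter, shrunk to 4-byte chunks for demonstration (the registered ones use the megabyte sizes above):

package main

import "fmt"

// fixedSplitter emits a split point every chunkSize bytes - the behavior
// behind the FIXED-* factory names, at a toy scale.
type fixedSplitter struct{ cur, chunkSize int }

func (s *fixedSplitter) ShouldSplit(b byte) bool {
	s.cur++
	if s.cur >= s.chunkSize {
		s.cur = 0
		return true
	}
	return false
}

func main() {
	s := &fixedSplitter{chunkSize: 4}

	var chunk []byte
	for _, b := range []byte("abcdefghij") {
		chunk = append(chunk, b)
		if s.ShouldSplit(b) {
			fmt.Printf("%s\n", chunk) // abcd, then efgh
			chunk = nil
		}
	}
	fmt.Printf("%s\n", chunk) // ij (remainder)
}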

+ 7 - 2
repo/object/object_writer.go

@@ -76,7 +76,9 @@ func (w *objectWriter) Write(data []byte) (n int, err error) {
 	w.totalLength += int64(dataLen)
 
 	for _, d := range data {
-		w.buffer.WriteByte(d)
+		if err := w.buffer.WriteByte(d); err != nil {
+			return 0, err
+		}
 
 		if w.splitter.ShouldSplit(d) {
 			if err := w.flushBuffer(); err != nil {
@@ -133,7 +135,10 @@ func (w *objectWriter) maybeCompressedContentBytes() (data []byte, isCompressed
 
 	var b2 bytes.Buffer
 
-	w.buffer.WriteTo(&b2) //nolint:errcheck
+	if _, err := w.buffer.WriteTo(&b2); err != nil {
+		return nil, false, err
+	}
+
 	w.buffer.Reset()
 
 	return b2.Bytes(), false, nil

+ 1 - 1
repo/object/objectid.go

@@ -63,7 +63,7 @@ func (i ID) Validate() error {
 	}
 
 	if contentID, _, ok := i.ContentID(); ok {
-		if len(contentID) < 2 {
+		if len(contentID) <= 1 {
 			return errors.Errorf("missing content ID")
 		}
 

+ 3 - 3
repo/object/splitter_buzhash32.go

@@ -1,4 +1,3 @@
-//nolint:dupl
 package object
 
 import (
@@ -36,12 +35,13 @@ func newBuzHash32SplitterFactory(avgSize int) SplitterFactory {
 	// avgSize must be a power of two, so 0b000001000...0000
 	// it just so happens that mask is avgSize-1 :)
 	mask := uint32(avgSize - 1)
-	maxSize := avgSize * 2
-	minSize := avgSize / 2
+	maxSize := avgSize * 2 // nolint:gomnd
+	minSize := avgSize / 2 // nolint:gomnd
 
 	return func() Splitter {
 		s := buzhash32.New()
 		s.Write(make([]byte, splitterSlidingWindowSize)) //nolint:errcheck
+
 		return &buzhash32Splitter{s, mask, 0, minSize, maxSize}
 	}
 }
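
The power-of-two requirement exists because mask = avgSize-1 turns a "hash mod avgSize" test into a single AND; with a well-distributed rolling hash the condition fires once per avgSize bytes on average. A tiny illustration (the split condition shown is the usual one for this construction, assumed rather than quoted from the splitter):

package main

import "fmt"

func main() {
	avgSize := 1 << 20 // 1 MiB, a power of two
	mask := uint32(avgSize - 1)

	fmt.Printf("mask = %#x\n", mask) // 0xfffff - low 20 bits set

	// A uniformly distributed 32-bit hash satisfies (h & mask) == mask with
	// probability 1/avgSize, i.e. roughly one split point per 1 MiB.
	h := uint32(0xdeadbeef)
	fmt.Println(h&mask == mask) // false for this particular value
}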

+ 2 - 3
repo/object/splitter_rabinkarp64.go

@@ -1,4 +1,3 @@
-//nolint:dupl
 package object
 
 import "github.com/chmduquesne/rollinghash/rabinkarp64"
@@ -32,12 +31,12 @@ func (rs *rabinKarp64Splitter) ShouldSplit(b byte) bool {
 
 func newRabinKarp64SplitterFactory(avgSize int) SplitterFactory {
 	mask := uint64(avgSize - 1)
-	maxSize := avgSize * 2
-	minSize := avgSize / 2
+	minSize, maxSize := avgSize/2, avgSize*2 //nolint:gomnd
 
 	return func() Splitter {
 		s := rabinkarp64.New()
 		s.Write(make([]byte, splitterSlidingWindowSize)) //nolint:errcheck
+
 		return &rabinKarp64Splitter{s, mask, 0, minSize, maxSize}
 	}
 }

+ 4 - 2
repo/open.go

@@ -99,8 +99,10 @@ func OpenWithConfig(ctx context.Context, st blob.Storage, lc *LocalConfig, passw
 	caching.HMACSecret = deriveKeyFromMasterKey(masterKey, f.UniqueID, []byte("local-cache-integrity"), 16)
 
 	fo := &repoConfig.FormattingOptions
+
 	if fo.MaxPackSize == 0 {
-		fo.MaxPackSize = 20 << 20 // 20 MB
+		// legacy only, apply default
+		fo.MaxPackSize = 20 << 20 // nolint:gomnd
 	}
 
 	cm, err := content.NewManager(ctx, st, fo, caching, fb)
@@ -157,7 +159,7 @@ func readAndCacheFormatBlobBytes(ctx context.Context, st blob.Storage, cacheDire
 	cachedFile := filepath.Join(cacheDirectory, "kopia.repository")
 
 	if cacheDirectory != "" {
-		b, err := ioutil.ReadFile(cachedFile)
+		b, err := ioutil.ReadFile(cachedFile) //nolint:gosec
 		if err == nil {
 			// read from cache.
 			return b, nil

+ 4 - 4
site/cli2md/cli2md.go

@@ -122,7 +122,7 @@ func generateAppFlags(app *kingpin.ApplicationModel) error {
 	if err != nil {
 		return errors.Wrap(err, "unable to create common flags file")
 	}
-	defer f.Close()
+	defer f.Close() //nolint:errcheck
 
 	title := "Flags"
 	fmt.Fprintf(f, `---
@@ -138,7 +138,7 @@ weight: 3
 
 func generateCommands(app *kingpin.ApplicationModel, section string, weight int, advanced bool) error {
 	dir := filepath.Join(baseDir, section)
-	if err := os.MkdirAll(dir, 0755); err != nil {
+	if err := os.MkdirAll(dir, 0750); err != nil {
 		return err
 	}
 
@@ -146,7 +146,7 @@ func generateCommands(app *kingpin.ApplicationModel, section string, weight int,
 	if err != nil {
 		return errors.Wrap(err, "unable to create common flags file")
 	}
-	defer f.Close()
+	defer f.Close() //nolint:errcheck
 
 	title := section + " Commands"
 	fmt.Fprintf(f, `---
@@ -244,7 +244,7 @@ func generateSubcommandPage(fname string, cmd *kingpin.CmdModel) {
 	if err != nil {
 		log.Fatalf("unable to create page: %v", err)
 	}
-	defer f.Close()
+	defer f.Close() //nolint:errcheck
 
 	title := cmd.FullCommand
 	fmt.Fprintf(f, `---

+ 1 - 0
snapshot/gc/gc.go

@@ -1,3 +1,4 @@
+// Package gc implements garbage collection of contents that are no longer referenced through snapshots.
 package gc
 
 import (

+ 7 - 2
snapshot/manager.go

@@ -14,7 +14,11 @@ import (
 // ManifestType is the value of the "type" label for snapshot manifests
 const ManifestType = "snapshot"
 
-const typeKey = manifest.TypeLabelKey
+const (
+	typeKey = manifest.TypeLabelKey
+
+	loadSnapshotsConcurrency = 50 // number of snapshots to load in parallel
+)
 
 var log = kopialogging.Logger("kopia/snapshot")
 
@@ -102,7 +106,7 @@ func SaveSnapshot(ctx context.Context, rep *repo.Repository, man *Manifest) (man
 // LoadSnapshots efficiently loads and parses a given list of snapshot IDs.
 func LoadSnapshots(ctx context.Context, rep *repo.Repository, manifestIDs []manifest.ID) ([]*Manifest, error) {
 	result := make([]*Manifest, len(manifestIDs))
-	sem := make(chan bool, 50)
+	sem := make(chan bool, loadSnapshotsConcurrency)
 
 	for i, n := range manifestIDs {
 		sem <- true
@@ -115,6 +119,7 @@ func LoadSnapshots(ctx context.Context, rep *repo.Repository, manifestIDs []mani
 				log.Warningf("unable to parse snapshot manifest %v: %v", n, err)
 				return
 			}
+
 			result[i] = m
 		}(i, n)
 	}
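
sem here is the standard buffered-channel semaphore: the send blocks once loadSnapshotsConcurrency goroutines are in flight, and each goroutine releases its slot on exit. A self-contained sketch of the same shape:

package main

import (
	"fmt"
	"time"
)

func main() {
	const concurrency = 3 // stand-in for loadSnapshotsConcurrency

	sem := make(chan bool, concurrency)
	results := make([]string, 10)

	for i := range results {
		sem <- true // blocks while 'concurrency' goroutines are in flight
		go func(i int) {
			defer func() { <-sem }() // release the slot on exit

			time.Sleep(10 * time.Millisecond) // simulate loading a manifest
			results[i] = fmt.Sprintf("snapshot-%d", i)
		}(i)
	}

	// drain: once we hold every slot, all workers have finished
	for i := 0; i < cap(sem); i++ {
		sem <- true
	}

	fmt.Println(results[9]) // snapshot-9
}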

+ 1 - 0
snapshot/policy/compression_policy.go

@@ -17,6 +17,7 @@ type CompressionPolicy struct {
 	MaxSize        int64            `json:"maxSize,omitempty"`
 }
 
+// CompressorForFile returns the name of the compression method to use for a given file according to the policy, based on attributes such as its name or size.
 func (p *CompressionPolicy) CompressorForFile(e fs.File) compression.Name {
 	ext := filepath.Ext(e.Name())
 	size := e.Size()

+ 1 - 1
snapshot/policy/files_policy.go

@@ -12,7 +12,7 @@ type FilesPolicy struct {
 }
 
 // Merge applies default values from the provided policy.
-func (p *FilesPolicy) Merge(src FilesPolicy) { //nolint:hugeParam
+func (p *FilesPolicy) Merge(src FilesPolicy) {
 	if p.MaxFileSize == 0 {
 		p.MaxFileSize = src.MaxFileSize
 	}

+ 2 - 0
snapshot/policy/policy_tree.go

@@ -33,6 +33,7 @@ func (t *Tree) EffectivePolicy() *Policy {
 	return t.effective
 }
 
+// IsInherited returns true if the policy of the given tree node has been inherited from its parent.
 func (t *Tree) IsInherited() bool {
 	if t == nil {
 		return true
@@ -41,6 +42,7 @@ func (t *Tree) IsInherited() bool {
 	return t.inherited
 }
 
+// Child gets a subtree for an entry with a given name.
 func (t *Tree) Child(name string) *Tree {
 	if t == nil {
 		return nil

+ 7 - 7
snapshot/policy/retention_policy.go

@@ -125,7 +125,7 @@ func daysAgo(base time.Time, n int) time.Time {
 }
 
 func weeksAgo(base time.Time, n int) time.Time {
-	return base.AddDate(0, 0, -n*7)
+	return base.AddDate(0, 0, -n*7) //nolint:gomnd
 }
 
 func hoursAgo(base time.Time, n int) time.Time {
@@ -133,12 +133,12 @@ func hoursAgo(base time.Time, n int) time.Time {
 }
 
 var defaultRetentionPolicy = RetentionPolicy{
-	KeepLatest:  intPtr(1),
-	KeepHourly:  intPtr(48),
-	KeepDaily:   intPtr(7),
-	KeepWeekly:  intPtr(4),
-	KeepMonthly: intPtr(4),
-	KeepAnnual:  intPtr(0),
+	KeepLatest:  intPtr(1),  // nolint:gomnd
+	KeepHourly:  intPtr(48), // nolint:gomnd
+	KeepDaily:   intPtr(7),  // nolint:gomnd
+	KeepWeekly:  intPtr(4),  // nolint:gomnd
+	KeepMonthly: intPtr(4),  // nolint:gomnd
+	KeepAnnual:  intPtr(0),  // nolint:gomnd
 }
 
 // Merge applies default values from the provided policy.

+ 1 - 1
snapshot/snapshotfs/repofs.go

@@ -182,7 +182,7 @@ func withFileInfo(r object.Reader, e fs.Entry) fs.Reader {
 func DirectoryEntry(rep *repo.Repository, objectID object.ID, dirSummary *fs.DirectorySummary) fs.Directory {
 	d, _ := EntryFromDirEntry(rep, &snapshot.DirEntry{
 		Name:        "/",
-		Permissions: 0555,
+		Permissions: 0555, //nolint:gomnd
 		Type:        snapshot.EntryTypeDirectory,
 		ObjectID:    objectID,
 		DirSummary:  dirSummary,

+ 3 - 1
snapshot/snapshotfs/snapshot_tree_walker.go

@@ -11,6 +11,8 @@ import (
 	"github.com/kopia/kopia/internal/parallelwork"
 )
 
+const walkersPerCPU = 4
+
 // TreeWalker holds information for concurrently walking down FS trees specified
 // by their roots
 type TreeWalker struct {
@@ -70,7 +72,7 @@ func (w *TreeWalker) Run(ctx context.Context) error {
 // NewTreeWalker creates new tree walker.
 func NewTreeWalker() *TreeWalker {
 	return &TreeWalker{
-		Parallelism: 4 * runtime.NumCPU(),
+		Parallelism: walkersPerCPU * runtime.NumCPU(),
 		queue:       parallelwork.NewQueue(),
 	}
 }

+ 1 - 1
snapshot/snapshotfs/source_snapshots.go

@@ -76,7 +76,7 @@ func (s *sourceSnapshots) Readdir(ctx context.Context) (fs.Entries, error) {
 
 		de := &snapshot.DirEntry{
 			Name:        name,
-			Permissions: 0555,
+			Permissions: 0555, //nolint:gomnd
 			Type:        snapshot.EntryTypeDirectory,
 			ModTime:     m.StartTime,
 			ObjectID:    m.RootObjectID(),

+ 6 - 3
snapshot/snapshotfs/upload.go

@@ -23,6 +23,8 @@ import (
 	"github.com/kopia/kopia/snapshot/policy"
 )
 
+const copyBufferSize = 128 * 1024
+
 var log = kopialogging.Logger("kopia/upload")
 
 var errCancelled = errors.New("canceled")
@@ -152,7 +154,7 @@ func (u *Uploader) addDirProgress(length int64) {
 
 	if time.Now().After(u.nextProgressReportTime) {
 		shouldReport = true
-		u.nextProgressReportTime = time.Now().Add(100 * time.Millisecond)
+		u.nextProgressReportTime = time.Now().Add(100 * time.Millisecond) //nolint:gomnd
 	}
 
 	if c == u.currentDirTotalSize {
@@ -167,7 +169,7 @@ func (u *Uploader) addDirProgress(length int64) {
 }
 
 func (u *Uploader) copyWithProgress(dst io.Writer, src io.Reader, completed, length int64) (int64, error) {
-	uploadBuf := make([]byte, 128*1024) // 128 KB buffer
+	uploadBuf := make([]byte, copyBufferSize)
 
 	var written int64
 
@@ -408,7 +410,7 @@ func objectIDPercent(obj object.ID) int {
 	h := fnv.New32a()
 	io.WriteString(h, obj.String()) //nolint:errcheck
 
-	return int(h.Sum32() % 100)
+	return int(h.Sum32() % 100) //nolint:gomnd
 }
 
 func (u *Uploader) maybeIgnoreCachedEntry(ent fs.Entry) fs.Entry {
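
objectIDPercent above maps an object ID to a stable bucket in [0, 100) via FNV-1a, a common trick for deterministically sampling a fixed percentage of items. In isolation:

package main

import (
	"fmt"
	"hash/fnv"
	"io"
)

// percent maps a string to a stable bucket in [0, 100).
func percent(s string) int {
	h := fnv.New32a()
	io.WriteString(h, s) //nolint:errcheck
	return int(h.Sum32() % 100)
}

func main() {
	// the same input always lands in the same bucket, so a check like
	// "percent(id) < 10" deterministically selects ~10% of IDs
	fmt.Println(percent("object-1"), percent("object-1")) // equal values
	fmt.Println(percent("object-2"))
}
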
@@ -550,6 +552,7 @@ func (u *Uploader) processUploadWorkItems(workItems []*uploadWorkItem, dirManife
 		if result.err != nil {
 			if u.IgnoreFileErrors {
 				u.stats.ReadErrors++
+
 				log.Warningf("unable to hash file %q: %s, ignoring", it.entryRelativePath, result.err)
 
 				continue

+ 0 - 1
tests/end_to_end_test/end_to_end.go

@@ -1 +0,0 @@
-package endtoend

+ 2 - 0
tests/end_to_end_test/end_to_end_test.go

@@ -796,11 +796,13 @@ func TestSnapshotRestore(t *testing.T) {
 	// Attempt to restore snapshot with an already-existing target directory
 	// It should fail because the directory is not empty
 	_ = os.MkdirAll(restoreFailDir, 0700)
+
 	e.runAndExpectFailure(t, "snapshot", "restore", "--no-overwrite-directories", snapID, restoreDir)
 
 	// Attempt to restore snapshot with an already-existing target directory
 	// It should fail because target files already exist
 	_ = os.MkdirAll(restoreFailDir, 0700)
+
 	e.runAndExpectFailure(t, "snapshot", "restore", "--no-overwrite-files", snapID, restoreDir)
 }
 

+ 0 - 3
tests/repository_stress_test/repository_stress.go

@@ -1,3 +0,0 @@
-package repositorystress
-
-// dummy package

+ 0 - 3
tests/stress_test/stress.go

@@ -1,3 +0,0 @@
-package stress
-
-// dummy package

+ 3 - 2
tools/tools.mk

@@ -3,7 +3,7 @@ TOOLS_DIR:=$(SELF_DIR)/.tools
 uname := $(shell uname -s)
 
 # tool versions
-GOLANGCI_LINT_VERSION=v1.21.0
+GOLANGCI_LINT_VERSION=v1.22.2
 NODE_VERSION=12.13.0
 HUGO_VERSION=0.59.1
 
@@ -35,11 +35,12 @@ $(BINDATA_TOOL):
 	go build -o $(BINDATA_TOOL) github.com/go-bindata/go-bindata/go-bindata
 
 # linter
-LINTER_TOOL=$(TOOLS_DIR)/bin/golangci-lint
+LINTER_TOOL=$(TOOLS_DIR)/bin/golangci-lint-$(GOLANGCI_LINT_VERSION)
 
 $(LINTER_TOOL):
 	mkdir -p $(TOOLS_DIR)
 	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(TOOLS_DIR)/bin/ $(GOLANGCI_LINT_VERSION)
+	ln -sf $(TOOLS_DIR)/bin/golangci-lint $(LINTER_TOOL)
 
 # hugo
 HUGO_TOOL=$(TOOLS_DIR)/hugo/hugo