Ver código fonte

chore(general): use non-formatting log variants when there is no formatting (#3931)

Use non-formatting logging functions for messages without formatting.
For example, `log.Info("message")` instead of `log.Infof("message")`

Configure linter for printf-like functions
Julio López 1 ano atrás
pai
commit
1f9f9a1846
65 arquivos alterados com 188 adições e 176 exclusões
  1. 12 0
      .golangci.yml
  2. 1 1
      cli/auto_upgrade.go
  3. 1 1
      cli/command_blob_gc.go
  4. 4 4
      cli/command_blob_shards_modify.go
  5. 1 1
      cli/command_content_verify.go
  6. 1 1
      cli/command_index_recover.go
  7. 1 1
      cli/command_logs_cleanup.go
  8. 5 5
      cli/command_mount.go
  9. 1 1
      cli/command_policy_set_compression.go
  10. 6 6
      cli/command_policy_set_scheduling.go
  11. 1 1
      cli/command_policy_set_splitter.go
  12. 1 1
      cli/command_repository_connect.go
  13. 1 1
      cli/command_repository_connect_server.go
  14. 1 1
      cli/command_repository_create.go
  15. 2 2
      cli/command_repository_repair.go
  16. 7 7
      cli/command_repository_set_client.go
  17. 3 3
      cli/command_repository_set_parameters.go
  18. 5 5
      cli/command_repository_sync.go
  19. 1 1
      cli/command_repository_throttle_set.go
  20. 11 11
      cli/command_repository_upgrade.go
  21. 2 2
      cli/command_server_start.go
  22. 1 1
      cli/command_server_throttle_set.go
  23. 1 1
      cli/command_server_tls.go
  24. 2 2
      cli/command_snapshot_create.go
  25. 2 2
      cli/command_snapshot_fix.go
  26. 2 2
      cli/command_snapshot_list.go
  27. 1 1
      cli/command_snapshot_migrate.go
  28. 1 1
      cli/command_snapshot_verify.go
  29. 3 3
      cli/update_check.go
  30. 2 2
      internal/connection/reconnector.go
  31. 2 2
      internal/crypto/aesgcm.go
  32. 1 1
      internal/epoch/epoch_manager.go
  33. 1 1
      internal/mount/mount_webdav.go
  34. 2 2
      internal/passwordpersist/passwordpersist_keyring.go
  35. 1 1
      internal/pproflogging/pproflogging.go
  36. 11 11
      internal/providervalidation/providervalidation.go
  37. 3 3
      internal/repodiag/log_manager_test.go
  38. 1 1
      internal/server/api_estimate.go
  39. 4 4
      internal/server/server.go
  40. 3 3
      internal/server/source_manager.go
  41. 1 1
      internal/tlsutil/tlsutil.go
  42. 5 5
      internal/uitask/uitask_test.go
  43. 1 1
      repo/blob/rclone/rclone_storage.go
  44. 1 1
      repo/blob/sftp/sftp_storage.go
  45. 1 1
      repo/blob/storage.go
  46. 1 1
      repo/connect.go
  47. 1 1
      repo/content/committed_content_index.go
  48. 6 6
      repo/content/content_manager.go
  49. 1 1
      repo/content/content_manager_indexes.go
  50. 1 1
      repo/content/content_manager_iterate.go
  51. 1 1
      repo/content/indexblob/index_blob_manager_v0.go
  52. 2 2
      repo/grpc_repository_client.go
  53. 20 20
      repo/logging/logging_test.go
  54. 1 1
      repo/maintenance/blob_gc.go
  55. 1 1
      repo/maintenance/blob_retain.go
  56. 2 2
      repo/maintenance/content_rewrite.go
  57. 1 1
      repo/maintenance/index_compaction.go
  58. 17 17
      repo/maintenance/maintenance_run.go
  59. 1 1
      snapshot/restore/local_fs_output.go
  60. 1 1
      snapshot/restore/restore.go
  61. 1 1
      snapshot/restore/shallow_fs_output.go
  62. 1 1
      snapshot/restore/zip_output.go
  63. 1 1
      snapshot/snapshotfs/dir_rewriter.go
  64. 2 2
      snapshot/snapshotgc/gc.go
  65. 5 5
      tests/repository_stress_test/repository_stress_test.go

+ 12 - 0
.golangci.yml

@@ -55,8 +55,20 @@ linters-settings:
     local-prefixes: github.com/kopia/kopia
   govet:
     shadow: true
+    settings:
+      printf:
+        funcs:
+          - (*go.uber.org/zap.SugaredLogger).Debugf
+          - (*go.uber.org/zap.SugaredLogger).Infof
+          - (*go.uber.org/zap.SugaredLogger).Warnf
+          - (*go.uber.org/zap.SugaredLogger).Errorf
   lll:
     line-length: 256
+  loggercheck:
+    zap: true
+    no-printf-like: true
+    rules:
+      - (*go.uber.org/zap.SugaredLogger).With
   maligned:
     suggest-new: true
   misspell:

+ 1 - 1
cli/auto_upgrade.go

@@ -25,7 +25,7 @@ func maybeAutoUpgradeRepository(ctx context.Context, r repo.Repository) error {
 		return nil
 	}
 
-	log(ctx).Debugf("Setting default maintenance parameters...")
+	log(ctx).Debug("Setting default maintenance parameters...")
 
 	//nolint:wrapcheck
 	return repo.DirectWriteSession(ctx, dr, repo.WriteSessionOptions{

+ 1 - 1
cli/command_blob_gc.go

@@ -45,7 +45,7 @@ func (c *commandBlobGC) run(ctx context.Context, rep repo.DirectRepositoryWriter
 	}
 
 	if opts.DryRun && n > 0 {
-		log(ctx).Infof("Pass --delete=yes to delete.")
+		log(ctx).Info("Pass --delete=yes to delete.")
 	}
 
 	return nil

+ 4 - 4
cli/command_blob_shards_modify.go

@@ -139,7 +139,7 @@ func (c *commandBlobShardsModify) run(ctx context.Context) error {
 
 	dotShardsFile := filepath.Join(c.rootPath, sharded.ParametersFile)
 
-	log(ctx).Infof("Reading .shards file.")
+	log(ctx).Info("Reading .shards file.")
 
 	srcPar, err := c.getParameters(dotShardsFile)
 	if err != nil {
@@ -152,7 +152,7 @@ func (c *commandBlobShardsModify) run(ctx context.Context) error {
 		return err2
 	}
 
-	log(ctx).Infof("Moving files...")
+	log(ctx).Info("Moving files...")
 
 	if err2 := c.renameBlobs(ctx, c.rootPath, "", dstPar, &numMoved, &numUnchanged); err2 != nil {
 		return errors.Wrap(err2, "error processing directory")
@@ -165,14 +165,14 @@ func (c *commandBlobShardsModify) run(ctx context.Context) error {
 	}
 
 	log(ctx).Infof("Moved %v files, %v unchanged.", numMoved, numUnchanged)
-	log(ctx).Infof("Removing empty directories...")
+	log(ctx).Info("Removing empty directories...")
 
 	if _, err2 := c.removeEmptyDirs(ctx, c.rootPath, &numRemoved); err2 != nil {
 		return errors.Wrap(err2, "error removing empty directories")
 	}
 
 	log(ctx).Infof("Removed %v empty directories...", numRemoved)
-	log(ctx).Infof("Writing new .shards file.")
+	log(ctx).Info("Writing new .shards file.")
 
 	of, err := os.Create(dotShardsFile) //nolint:gosec
 	if err != nil {

+ 1 - 1
cli/command_content_verify.go

@@ -75,7 +75,7 @@ func (c *commandContentVerify) run(ctx context.Context, rep repo.DirectRepositor
 		c.getTotalContentCount(subctx, rep, &totalCount)
 	}()
 
-	log(ctx).Infof("Verifying all contents...")
+	log(ctx).Info("Verifying all contents...")
 
 	rep.DisableIndexRefresh()
 

+ 1 - 1
cli/command_index_recover.go

@@ -49,7 +49,7 @@ func (c *commandIndexRecover) run(ctx context.Context, rep repo.DirectRepository
 
 	defer func() {
 		if recoveredContentCount.Load() == 0 {
-			log(ctx).Infof("No contents recovered.")
+			log(ctx).Info("No contents recovered.")
 			return
 		}
 

+ 1 - 1
cli/command_logs_cleanup.go

@@ -46,7 +46,7 @@ func (c *commandLogsCleanup) run(ctx context.Context, rep repo.DirectRepositoryW
 			log(ctx).Infof("Deleted %v logs.", len(toDelete))
 		}
 	} else {
-		log(ctx).Infof("No logs found to delete.")
+		log(ctx).Info("No logs found to delete.")
 	}
 
 	return nil

+ 5 - 5
cli/command_mount.go

@@ -90,10 +90,10 @@ func (c *commandMount) run(ctx context.Context, rep repo.Repository) error {
 	log(ctx).Infof("Mounted '%v' on %v", c.mountObjectID, ctrl.MountPath())
 
 	if c.mountPoint == "*" && !c.mountPointBrowse {
-		log(ctx).Infof("HINT: Pass --browse to automatically open file browser.")
+		log(ctx).Info("HINT: Pass --browse to automatically open file browser.")
 	}
 
-	log(ctx).Infof("Press Ctrl-C to unmount.")
+	log(ctx).Info("Press Ctrl-C to unmount.")
 
 	if c.mountPointBrowse {
 		if err := open.Start(ctrl.MountPath()); err != nil {
@@ -110,7 +110,7 @@ func (c *commandMount) run(ctx context.Context, rep repo.Repository) error {
 
 	select {
 	case <-ctrlCPressed:
-		log(ctx).Infof("Unmounting...")
+		log(ctx).Info("Unmounting...")
 		// TODO: Consider lazy unmounting (-z) and polling till the filesystem is unmounted instead of failing with:
 		// "unmount error: exit status 1: fusermount: failed to unmount /tmp/kopia-mount719819963: Device or resource busy, try --help"
 		err := ctrl.Unmount(ctx)
@@ -119,13 +119,13 @@ func (c *commandMount) run(ctx context.Context, rep repo.Repository) error {
 		}
 
 	case <-ctrl.Done():
-		log(ctx).Infof("Unmounted.")
+		log(ctx).Info("Unmounted.")
 		return nil
 	}
 
 	// Reporting clean unmount in case of interrupt signal.
 	<-ctrl.Done()
-	log(ctx).Infof("Unmounted.")
+	log(ctx).Info("Unmounted.")
 
 	return nil
 }

+ 1 - 1
cli/command_policy_set_compression.go

@@ -54,7 +54,7 @@ func (c *policyCompressionFlags) setCompressionPolicyFromFlags(ctx context.Conte
 		*changeCount++
 
 		if v == inheritPolicyString {
-			log(ctx).Infof(" - resetting compression algorithm to default value inherited from parent")
+			log(ctx).Info(" - resetting compression algorithm to default value inherited from parent")
 
 			p.CompressorName = ""
 		} else {

+ 6 - 6
cli/command_policy_set_scheduling.go

@@ -69,7 +69,7 @@ func (c *policySchedulingFlags) setScheduleFromFlags(ctx context.Context, sp *po
 		sp.TimesOfDay = policy.SortAndDedupeTimesOfDay(timesOfDay)
 
 		if timesOfDay == nil {
-			log(ctx).Infof(" - resetting snapshot times of day to default")
+			log(ctx).Info(" - resetting snapshot times of day to default")
 		} else {
 			log(ctx).Infof(" - setting snapshot times to %v", timesOfDay)
 		}
@@ -79,7 +79,7 @@ func (c *policySchedulingFlags) setScheduleFromFlags(ctx context.Context, sp *po
 		ce := splitCronExpressions(c.policySetCron)
 
 		if ce == nil {
-			log(ctx).Infof(" - resetting cron snapshot times to default")
+			log(ctx).Info(" - resetting cron snapshot times to default")
 		} else {
 			log(ctx).Infof(" - setting cron snapshot times to %v", ce)
 		}
@@ -102,7 +102,7 @@ func (c *policySchedulingFlags) setScheduleFromFlags(ctx context.Context, sp *po
 
 		sp.Manual = false
 
-		log(ctx).Infof(" - resetting manual snapshot field to false\n")
+		log(ctx).Info(" - resetting manual snapshot field to false\n")
 	}
 
 	return nil
@@ -152,7 +152,7 @@ func (c *policySchedulingFlags) setManualFromFlags(ctx context.Context, sp *poli
 
 		sp.IntervalSeconds = 0
 
-		log(ctx).Infof(" - resetting snapshot interval to default\n")
+		log(ctx).Info(" - resetting snapshot interval to default\n")
 	}
 
 	if len(sp.TimesOfDay) > 0 {
@@ -160,7 +160,7 @@ func (c *policySchedulingFlags) setManualFromFlags(ctx context.Context, sp *poli
 
 		sp.TimesOfDay = nil
 
-		log(ctx).Infof(" - resetting snapshot times of day to default\n")
+		log(ctx).Info(" - resetting snapshot times of day to default\n")
 	}
 
 	if len(sp.Cron) > 0 {
@@ -168,7 +168,7 @@ func (c *policySchedulingFlags) setManualFromFlags(ctx context.Context, sp *poli
 
 		sp.Cron = nil
 
-		log(ctx).Infof(" - resetting cron snapshot times to default\n")
+		log(ctx).Info(" - resetting cron snapshot times to default\n")
 	}
 
 	*changeCount++

+ 1 - 1
cli/command_policy_set_splitter.go

@@ -22,7 +22,7 @@ func (c *policySplitterFlags) setup(cmd *kingpin.CmdClause) {
 func (c *policySplitterFlags) setSplitterPolicyFromFlags(ctx context.Context, p *policy.SplitterPolicy, changeCount *int) error {
 	if v := c.policySetSplitterAlgorithmOverride; v != "" {
 		if v == inheritPolicyString {
-			log(ctx).Infof(" - resetting splitter algorithm override to default value inherited from parent")
+			log(ctx).Info(" - resetting splitter algorithm override to default value inherited from parent")
 
 			p.Algorithm = ""
 		} else {

+ 1 - 1
cli/command_repository_connect.go

@@ -131,7 +131,7 @@ func (c *App) runConnectCommandWithStorageAndPassword(ctx context.Context, co *c
 		return errors.Wrap(err, "error connecting to repository")
 	}
 
-	log(ctx).Infof("Connected to repository.")
+	log(ctx).Info("Connected to repository.")
 	c.maybeInitializeUpdateCheck(ctx, co)
 
 	return nil

+ 1 - 1
cli/command_repository_connect_server.go

@@ -72,7 +72,7 @@ func (c *commandRepositoryConnectServer) run(ctx context.Context) error {
 		return errors.Wrap(err, "error connecting to API server")
 	}
 
-	log(ctx).Infof("Connected to repository API Server.")
+	log(ctx).Info("Connected to repository API Server.")
 	c.svc.maybeInitializeUpdateCheck(ctx, c.co)
 
 	return nil

+ 1 - 1
cli/command_repository_create.go

@@ -126,7 +126,7 @@ func (c *commandRepositoryCreate) runCreateCommandWithStorage(ctx context.Contex
 		return errors.Wrap(err, "getting password")
 	}
 
-	log(ctx).Infof("Initializing repository with:")
+	log(ctx).Info("Initializing repository with:")
 
 	if options.BlockFormat.Version != 0 {
 		log(ctx).Infof("  format version:      %v", options.BlockFormat.Version)

+ 2 - 2
cli/command_repository_repair.go

@@ -55,13 +55,13 @@ func packBlockPrefixes() []string {
 func (c *commandRepositoryRepair) runRepairCommandWithStorage(ctx context.Context, st blob.Storage) error {
 	switch c.repairCommandRecoverFormatBlob {
 	case "auto":
-		log(ctx).Infof("looking for format blob...")
+		log(ctx).Info("looking for format blob...")
 
 		var tmp gather.WriteBuffer
 		defer tmp.Close()
 
 		if err := st.GetBlob(ctx, format.KopiaRepositoryBlobID, 0, -1, &tmp); err == nil {
-			log(ctx).Infof("format blob already exists, not recovering, pass --recover-format=yes")
+			log(ctx).Info("format blob already exists, not recovering, pass --recover-format=yes")
 			return nil
 		}
 

+ 7 - 7
cli/command_repository_set_client.go

@@ -46,34 +46,34 @@ func (c *commandRepositorySetClient) run(ctx context.Context, rep repo.Repositor
 
 	if c.repoClientOptionsReadOnly {
 		if opt.ReadOnly {
-			log(ctx).Infof("Repository is already in read-only mode.")
+			log(ctx).Info("Repository is already in read-only mode.")
 		} else {
 			opt.ReadOnly = true
 			anyChange = true
 
-			log(ctx).Infof("Setting repository to read-only mode.")
+			log(ctx).Info("Setting repository to read-only mode.")
 		}
 	}
 
 	if c.repoClientOptionsReadWrite {
 		if !opt.ReadOnly {
-			log(ctx).Infof("Repository is already in read-write mode.")
+			log(ctx).Info("Repository is already in read-write mode.")
 		} else {
 			opt.ReadOnly = false
 			anyChange = true
 
-			log(ctx).Infof("Setting repository to read-write mode.")
+			log(ctx).Info("Setting repository to read-write mode.")
 		}
 	}
 
 	if c.repoClientOptionsPermissiveCacheLoading {
 		if !opt.PermissiveCacheLoading {
-			log(ctx).Infof("Repository fails on read of bad index blobs.")
+			log(ctx).Info("Repository fails on read of bad index blobs.")
 		} else {
 			opt.PermissiveCacheLoading = true
 			anyChange = true
 
-			log(ctx).Infof("Setting to load indicies into cache permissively.")
+			log(ctx).Info("Setting to load indicies into cache permissively.")
 		}
 	}
 
@@ -109,7 +109,7 @@ func (c *commandRepositorySetClient) run(ctx context.Context, rep repo.Repositor
 		opt.FormatBlobCacheDuration = -1
 		anyChange = true
 
-		log(ctx).Infof("Disabling format blob cache")
+		log(ctx).Info("Disabling format blob cache")
 	}
 
 	if !anyChange {

+ 3 - 3
cli/command_repository_set_parameters.go

@@ -131,7 +131,7 @@ func updateRepositoryParameters(
 	requiredFeatures []feature.Required,
 ) error {
 	if upgradeToEpochManager {
-		log(ctx).Infof("migrating current indexes to epoch format")
+		log(ctx).Info("migrating current indexes to epoch format")
 
 		if err := rep.ContentManager().PrepareUpgradeToIndexBlobManagerV1(ctx); err != nil {
 			return errors.Wrap(err, "error upgrading indexes")
@@ -166,7 +166,7 @@ func updateEpochParameters(mp *format.MutableParameters, anyChange, upgradeToEpo
 }
 
 func (c *commandRepositorySetParameters) disableBlobRetention(ctx context.Context, blobcfg *format.BlobStorageConfiguration, anyChange *bool) {
-	log(ctx).Infof("disabling blob retention")
+	log(ctx).Info("disabling blob retention")
 
 	blobcfg.RetentionMode = ""
 	blobcfg.RetentionPeriod = 0
@@ -247,7 +247,7 @@ func (c *commandRepositorySetParameters) run(ctx context.Context, rep repo.Direc
 		return errors.Wrap(err, "error updating repository parameters")
 	}
 
-	log(ctx).Infof("NOTE: Repository parameters updated, you must disconnect and re-connect all other Kopia clients.")
+	log(ctx).Info("NOTE: Repository parameters updated, you must disconnect and re-connect all other Kopia clients.")
 
 	return nil
 }

+ 5 - 5
cli/command_repository_sync.go

@@ -81,19 +81,19 @@ func (c *commandRepositorySyncTo) setup(svc advancedAppServices, parent commandP
 const syncProgressInterval = 300 * time.Millisecond
 
 func (c *commandRepositorySyncTo) runSyncWithStorage(ctx context.Context, src blob.Reader, dst blob.Storage) error {
-	log(ctx).Infof("Synchronizing repositories:")
+	log(ctx).Info("Synchronizing repositories:")
 	log(ctx).Infof("  Source:      %v", src.DisplayName())
 	log(ctx).Infof("  Destination: %v", dst.DisplayName())
 
 	if !c.repositorySyncDelete {
-		log(ctx).Infof("NOTE: By default no BLOBs are deleted, pass --delete to allow it.")
+		log(ctx).Info("NOTE: By default no BLOBs are deleted, pass --delete to allow it.")
 	}
 
 	if err := c.ensureRepositoriesHaveSameFormatBlob(ctx, src, dst); err != nil {
 		return err
 	}
 
-	log(ctx).Infof("Looking for BLOBs to synchronize...")
+	log(ctx).Info("Looking for BLOBs to synchronize...")
 
 	var (
 		inSyncBlobs int
@@ -162,7 +162,7 @@ func (c *commandRepositorySyncTo) runSyncWithStorage(ctx context.Context, src bl
 		return nil
 	}
 
-	log(ctx).Infof("Copying...")
+	log(ctx).Info("Copying...")
 
 	c.beginSyncProgress()
 
@@ -316,7 +316,7 @@ func (c *commandRepositorySyncTo) syncCopyBlob(ctx context.Context, m blob.Metad
 			// run again without SetModTime, emit a warning
 			opt.SetModTime = time.Time{}
 
-			log(ctx).Warnf("destination repository does not support preserving modification times")
+			log(ctx).Warn("destination repository does not support preserving modification times")
 
 			c.repositorySyncTimes = false
 

+ 1 - 1
cli/command_repository_throttle_set.go

@@ -30,7 +30,7 @@ func (c *commandRepositoryThrottleSet) run(ctx context.Context, rep repo.DirectR
 	}
 
 	if changeCount == 0 {
-		log(ctx).Infof("No changes made.")
+		log(ctx).Info("No changes made.")
 		return nil
 	}
 

+ 11 - 11
cli/command_repository_upgrade.go

@@ -137,7 +137,7 @@ func (c *commandRepositoryUpgrade) validateAction(ctx context.Context, rep repo.
 	}
 
 	if len(indexBlobInfos0) == 0 && len(indexBlobInfos1) > 0 {
-		log(ctx).Infof("old index is empty (possibly due to upgrade), nothing to compare against")
+		log(ctx).Info("old index is empty (possibly due to upgrade), nothing to compare against")
 		return nil
 	}
 
@@ -181,7 +181,7 @@ func (c *commandRepositoryUpgrade) validateAction(ctx context.Context, rep repo.
 
 	// no msgs means the check passed without finding anything wrong
 	if len(msgs) == 0 {
-		log(ctx).Infof("index validation succeeded")
+		log(ctx).Info("index validation succeeded")
 		return nil
 	}
 
@@ -241,7 +241,7 @@ func (c *commandRepositoryUpgrade) forceRollbackAction(ctx context.Context, rep
 		return errors.Wrap(err, "failed to rollback the upgrade")
 	}
 
-	log(ctx).Infof("Repository upgrade lock has been revoked.")
+	log(ctx).Info("Repository upgrade lock has been revoked.")
 
 	return nil
 }
@@ -340,7 +340,7 @@ func (c *commandRepositoryUpgrade) setLockIntent(ctx context.Context, rep repo.D
 		return nil
 	}
 
-	log(ctx).Infof("Repository upgrade lock intent has been placed.")
+	log(ctx).Info("Repository upgrade lock intent has been placed.")
 
 	// skip all other phases after this step
 	if c.lockOnly {
@@ -362,7 +362,7 @@ func (c *commandRepositoryUpgrade) drainOrCommit(ctx context.Context, rep repo.D
 	}
 
 	if mp.EpochParameters.Enabled {
-		log(ctx).Infof("Repository indices have already been migrated to the epoch format, no need to drain other clients")
+		log(ctx).Info("Repository indices have already been migrated to the epoch format, no need to drain other clients")
 
 		l, err := rep.FormatManager().GetUpgradeLockIntent(ctx)
 		if err != nil {
@@ -374,7 +374,7 @@ func (c *commandRepositoryUpgrade) drainOrCommit(ctx context.Context, rep repo.D
 			return nil
 		}
 
-		log(ctx).Infof("Continuing to drain since advance notice has been set")
+		log(ctx).Info("Continuing to drain since advance notice has been set")
 	}
 
 	if err := c.drainAllClients(ctx, rep); err != nil {
@@ -382,7 +382,7 @@ func (c *commandRepositoryUpgrade) drainOrCommit(ctx context.Context, rep repo.D
 	}
 	// we need to reopen the repository after this point
 
-	log(ctx).Infof("Successfully drained all repository clients, the lock has been fully-established now.")
+	log(ctx).Info("Successfully drained all repository clients, the lock has been fully-established now.")
 
 	return nil
 }
@@ -455,7 +455,7 @@ func (c *commandRepositoryUpgrade) upgrade(ctx context.Context, rep repo.DirectR
 	mp.EpochParameters = epoch.DefaultParameters()
 	mp.IndexVersion = 2
 
-	log(ctx).Infof("migrating current indices to epoch format")
+	log(ctx).Info("migrating current indices to epoch format")
 
 	if uerr := rep.ContentManager().PrepareUpgradeToIndexBlobManagerV1(ctx); uerr != nil {
 		return errors.Wrap(uerr, "error upgrading indices")
@@ -473,7 +473,7 @@ func (c *commandRepositoryUpgrade) upgrade(ctx context.Context, rep repo.DirectR
 
 	// we need to reopen the repository after this point
 
-	log(ctx).Infof("Repository indices have been upgraded.")
+	log(ctx).Info("Repository indices have been upgraded.")
 
 	return nil
 }
@@ -485,7 +485,7 @@ func (c *commandRepositoryUpgrade) upgrade(ctx context.Context, rep repo.DirectR
 // after this phase.
 func (c *commandRepositoryUpgrade) commitUpgrade(ctx context.Context, rep repo.DirectRepositoryWriter) error {
 	if c.commitMode == commitModeNeverCommit {
-		log(ctx).Infof("Commit mode is set to 'never'.  Skipping commit.")
+		log(ctx).Info("Commit mode is set to 'never'.  Skipping commit.")
 		return nil
 	}
 
@@ -494,7 +494,7 @@ func (c *commandRepositoryUpgrade) commitUpgrade(ctx context.Context, rep repo.D
 	}
 	// we need to reopen the repository after this point
 
-	log(ctx).Infof("Repository has been successfully upgraded.")
+	log(ctx).Info("Repository has been successfully upgraded.")
 
 	return nil
 }

+ 2 - 2
cli/command_server_start.go

@@ -214,7 +214,7 @@ func (c *commandServerStart) run(ctx context.Context) error {
 			return errors.Wrap(httpServer.Close(), "close")
 		}
 
-		log(ctx2).Debugf("graceful shutdown succeeded")
+		log(ctx2).Debug("graceful shutdown succeeded")
 
 		return nil
 	}
@@ -266,7 +266,7 @@ func (c *commandServerStart) run(ctx context.Context) error {
 }
 
 func shutdownHTTPServer(ctx context.Context, httpServer *http.Server) {
-	log(ctx).Infof("Shutting down HTTP server ...")
+	log(ctx).Info("Shutting down HTTP server ...")
 
 	if err := httpServer.Shutdown(ctx); err != nil {
 		log(ctx).Errorln("unable to shut down HTTP server:", err)

+ 1 - 1
cli/command_server_throttle_set.go

@@ -38,7 +38,7 @@ func (c *commandServerThrottleSet) run(ctx context.Context, cli *apiclient.Kopia
 	}
 
 	if changeCount == 0 {
-		log(ctx).Infof("No changes made.")
+		log(ctx).Info("No changes made.")
 		return nil
 	}
 

+ 1 - 1
cli/command_server_tls.go

@@ -174,6 +174,6 @@ func (c *commandServerStart) startServerWithOptionalTLSAndListener(ctx context.C
 
 func (c *commandServerStart) showServerUIPrompt(ctx context.Context) {
 	if c.serverStartUI {
-		log(ctx).Infof("Open the address above in a web browser to use the UI.")
+		log(ctx).Info("Open the address above in a web browser to use the UI.")
 	}
 }

+ 2 - 2
cli/command_snapshot_create.go

@@ -129,7 +129,7 @@ func (c *commandSnapshotCreate) run(ctx context.Context, rep repo.RepositoryWrit
 
 	for _, snapshotDir := range sources {
 		if u.IsCanceled() {
-			log(ctx).Infof("Upload canceled")
+			log(ctx).Info("Upload canceled")
 			break
 		}
 
@@ -311,7 +311,7 @@ func (c *commandSnapshotCreate) snapshotSingleSource(ctx context.Context, fsEntr
 	ignoreIdenticalSnapshot := policyTree.EffectivePolicy().RetentionPolicy.IgnoreIdenticalSnapshots.OrDefault(false)
 	if ignoreIdenticalSnapshot && len(previous) > 0 {
 		if previous[0].RootObjectID() == manifest.RootObjectID() {
-			log(ctx).Infof("\n Not saving snapshot because no files have been changed since previous snapshot")
+			log(ctx).Info("\n Not saving snapshot because no files have been changed since previous snapshot")
 			return nil
 		}
 	}

+ 2 - 2
cli/command_snapshot_fix.go

@@ -132,7 +132,7 @@ func (c *commonRewriteSnapshots) rewriteMatchingSnapshots(ctx context.Context, r
 	}
 
 	if updatedSnapshots == 0 {
-		log(ctx).Infof("No changes.")
+		log(ctx).Info("No changes.")
 	}
 
 	return nil
@@ -183,7 +183,7 @@ func (c *commonRewriteSnapshots) listManifestIDs(ctx context.Context, rep repo.R
 	}
 
 	if len(manifests) == 0 {
-		log(ctx).Infof("Listing all snapshots...")
+		log(ctx).Info("Listing all snapshots...")
 
 		m, err := snapshot.ListSnapshotManifests(ctx, rep, nil, nil)
 		if err != nil {

+ 2 - 2
cli/command_snapshot_list.go

@@ -233,7 +233,7 @@ func (c *commandSnapshotList) outputManifestGroups(ctx context.Context, rep repo
 	}
 
 	if !anyOutput && !c.snapshotListShowAll && len(manifests) > 0 {
-		log(ctx).Infof("No snapshots found. Pass --all to show snapshots from all users/hosts.\n")
+		log(ctx).Info("No snapshots found. Pass --all to show snapshots from all users/hosts.\n")
 	}
 
 	return nil
@@ -290,7 +290,7 @@ func (c *commandSnapshotList) outputManifestFromSingleSource(ctx context.Context
 
 		ohid, ok := ent.(object.HasObjectID)
 		if !ok {
-			log(ctx).Errorf("entry does not have object ID: %v", ent, err)
+			log(ctx).Errorf("entry for '%s' does not have object ID: %v", ent.Name(), err)
 			return nil
 		}
 

+ 1 - 1
cli/command_snapshot_migrate.go

@@ -131,7 +131,7 @@ func (c *commandSnapshotMigrate) run(ctx context.Context, destRepo repo.Reposito
 	wg.Wait()
 	c.svc.getProgress().FinishShared()
 	c.out.printStderr("\r\n")
-	log(ctx).Infof("Migration finished.")
+	log(ctx).Info("Migration finished.")
 
 	return nil
 }

+ 1 - 1
cli/command_snapshot_verify.go

@@ -45,7 +45,7 @@ func (c *commandSnapshotVerify) setup(svc appServices, parent commandParent) {
 
 func (c *commandSnapshotVerify) run(ctx context.Context, rep repo.Repository) error {
 	if c.verifyCommandAllSources {
-		log(ctx).Errorf("DEPRECATED: --all-sources flag has no effect and is the default when no sources are provided.")
+		log(ctx).Error("DEPRECATED: --all-sources flag has no effect and is the default when no sources are provided.")
 	}
 
 	if dr, ok := rep.(repo.DirectRepositoryWriter); ok {

+ 3 - 3
cli/update_check.go

@@ -92,7 +92,7 @@ func (c *App) maybeInitializeUpdateCheck(ctx context.Context, co *connectOptions
 			NextNotifyTime: clock.Now().Add(c.initialUpdateCheckDelay),
 		}
 		if err := c.writeUpdateState(us); err != nil {
-			log(ctx).Debugf("error initializing update state")
+			log(ctx).Debug("error initializing update state")
 			return
 		}
 
@@ -199,7 +199,7 @@ func (c *App) maybeCheckGithub(ctx context.Context, us *updateState) error {
 		return nil
 	}
 
-	log(ctx).Debugf("time for next update check has been reached")
+	log(ctx).Debug("time for next update check has been reached")
 
 	// before we check for update, write update state file again, so if this fails
 	// we won't bother GitHub for a while
@@ -245,7 +245,7 @@ func (c *App) maybePrintUpdateNotification(ctx context.Context) {
 	}
 
 	if updatedVersion == "" {
-		log(ctx).Debugf("no updated version available")
+		log(ctx).Debug("no updated version available")
 		return
 	}
 

+ 2 - 2
internal/connection/reconnector.go

@@ -42,7 +42,7 @@ func (r *Reconnector) GetOrOpenConnection(ctx context.Context) (Connection, erro
 	defer r.mu.Unlock()
 
 	if r.activeConnection == nil {
-		log(ctx).Debugf("establishing new connection...")
+		log(ctx).Debug("establishing new connection...")
 
 		conn, err := r.connector.NewConnection(ctx)
 		if err != nil {
@@ -102,7 +102,7 @@ func (r *Reconnector) CloseActiveConnection(ctx context.Context) {
 	r.activeConnection = nil
 
 	if c != nil {
-		log(ctx).Debugf("closing active connection.")
+		log(ctx).Debug("closing active connection.")
 
 		if err := c.Close(); err != nil {
 			log(ctx).Errorf("error closing active connection: %v", err)

+ 2 - 2
internal/crypto/aesgcm.go

@@ -65,7 +65,7 @@ func DecryptAes256Gcm(data, masterKey, salt []byte) ([]byte, error) {
 
 	data = append([]byte(nil), data...)
 	if len(data) < aead.NonceSize() {
-		return nil, errors.Errorf("invalid encrypted payload, too short")
+		return nil, errors.New("invalid encrypted payload, too short")
 	}
 
 	nonce := data[0:aead.NonceSize()]
@@ -73,7 +73,7 @@ func DecryptAes256Gcm(data, masterKey, salt []byte) ([]byte, error) {
 
 	plainText, err := aead.Open(payload[:0], nonce, payload, authData)
 	if err != nil {
-		return nil, errors.Errorf("unable to decrypt repository blob, invalid credentials?")
+		return nil, errors.New("unable to decrypt repository blob, invalid credentials?")
 	}
 
 	return plainText, nil

+ 1 - 1
internal/epoch/epoch_manager.go

@@ -770,7 +770,7 @@ func (e *Manager) GetCompleteIndexSet(ctx context.Context, maxEpoch int) ([]blob
 		// indexes that are still treated as authoritative according to old committed state.
 		//
 		// Retrying will re-examine the state of the world and re-do the logic.
-		e.log.Debugf("GetCompleteIndexSet took too long, retrying to ensure correctness")
+		e.log.Debug("GetCompleteIndexSet took too long, retrying to ensure correctness")
 		atomic.AddInt32(e.getCompleteIndexSetTooSlow, 1)
 	}
 }

+ 1 - 1
internal/mount/mount_webdav.go

@@ -30,7 +30,7 @@ func webdavServerLogger(r *http.Request, err error) {
 // DirectoryWebDAV exposes the provided filesystem directory via WebDAV on a random port on localhost
 // and returns a controller.
 func DirectoryWebDAV(ctx context.Context, entry fs.Directory) (Controller, error) {
-	log(ctx).Debugf("creating webdav server...")
+	log(ctx).Debug("creating webdav server...")
 
 	mux := http.NewServeMux()
 

+ 2 - 2
internal/passwordpersist/passwordpersist_keyring.go

@@ -38,13 +38,13 @@ func (keyringStrategy) GetPassword(ctx context.Context, configFile string) (stri
 }
 
 func (keyringStrategy) PersistPassword(ctx context.Context, configFile, password string) error {
-	log(ctx).Debugf("saving password to OS keyring...")
+	log(ctx).Debug("saving password to OS keyring...")
 
 	err := keyring.Set(getKeyringItemID(configFile), keyringUsername(ctx), password)
 
 	switch {
 	case err == nil:
-		log(ctx).Debugf("Saved password in OS keyring")
+		log(ctx).Debug("Saved password in OS keyring")
 		return nil
 
 	case errors.Is(err, keyring.ErrUnsupportedPlatform):

+ 1 - 1
internal/pproflogging/pproflogging.go

@@ -220,7 +220,7 @@ func setupProfileFractions(ctx context.Context, profileBuffers map[ProfileName]*
 
 		n1, err := strconv.Atoi(s)
 		if err != nil {
-			log(ctx).With("cause", err).Warnf("invalid PPROF rate, %q, for %s: %v", s, k)
+			log(ctx).With("cause", err).Warnf("invalid PPROF rate, %q, for '%s'", s, k)
 			continue
 		}
 

+ 11 - 11
internal/providervalidation/providervalidation.go

@@ -125,7 +125,7 @@ func ValidateProvider(ctx context.Context, st0 blob.Storage, opt Options) error
 	prefix1 := uberPrefix + "a"
 	prefix2 := uberPrefix + "b"
 
-	log(ctx).Infof("Validating storage capacity and usage")
+	log(ctx).Info("Validating storage capacity and usage")
 
 	c, err := st.pickOne().GetCapacity(ctx)
 
@@ -138,13 +138,13 @@ func ValidateProvider(ctx context.Context, st0 blob.Storage, opt Options) error
 		return errors.Wrapf(err, "unexpected error")
 	}
 
-	log(ctx).Infof("Validating blob list responses")
+	log(ctx).Info("Validating blob list responses")
 
 	if err := verifyBlobCount(ctx, st.pickOne(), uberPrefix, 0); err != nil {
 		return errors.Wrap(err, "invalid blob count")
 	}
 
-	log(ctx).Infof("Validating non-existent blob responses")
+	log(ctx).Info("Validating non-existent blob responses")
 
 	var out gather.WriteBuffer
 	defer out.Close()
@@ -173,7 +173,7 @@ func ValidateProvider(ctx context.Context, st0 blob.Storage, opt Options) error
 		return errors.Wrap(err, "error writing blob #1")
 	}
 
-	log(ctx).Infof("Validating conditional creates...")
+	log(ctx).Info("Validating conditional creates...")
 
 	err2 := st.pickOne().PutBlob(ctx, prefix1+"1", gather.FromSlice([]byte{99}), blob.PutOptions{DoNotRecreate: true})
 
@@ -187,7 +187,7 @@ func ValidateProvider(ctx context.Context, st0 blob.Storage, opt Options) error
 		return errors.Errorf("unexpected error returned from PutBlob with DoNotRecreate: %v", err2)
 	}
 
-	log(ctx).Infof("Validating list responses...")
+	log(ctx).Info("Validating list responses...")
 
 	if err := verifyBlobCount(ctx, st.pickOne(), uberPrefix, 1); err != nil {
 		return errors.Wrap(err, "invalid uber blob count")
@@ -201,7 +201,7 @@ func ValidateProvider(ctx context.Context, st0 blob.Storage, opt Options) error
 		return errors.Wrap(err, "invalid blob count with prefix 2")
 	}
 
-	log(ctx).Infof("Validating partial reads...")
+	log(ctx).Info("Validating partial reads...")
 
 	partialBlobCases := []struct {
 		offset int64
@@ -225,7 +225,7 @@ func ValidateProvider(ctx context.Context, st0 blob.Storage, opt Options) error
 		}
 	}
 
-	log(ctx).Infof("Validating full reads...")
+	log(ctx).Info("Validating full reads...")
 
 	// read full blob
 	err2 = st.pickOne().GetBlob(ctx, prefix1+"1", 0, -1, &out)
@@ -237,7 +237,7 @@ func ValidateProvider(ctx context.Context, st0 blob.Storage, opt Options) error
 		return errors.Errorf("got unexpected data after reading partial blob: %x, wanted %x", got, want)
 	}
 
-	log(ctx).Infof("Validating metadata...")
+	log(ctx).Info("Validating metadata...")
 
 	// get metadata for non-existent blob
 	bm, err2 := st.pickOne().GetMetadata(ctx, prefix1+"1")
@@ -272,7 +272,7 @@ func ValidateProvider(ctx context.Context, st0 blob.Storage, opt Options) error
 		return errors.Wrap(err, "error validating concurrency")
 	}
 
-	log(ctx).Infof("All good.")
+	log(ctx).Info("All good.")
 
 	return nil
 }
@@ -479,12 +479,12 @@ func (c *concurrencyTest) run(ctx context.Context) error {
 }
 
 func cleanupAllBlobs(ctx context.Context, st blob.Storage, prefix blob.ID) {
-	log(ctx).Infof("Cleaning up temporary data...")
+	log(ctx).Info("Cleaning up temporary data...")
 
 	if err := st.ListBlobs(ctx, prefix, func(bm blob.Metadata) error {
 		return errors.Wrapf(st.DeleteBlob(ctx, bm.BlobID), "error deleting blob %v", bm.BlobID)
 	}); err != nil {
-		log(ctx).Debugf("error cleaning up")
+		log(ctx).Debug("error cleaning up")
 	}
 }
 

+ 3 - 3
internal/repodiag/log_manager_test.go

@@ -22,7 +22,7 @@ func TestLogManager_Enabled(t *testing.T) {
 
 	lm.Enable()
 	l := lm.NewLogger()
-	l.Infof("hello")
+	l.Info("hello")
 
 	require.Empty(t, d)
 	l.Sync()
@@ -74,7 +74,7 @@ func TestLogManager_NotEnabled(t *testing.T) {
 	lm := repodiag.NewLogManager(ctx, w)
 
 	l := lm.NewLogger()
-	l.Infof("hello")
+	l.Info("hello")
 
 	require.Empty(t, d)
 	l.Sync()
@@ -90,6 +90,6 @@ func TestLogManager_Null(t *testing.T) {
 	lm.Enable()
 
 	l := lm.NewLogger()
-	l.Infof("hello")
+	l.Info("hello")
 	l.Sync()
 }

+ 1 - 1
internal/server/api_estimate.go

@@ -84,7 +84,7 @@ func logBucketSamples(ctx context.Context, buckets snapshotfs.SampleBuckets, pre
 		hasAny = true
 
 		if showExamples && len(bucket.Examples) > 0 {
-			log(ctx).Infof("Examples:")
+			log(ctx).Info("Examples:")
 
 			for _, sample := range bucket.Examples {
 				log(ctx).Infof(" - %v\n", sample)

+ 4 - 4
internal/server/server.go

@@ -474,7 +474,7 @@ func handleFlush(ctx context.Context, rc requestContext) (interface{}, *apiError
 }
 
 func handleShutdown(ctx context.Context, rc requestContext) (interface{}, *apiError) {
-	log(ctx).Infof("shutting down due to API request")
+	log(ctx).Info("shutting down due to API request")
 
 	rc.srv.requestShutdown(ctx)
 
@@ -552,9 +552,9 @@ func (s *Server) SetRepository(ctx context.Context, rep repo.Repository) error {
 		s.unmountAllLocked(ctx)
 
 		// close previous source managers
-		log(ctx).Debugf("stopping all source managers")
+		log(ctx).Debug("stopping all source managers")
 		s.stopAllSourceManagersLocked(ctx)
-		log(ctx).Debugf("stopped all source managers")
+		log(ctx).Debug("stopped all source managers")
 
 		if err := s.rep.Close(ctx); err != nil {
 			return errors.Wrap(err, "unable to close previous repository")
@@ -856,7 +856,7 @@ func (s *Server) InitRepositoryAsync(ctx context.Context, mode string, initializ
 		}
 
 		if rep == nil {
-			log(ctx).Infof("Repository not configured.")
+			log(ctx).Info("Repository not configured.")
 		}
 
 		if err = s.SetRepository(ctx, rep); err != nil {

+ 3 - 3
internal/server/source_manager.go

@@ -250,7 +250,7 @@ func (s *sourceManager) cancel(ctx context.Context) serverapi.SourceActionRespon
 	log(ctx).Debugw("cancel triggered via API", "source", s.src)
 
 	if u := s.currentUploader(); u != nil {
-		log(ctx).Infof("canceling current upload")
+		log(ctx).Info("canceling current upload")
 		u.Cancel()
 	}
 
@@ -265,7 +265,7 @@ func (s *sourceManager) pause(ctx context.Context) serverapi.SourceActionRespons
 	s.sourceMutex.Unlock()
 
 	if u := s.currentUploader(); u != nil {
-		log(ctx).Infof("canceling current upload")
+		log(ctx).Info("canceling current upload")
 		u.Cancel()
 	}
 
@@ -371,7 +371,7 @@ func (s *sourceManager) snapshotInternal(ctx context.Context, ctrl uitask.Contro
 		ignoreIdenticalSnapshot := policyTree.EffectivePolicy().RetentionPolicy.IgnoreIdenticalSnapshots.OrDefault(false)
 		if ignoreIdenticalSnapshot && len(manifestsSinceLastCompleteSnapshot) > 0 {
 			if manifestsSinceLastCompleteSnapshot[0].RootObjectID() == manifest.RootObjectID() {
-				log(ctx).Debugf("Not saving snapshot because no files have been changed since previous snapshot")
+				log(ctx).Debug("Not saving snapshot because no files have been changed since previous snapshot")
 				return nil
 			}
 		}

+ 1 - 1
internal/tlsutil/tlsutil.go

@@ -33,7 +33,7 @@ var log = logging.Module("tls")
 
 // GenerateServerCertificate generates random TLS certificate and key.
 func GenerateServerCertificate(ctx context.Context, keySize int, certValid time.Duration, names []string) (*x509.Certificate, *rsa.PrivateKey, error) {
-	log(ctx).Debugf("generating new TLS certificate")
+	log(ctx).Debug("generating new TLS certificate")
 
 	priv, err := rsa.GenerateKey(rand.Reader, keySize)
 	if err != nil {

+ 5 - 5
internal/uitask/uitask_test.go

@@ -75,15 +75,15 @@ func testUITaskInternal(t *testing.T, ctx context.Context, m *uitask.Manager) {
 		})
 
 		verifyTaskLog(t, m, tid1a, nil)
-		log(ctx).Debugf("first")
-		ignoredLog(ctx).Debugf("this is ignored")
-		log(ctx).Infof("iii")
+		log(ctx).Debug("first")
+		ignoredLog(ctx).Debug("this is ignored")
+		log(ctx).Info("iii")
 		verifyTaskLog(t, m, tid1a, []string{
 			"first",
 			"iii",
 		})
-		log(ctx).Infof("www")
-		log(ctx).Errorf("eee")
+		log(ctx).Info("www")
+		log(ctx).Error("eee")
 
 		// 'first' has aged out
 		verifyTaskLog(t, m, tid1a, []string{

+ 1 - 1
repo/blob/rclone/rclone_storage.go

@@ -105,7 +105,7 @@ func (r *rcloneStorage) Close(ctx context.Context) error {
 
 	// this will kill rclone process if any
 	if r.cmd != nil && r.cmd.Process != nil {
-		log(ctx).Debugf("killing rclone")
+		log(ctx).Debug("killing rclone")
 		r.cmd.Process.Kill() //nolint:errcheck
 		r.cmd.Wait()         //nolint:errcheck
 	}

+ 1 - 1
repo/blob/sftp/sftp_storage.go

@@ -418,7 +418,7 @@ func getSigner(opt *Options) (ssh.Signer, error) {
 }
 
 func createSSHConfig(ctx context.Context, opt *Options) (*ssh.ClientConfig, error) {
-	log(ctx).Debugf("using internal SSH client")
+	log(ctx).Debug("using internal SSH client")
 
 	hostKeyCallback, err := getHostKeyCallback(opt)
 	if err != nil {

+ 1 - 1
repo/blob/storage.go

@@ -396,7 +396,7 @@ func PutBlobAndGetMetadata(ctx context.Context, st Storage, blobID ID, data Byte
 func ReadBlobMap(ctx context.Context, br Reader) (map[ID]Metadata, error) {
 	blobMap := map[ID]Metadata{}
 
-	log(ctx).Infof("Listing blobs...")
+	log(ctx).Info("Listing blobs...")
 
 	if err := br.ListBlobs(ctx, "", func(bm Metadata) error {
 		blobMap[bm.BlobID] = bm

+ 1 - 1
repo/connect.go

@@ -99,7 +99,7 @@ func Disconnect(ctx context.Context, configFile string) error {
 
 	maintenanceLock := configFile + ".mlock"
 	if err := os.RemoveAll(maintenanceLock); err != nil {
-		log(ctx).Errorf("unable to remove maintenance lock file", maintenanceLock)
+		log(ctx).Error("unable to remove maintenance lock file", maintenanceLock)
 	}
 
 	//nolint:wrapcheck

+ 1 - 1
repo/content/committed_content_index.go

@@ -343,7 +343,7 @@ func (c *committedContentIndex) fetchIndexBlobs(ctx context.Context, isPermissiv
 		return errors.Wrap(err, "error downloading indexes")
 	}
 
-	c.log.Debugf("Index blobs downloaded.")
+	c.log.Debug("Index blobs downloaded.")
 
 	return nil
 }

+ 6 - 6
repo/content/content_manager.go

@@ -228,7 +228,7 @@ func (bm *WriteManager) maybeRetryWritingFailedPacksUnlocked(ctx context.Context
 
 	// do not start new uploads while flushing
 	for bm.flushing {
-		bm.log.Debugf("wait-before-retry")
+		bm.log.Debug("wait-before-retry")
 		bm.cond.Wait()
 	}
 
@@ -280,7 +280,7 @@ func (bm *WriteManager) addToPackUnlocked(ctx context.Context, contentID ID, dat
 
 	// do not start new uploads while flushing
 	for bm.flushing {
-		bm.log.Debugf("wait-before-flush")
+		bm.log.Debug("wait-before-flush")
 		bm.cond.Wait()
 	}
 
@@ -351,7 +351,7 @@ func (bm *WriteManager) DisableIndexFlush(ctx context.Context) {
 	bm.lock()
 	defer bm.unlock(ctx)
 
-	bm.log.Debugf("DisableIndexFlush()")
+	bm.log.Debug("DisableIndexFlush()")
 
 	bm.disableIndexFlushCount++
 }
@@ -362,7 +362,7 @@ func (bm *WriteManager) EnableIndexFlush(ctx context.Context) {
 	bm.lock()
 	defer bm.unlock(ctx)
 
-	bm.log.Debugf("EnableIndexFlush()")
+	bm.log.Debug("EnableIndexFlush()")
 
 	bm.disableIndexFlushCount--
 }
@@ -444,7 +444,7 @@ func (bm *WriteManager) flushPackIndexesLocked(ctx context.Context, mp format.Mu
 	defer span.End()
 
 	if bm.disableIndexFlushCount > 0 {
-		bm.log.Debugf("not flushing index because flushes are currently disabled")
+		bm.log.Debug("not flushing index because flushes are currently disabled")
 		return nil
 	}
 
@@ -607,7 +607,7 @@ func (bm *WriteManager) Flush(ctx context.Context) error {
 	bm.lock()
 	defer bm.unlock(ctx)
 
-	bm.log.Debugf("flush")
+	bm.log.Debug("flush")
 
 	// when finished flushing, notify goroutines that were waiting for it.
 	defer bm.cond.Broadcast()

+ 1 - 1
repo/content/content_manager_indexes.go

@@ -18,7 +18,7 @@ func (sm *SharedManager) Refresh(ctx context.Context) error {
 	sm.indexesLock.Lock()
 	defer sm.indexesLock.Unlock()
 
-	sm.log.Debugf("Refresh started")
+	sm.log.Debug("Refresh started")
 
 	ibm, err := sm.indexBlobManager(ctx)
 	if err != nil {

+ 1 - 1
repo/content/content_manager_iterate.go

@@ -236,7 +236,7 @@ func (bm *WriteManager) IterateUnreferencedBlobs(ctx context.Context, blobPrefix
 
 	defer usedPacks.Close(ctx)
 
-	bm.log.Debugf("determining blobs in use")
+	bm.log.Debug("determining blobs in use")
 	// find packs in use
 	if err := bm.IteratePacks(
 		ctx,

+ 1 - 1
repo/content/indexblob/index_blob_manager_v0.go

@@ -465,7 +465,7 @@ func (m *ManagerV0) getBlobsToCompact(indexBlobs []Metadata, opt CompactOptions,
 
 	if len(nonCompactedBlobs) < opt.MaxSmallBlobs {
 		// current count is below min allowed - nothing to do
-		m.log.Debugf("no small contents to Compact")
+		m.log.Debug("no small contents to Compact")
 		return nil
 	}
 

+ 2 - 2
repo/grpc_repository_client.go

@@ -138,7 +138,7 @@ func (r *grpcInnerSession) readLoop(ctx context.Context) {
 		r.sendStreamBrokenAndClose(r.getAndDeleteResponseChannelLocked(id), err)
 	}
 
-	log(ctx).Debugf("finished closing active requests")
+	log(ctx).Debug("finished closing active requests")
 }
 
 // sendRequest sends the provided request to the server and returns a channel on which the
@@ -437,7 +437,7 @@ func (r *grpcInnerSession) PrefetchContents(ctx context.Context, contentIDs []co
 		}
 	}
 
-	log(ctx).Warnf("missing response to PrefetchContents")
+	log(ctx).Warn("missing response to PrefetchContents")
 
 	return nil
 }

+ 20 - 20
repo/logging/logging_test.go

@@ -24,11 +24,11 @@ func TestBroadcast(t *testing.T) {
 	}, "[second] ")
 
 	l := logging.Broadcast(l0, l1)
-	l.Debugf("A")
+	l.Debug("A")
 	l.Debugw("S", "b", 123)
-	l.Infof("B")
-	l.Errorf("C")
-	l.Warnf("W")
+	l.Info("B")
+	l.Error("C")
+	l.Warn("W")
 
 	require.Equal(t, []string{
 		"[first] A",
@@ -48,11 +48,11 @@ func TestWriter(t *testing.T) {
 	var buf bytes.Buffer
 
 	l := logging.ToWriter(&buf)("module1")
-	l.Debugf("A")
+	l.Debug("A")
 	l.Debugw("S", "b", 123)
-	l.Infof("B")
-	l.Errorf("C")
-	l.Warnf("W")
+	l.Info("B")
+	l.Error("C")
+	l.Warn("W")
 
 	require.Equal(t, "A\nS\t{\"b\":123}\nB\nC\nW\n", buf.String())
 }
@@ -60,11 +60,11 @@ func TestWriter(t *testing.T) {
 func TestNullWriterModule(t *testing.T) {
 	l := logging.Module("mod1")(context.Background())
 
-	l.Debugf("A")
+	l.Debug("A")
 	l.Debugw("S", "b", 123)
-	l.Infof("B")
-	l.Errorf("C")
-	l.Warnf("W")
+	l.Info("B")
+	l.Error("C")
+	l.Warn("W")
 }
 
 func TestNonNullWriterModule(t *testing.T) {
@@ -73,11 +73,11 @@ func TestNonNullWriterModule(t *testing.T) {
 	ctx := logging.WithLogger(context.Background(), logging.ToWriter(&buf))
 	l := logging.Module("mod1")(ctx)
 
-	l.Debugf("A")
+	l.Debug("A")
 	l.Debugw("S", "b", 123)
-	l.Infof("B")
-	l.Errorf("C")
-	l.Warnf("W")
+	l.Info("B")
+	l.Error("C")
+	l.Warn("W")
 
 	require.Equal(t, "A\nS\t{\"b\":123}\nB\nC\nW\n", buf.String())
 }
@@ -89,11 +89,11 @@ func TestWithAdditionalLogger(t *testing.T) {
 	ctx = logging.WithAdditionalLogger(ctx, logging.ToWriter(&buf2))
 	l := logging.Module("mod1")(ctx)
 
-	l.Debugf("A")
+	l.Debug("A")
 	l.Debugw("S", "b", 123)
-	l.Infof("B")
-	l.Errorf("C")
-	l.Warnf("W")
+	l.Info("B")
+	l.Error("C")
+	l.Warn("W")
 
 	require.Equal(t, "A\nS\t{\"b\":123}\nB\nC\nW\n", buf.String())
 	require.Equal(t, "A\nS\t{\"b\":123}\nB\nC\nW\n", buf2.String())

+ 1 - 1
repo/maintenance/blob_gc.go

@@ -59,7 +59,7 @@ func DeleteUnreferencedBlobs(ctx context.Context, rep repo.DirectRepositoryWrite
 	}
 
 	// iterate unreferenced blobs and count them + optionally send to the channel to be deleted
-	log(ctx).Infof("Looking for unreferenced blobs...")
+	log(ctx).Info("Looking for unreferenced blobs...")
 
 	var prefixes []blob.ID
 	if p := opt.Prefix; p != "" {

+ 1 - 1
repo/maintenance/blob_retain.go

@@ -88,7 +88,7 @@ func ExtendBlobRetentionTime(ctx context.Context, rep repo.DirectRepositoryWrite
 	}
 
 	// iterate all relevant (active, extendable) blobs and count them + optionally send to the channel to be extended
-	log(ctx).Infof("Extending retention time for blobs...")
+	log(ctx).Info("Extending retention time for blobs...")
 
 	err = blob.IterateAllPrefixesInParallel(ctx, opt.Parallel, rep.BlobStorage(), prefixes, func(bm blob.Metadata) error {
 		if !opt.DryRun {

+ 2 - 2
repo/maintenance/content_rewrite.go

@@ -43,9 +43,9 @@ func RewriteContents(ctx context.Context, rep repo.DirectRepositoryWriter, opt *
 	}
 
 	if opt.ShortPacks {
-		log(ctx).Infof("Rewriting contents from short packs...")
+		log(ctx).Info("Rewriting contents from short packs...")
 	} else {
-		log(ctx).Infof("Rewriting contents...")
+		log(ctx).Info("Rewriting contents...")
 	}
 
 	cnt := getContentToRewrite(ctx, rep, opt)

+ 1 - 1
repo/maintenance/index_compaction.go

@@ -9,7 +9,7 @@ import (
 // runTaskIndexCompactionQuick rewrites index blobs to reduce their count but does not drop any contents.
 func runTaskIndexCompactionQuick(ctx context.Context, runParams RunParameters, s *Schedule, safety SafetyParameters) error {
 	return ReportRun(ctx, runParams.rep, TaskIndexCompaction, s, func() error {
-		log(ctx).Infof("Compacting indexes...")
+		log(ctx).Info("Compacting indexes...")
 
 		const maxSmallBlobsForIndexCompaction = 8
 

+ 17 - 17
repo/maintenance/maintenance_run.go

@@ -68,25 +68,25 @@ func shouldRun(ctx context.Context, rep repo.DirectRepository, p *Params) (Mode,
 	// check full cycle first, as it does more than the quick cycle
 	if p.FullCycle.Enabled {
 		if !rep.Time().Before(s.NextFullMaintenanceTime) {
-			log(ctx).Debugf("due for full maintenance cycle")
+			log(ctx).Debug("due for full maintenance cycle")
 			return ModeFull, nil
 		}
 
 		log(ctx).Debugf("not due for full maintenance cycle until %v", s.NextFullMaintenanceTime)
 	} else {
-		log(ctx).Debugf("full maintenance cycle not enabled")
+		log(ctx).Debug("full maintenance cycle not enabled")
 	}
 
 	// no time for full cycle, check quick cycle
 	if p.QuickCycle.Enabled {
 		if !rep.Time().Before(s.NextQuickMaintenanceTime) {
-			log(ctx).Debugf("due for quick maintenance cycle")
+			log(ctx).Debug("due for quick maintenance cycle")
 			return ModeQuick, nil
 		}
 
 		log(ctx).Debugf("not due for quick maintenance cycle until %v", s.NextQuickMaintenanceTime)
 	} else {
-		log(ctx).Debugf("quick maintenance cycle not enabled")
+		log(ctx).Debug("quick maintenance cycle not enabled")
 	}
 
 	return ModeNone, nil
@@ -170,7 +170,7 @@ func RunExclusive(ctx context.Context, rep repo.DirectRepositoryWriter, mode Mod
 	}
 
 	if mode == ModeNone {
-		log(ctx).Debugf("not due for maintenance")
+		log(ctx).Debug("not due for maintenance")
 		return nil
 	}
 
@@ -186,7 +186,7 @@ func RunExclusive(ctx context.Context, rep repo.DirectRepositoryWriter, mode Mod
 	}
 
 	if !ok {
-		log(ctx).Debugf("maintenance is already in progress locally")
+		log(ctx).Debug("maintenance is already in progress locally")
 		return nil
 	}
 
@@ -254,7 +254,7 @@ func Run(ctx context.Context, runParams RunParameters, safety SafetyParameters)
 func runQuickMaintenance(ctx context.Context, runParams RunParameters, safety SafetyParameters) error {
 	_, ok, emerr := runParams.rep.ContentManager().EpochManager(ctx)
 	if ok {
-		log(ctx).Debugf("quick maintenance not required for epoch manager")
+		log(ctx).Debug("quick maintenance not required for epoch manager")
 		return nil
 	}
 
@@ -285,10 +285,10 @@ func runQuickMaintenance(ctx context.Context, runParams RunParameters, safety Sa
 		// running full orphaned blob deletion, otherwise next quick maintenance will start a quick rewrite
 		// and we'd never delete blobs orphaned by full rewrite.
 		if hadRecentFullRewrite(s) {
-			log(ctx).Debugf("Had recent full rewrite - performing full blob deletion.")
+			log(ctx).Debug("Had recent full rewrite - performing full blob deletion.")
 			err = runTaskDeleteOrphanedBlobsFull(ctx, runParams, s, safety)
 		} else {
-			log(ctx).Debugf("Performing quick blob deletion.")
+			log(ctx).Debug("Performing quick blob deletion.")
 			err = runTaskDeleteOrphanedBlobsQuick(ctx, runParams, s, safety)
 		}
 
@@ -317,7 +317,7 @@ func runQuickMaintenance(ctx context.Context, runParams RunParameters, safety Sa
 }
 
 func notRewritingContents(ctx context.Context) {
-	log(ctx).Infof("Previous content rewrite has not been finalized yet, waiting until the next blob deletion.")
+	log(ctx).Info("Previous content rewrite has not been finalized yet, waiting until the next blob deletion.")
 }
 
 func notDeletingOrphanedBlobs(ctx context.Context, s *Schedule, safety SafetyParameters) {
@@ -338,7 +338,7 @@ func runTaskCleanupLogs(ctx context.Context, runParams RunParameters, s *Schedul
 
 func runTaskEpochAdvance(ctx context.Context, em *epoch.Manager, runParams RunParameters, s *Schedule) error {
 	return ReportRun(ctx, runParams.rep, TaskEpochAdvance, s, func() error {
-		log(ctx).Infof("Cleaning up no-longer-needed epoch markers...")
+		log(ctx).Info("Cleaning up no-longer-needed epoch markers...")
 		return errors.Wrap(em.MaybeAdvanceWriteEpoch(ctx), "error advancing epoch marker")
 	})
 }
@@ -354,7 +354,7 @@ func runTaskEpochMaintenanceQuick(ctx context.Context, runParams RunParameters,
 	}
 
 	err := ReportRun(ctx, runParams.rep, TaskEpochCompactSingle, s, func() error {
-		log(ctx).Infof("Compacting an eligible uncompacted epoch...")
+		log(ctx).Info("Compacting an eligible uncompacted epoch...")
 		return errors.Wrap(em.MaybeCompactSingleEpoch(ctx), "error compacting single epoch")
 	})
 	if err != nil {
@@ -376,7 +376,7 @@ func runTaskEpochMaintenanceFull(ctx context.Context, runParams RunParameters, s
 
 	// compact a single epoch
 	if err := ReportRun(ctx, runParams.rep, TaskEpochCompactSingle, s, func() error {
-		log(ctx).Infof("Compacting an eligible uncompacted epoch...")
+		log(ctx).Info("Compacting an eligible uncompacted epoch...")
 		return errors.Wrap(em.MaybeCompactSingleEpoch(ctx), "error compacting single epoch")
 	}); err != nil {
 		return err
@@ -388,7 +388,7 @@ func runTaskEpochMaintenanceFull(ctx context.Context, runParams RunParameters, s
 
 	// compact range
 	if err := ReportRun(ctx, runParams.rep, TaskEpochGenerateRange, s, func() error {
-		log(ctx).Infof("Attempting to compact a range of epoch indexes ...")
+		log(ctx).Info("Attempting to compact a range of epoch indexes ...")
 
 		return errors.Wrap(em.MaybeGenerateRangeCheckpoint(ctx), "error creating epoch range indexes")
 	}); err != nil {
@@ -397,7 +397,7 @@ func runTaskEpochMaintenanceFull(ctx context.Context, runParams RunParameters, s
 
 	// clean up epoch markers
 	err := ReportRun(ctx, runParams.rep, TaskEpochCleanupMarkers, s, func() error {
-		log(ctx).Infof("Cleaning up unneeded epoch markers...")
+		log(ctx).Info("Cleaning up unneeded epoch markers...")
 
 		return errors.Wrap(em.CleanupMarkers(ctx), "error removing epoch markers")
 	})
@@ -406,7 +406,7 @@ func runTaskEpochMaintenanceFull(ctx context.Context, runParams RunParameters, s
 	}
 
 	return ReportRun(ctx, runParams.rep, TaskEpochDeleteSupersededIndexes, s, func() error {
-		log(ctx).Infof("Cleaning up old index blobs which have already been compacted...")
+		log(ctx).Info("Cleaning up old index blobs which have already been compacted...")
 		return errors.Wrap(em.CleanupSupersededIndexes(ctx), "error removing superseded epoch index blobs")
 	})
 }
@@ -421,7 +421,7 @@ func runTaskDropDeletedContentsFull(ctx context.Context, runParams RunParameters
 	}
 
 	if safeDropTime.IsZero() {
-		log(ctx).Infof("Not enough time has passed since previous successful Snapshot GC. Will try again next time.")
+		log(ctx).Info("Not enough time has passed since previous successful Snapshot GC. Will try again next time.")
 		return nil
 	}
 

+ 1 - 1
snapshot/restore/local_fs_output.go

@@ -45,7 +45,7 @@ func getStreamCopier(ctx context.Context, targetpath string, sparse bool) (strea
 			}, nil
 		}
 
-		log(ctx).Debugf("sparse copying is not supported on Windows, falling back to regular copying")
+		log(ctx).Debug("sparse copying is not supported on Windows, falling back to regular copying")
 	}
 
 	// Wrap iocopy.Copy to conform to StreamCopier type.

+ 1 - 1
snapshot/restore/restore.go

@@ -260,7 +260,7 @@ func (c *copier) copyDirectory(ctx context.Context, d fs.Directory, targetPath s
 	if SafelySuffixablePath(targetPath) && currentdepth > maxdepth {
 		de, ok := d.(snapshot.HasDirEntry)
 		if !ok {
-			return errors.Errorf("fs.Directory object is not HasDirEntry?")
+			return errors.Errorf("fs.Directory '%s' object is not HasDirEntry?", d.Name())
 		}
 
 		if err := c.shallowoutput.WriteDirEntry(ctx, targetPath, de.DirEntry(), d); err != nil {

+ 1 - 1
snapshot/restore/shallow_fs_output.go

@@ -54,7 +54,7 @@ func (o *ShallowFilesystemOutput) WriteFile(ctx context.Context, relativePath st
 
 	mde, ok := f.(snapshot.HasDirEntry)
 	if !ok {
-		return errors.Errorf("fs object is not HasDirEntry?")
+		return errors.Errorf("fs object '%s' is not HasDirEntry?", f.Name())
 	}
 
 	de := mde.DirEntry()

+ 1 - 1
snapshot/restore/zip_output.go

@@ -93,7 +93,7 @@ func (o *ZipOutput) FileExists(ctx context.Context, relativePath string, l fs.Fi
 //
 //nolint:revive
 func (o *ZipOutput) CreateSymlink(ctx context.Context, relativePath string, e fs.Symlink) error {
-	log(ctx).Debugf("create symlink not implemented yet")
+	log(ctx).Debug("create symlink not implemented yet")
 	return nil
 }
 

+ 1 - 1
snapshot/snapshotfs/dir_rewriter.go

@@ -132,7 +132,7 @@ func (rw *DirRewriter) getCachedReplacement(ctx context.Context, parentPath stri
 }
 
 func (rw *DirRewriter) processDirectory(ctx context.Context, pathFromRoot string, entry *snapshot.DirEntry) (*snapshot.DirEntry, error) {
-	dirRewriterLog(ctx).Debugf("processDirectory", "path", pathFromRoot)
+	dirRewriterLog(ctx).Debugw("processDirectory", "path", pathFromRoot)
 
 	r, err := rw.rep.OpenObject(ctx, entry.ObjectID)
 	if err != nil {

+ 2 - 2
snapshot/snapshotgc/gc.go

@@ -56,7 +56,7 @@ func findInUseContentIDs(ctx context.Context, rep repo.Repository, used *bigmap.
 
 	defer w.Close(ctx)
 
-	log(ctx).Infof("Looking for active contents...")
+	log(ctx).Info("Looking for active contents...")
 
 	for _, m := range manifests {
 		root, err := snapshotfs.SnapshotRoot(rep, m)
@@ -111,7 +111,7 @@ func runInternal(ctx context.Context, rep repo.DirectRepositoryWriter, gcDelete
 		return errors.Wrap(err, "unable to find in-use content ID")
 	}
 
-	log(ctx).Infof("Looking for unreferenced contents...")
+	log(ctx).Info("Looking for unreferenced contents...")
 
 	// Ensure that the iteration includes deleted contents, so those can be
 	// undeleted (recovered).

+ 5 - 5
tests/repository_stress_test/repository_stress_test.go

@@ -405,7 +405,7 @@ func readFlushedContent(ctx context.Context, r repo.DirectRepositoryWriter, rs *
 }
 
 func listContents(ctx context.Context, r repo.DirectRepositoryWriter, _ *repomodel.RepositorySession, log logging.Logger) error {
-	log.Debugf("listContents()")
+	log.Debug("listContents()")
 
 	return errors.Wrapf(r.ContentReader().IterateContents(
 		ctx,
@@ -415,7 +415,7 @@ func listContents(ctx context.Context, r repo.DirectRepositoryWriter, _ *repomod
 }
 
 func listAndReadAllContents(ctx context.Context, r repo.DirectRepositoryWriter, _ *repomodel.RepositorySession, log logging.Logger) error {
-	log.Debugf("listAndReadAllContents()")
+	log.Debug("listAndReadAllContents()")
 
 	return errors.Wrapf(r.ContentReader().IterateContents(
 		ctx,
@@ -436,7 +436,7 @@ func compact(ctx context.Context, r repo.DirectRepositoryWriter, rs *repomodel.R
 		return errSkipped
 	}
 
-	log.Debugf("compact()")
+	log.Debug("compact()")
 
 	return errors.Wrapf(
 		r.ContentManager().CompactIndexes(ctx, indexblob.CompactOptions{MaxSmallBlobs: 1}),
@@ -444,7 +444,7 @@ func compact(ctx context.Context, r repo.DirectRepositoryWriter, rs *repomodel.R
 }
 
 func flush(ctx context.Context, r repo.DirectRepositoryWriter, rs *repomodel.RepositorySession, log logging.Logger) error {
-	log.Debugf("flush()")
+	log.Debug("flush()")
 
 	// capture contents and manifests we had before we start flushing.
 	// this is necessary since operations can proceed in parallel to Flush() which might add more data
@@ -465,7 +465,7 @@ func flush(ctx context.Context, r repo.DirectRepositoryWriter, rs *repomodel.Rep
 }
 
 func refresh(ctx context.Context, r repo.DirectRepositoryWriter, rs *repomodel.RepositorySession, log logging.Logger) error {
-	log.Debugf("refresh()")
+	log.Debug("refresh()")
 
 	// refresh model before refreshing repository to guarantee that repository has at least all the items in
 	// the model (possibly more).