Browse Source

refactor(general): use `errors.New` where appropriate (#4160)

Replaces 'errors.Errorf\("([^"]+)"\)' => 'errors.New("\1")'
Julio López 10 tháng trước
mục cha
commit
961a39039b
100 tập tin đã thay đổi với 181 bổ sung và 181 xóa
  1. 3 3
      cli/app.go
  2. 1 1
      cli/command_acl_enable.go
  3. 1 1
      cli/command_benchmark_compression.go
  4. 2 2
      cli/command_blob_shards_modify.go
  5. 1 1
      cli/command_cache_set.go
  6. 1 1
      cli/command_index_epoch_list.go
  7. 1 1
      cli/command_index_inspect.go
  8. 1 1
      cli/command_logs_show.go
  9. 1 1
      cli/command_ls.go
  10. 1 1
      cli/command_maintenance_set.go
  11. 1 1
      cli/command_repository_connect_from_config.go
  12. 1 1
      cli/command_repository_create.go
  13. 1 1
      cli/command_repository_set_client.go
  14. 1 1
      cli/command_repository_set_parameters.go
  15. 4 4
      cli/command_repository_sync.go
  16. 1 1
      cli/command_repository_upgrade.go
  17. 2 2
      cli/command_restore.go
  18. 1 1
      cli/command_server.go
  19. 1 1
      cli/command_server_source_manager_action.go
  20. 1 1
      cli/command_server_start.go
  21. 1 1
      cli/command_server_tls.go
  22. 2 2
      cli/command_snapshot_copy_move_history.go
  23. 1 1
      cli/command_snapshot_fix_remove_files.go
  24. 1 1
      cli/command_snapshot_pin.go
  25. 1 1
      cli/command_user_add_set.go
  26. 1 1
      cli/config.go
  27. 2 2
      cli/observability_flags.go
  28. 1 1
      cli/storage_filesystem.go
  29. 1 1
      cli/storage_s3.go
  30. 2 2
      cli/storage_sftp.go
  31. 1 1
      cli/update_check.go
  32. 1 1
      fs/entry.go
  33. 2 2
      fs/ignorefs/ignorefs.go
  34. 4 4
      internal/acl/acl.go
  35. 1 1
      internal/apiclient/apiclient.go
  36. 1 1
      internal/bigmap/bigmap_internal.go
  37. 2 2
      internal/blobcrypto/blob_crypto_test.go
  38. 1 1
      internal/cache/cache_storage_test.go
  39. 3 3
      internal/cache/content_cache_test.go
  40. 9 9
      internal/cache/persistent_lru_cache_test.go
  41. 1 1
      internal/dirutil/mssubdirall_test.go
  42. 4 4
      internal/epoch/epoch_manager.go
  43. 4 4
      internal/epoch/epoch_manager_test.go
  44. 3 3
      internal/gather/gather_bytes.go
  45. 1 1
      internal/gather/gather_bytes_test.go
  46. 1 1
      internal/impossible/impossible_test.go
  47. 1 1
      internal/mount/mount_net_use.go
  48. 1 1
      internal/mount/mount_unsupported.go
  49. 2 2
      internal/passwordpersist/passwordpersist.go
  50. 1 1
      internal/repodiag/blob_writer_test.go
  51. 1 1
      internal/repotesting/reconnectable_storage.go
  52. 1 1
      internal/retry/retry_test.go
  53. 1 1
      internal/server/api_error.go
  54. 1 1
      internal/server/api_estimate.go
  55. 1 1
      internal/server/api_restore.go
  56. 1 1
      internal/server/api_snapshots.go
  57. 3 3
      internal/server/grpc_session.go
  58. 3 3
      internal/server/server.go
  59. 1 1
      internal/server/server_maintenance_test.go
  60. 3 3
      internal/server/server_test.go
  61. 1 1
      internal/uitask/uitask_test.go
  62. 3 3
      internal/user/user_manager.go
  63. 1 1
      repo/blob/azure/azure_storage.go
  64. 1 1
      repo/blob/azure/azure_versioned.go
  65. 2 2
      repo/blob/filesystem/filesystem_storage.go
  66. 1 1
      repo/blob/filesystem/osinterface_mock_other_test.go
  67. 11 11
      repo/blob/filesystem/osinterface_mock_test.go
  68. 1 1
      repo/blob/filesystem/osinterface_mock_unix_test.go
  69. 1 1
      repo/blob/rclone/rclone_storage.go
  70. 1 1
      repo/blob/readonly/readonly_storage.go
  71. 1 1
      repo/blob/s3/s3_storage.go
  72. 2 2
      repo/blob/sftp/sftp_storage.go
  73. 2 2
      repo/blob/storage.go
  74. 1 1
      repo/blob/storage_test.go
  75. 8 8
      repo/blob/storagemetrics/storage_metrics_test.go
  76. 1 1
      repo/blob/throttling/throttling_semaphore.go
  77. 1 1
      repo/blob/throttling/token_bucket.go
  78. 2 2
      repo/connect.go
  79. 1 1
      repo/content/committed_content_index_disk_cache.go
  80. 1 1
      repo/content/content_manager_lock_free.go
  81. 1 1
      repo/content/content_manager_test.go
  82. 5 5
      repo/content/index/id.go
  83. 1 1
      repo/content/index/index_builder.go
  84. 3 3
      repo/content/index/index_v1.go
  85. 5 5
      repo/content/index/index_v2.go
  86. 2 2
      repo/content/index/merged_test.go
  87. 2 2
      repo/content/indexblob/index_blob_encryption_test.go
  88. 3 3
      repo/format/blobcfg_blob.go
  89. 1 1
      repo/format/content_format.go
  90. 1 1
      repo/format/format_blob.go
  91. 1 1
      repo/format/format_change_password.go
  92. 4 4
      repo/format/format_manager.go
  93. 1 1
      repo/format/format_manager_test.go
  94. 1 1
      repo/format/format_set_parameters.go
  95. 1 1
      repo/format/repository_config.go
  96. 2 2
      repo/grpc_repository_client.go
  97. 1 1
      repo/local_config.go
  98. 1 1
      repo/maintenance/content_rewrite.go
  99. 1 1
      repo/maintenance/maintenance_schedule.go
  100. 1 1
      repo/manifest/manifest_manager.go

+ 3 - 3
cli/app.go

@@ -380,7 +380,7 @@ func safetyFlagVar(cmd *kingpin.CmdClause, result *maintenance.SafetyParameters)
 	cmd.Flag("safety", "Safety level").Default("full").PreAction(func(_ *kingpin.ParseContext) error {
 		r, ok := safetyByName[str]
 		if !ok {
-			return errors.Errorf("unhandled safety level")
+			return errors.New("unhandled safety level")
 		}
 
 		*result = r
@@ -435,7 +435,7 @@ func assertDirectRepository(act func(ctx context.Context, rep repo.DirectReposit
 		// but will fail in the future when we have remote repository implementation
 		lr, ok := rep.(repo.DirectRepository)
 		if !ok {
-			return errors.Errorf("operation supported only on direct repository")
+			return errors.New("operation supported only on direct repository")
 		}
 
 		return act(ctx, lr)
@@ -611,7 +611,7 @@ To run this command despite the warning, set --advanced-commands=enabled
 
 `)
 
-		c.exitWithError(errors.Errorf("advanced commands are disabled"))
+		c.exitWithError(errors.New("advanced commands are disabled"))
 	}
 }
 

+ 1 - 1
cli/command_acl_enable.go

@@ -27,7 +27,7 @@ func (c *commandACLEnable) run(ctx context.Context, rep repo.RepositoryWriter) e
 	}
 
 	if len(entries) != 0 && !c.reset {
-		return errors.Errorf("ACLs already enabled")
+		return errors.New("ACLs already enabled")
 	}
 
 	if c.reset {

+ 1 - 1
cli/command_benchmark_compression.go

@@ -115,7 +115,7 @@ func (c *commandBenchmarkCompression) run(ctx context.Context) error {
 	}
 
 	if len(data) == 0 {
-		return errors.Errorf("empty data file")
+		return errors.New("empty data file")
 	}
 
 	repeatCount := c.repeat

+ 2 - 2
cli/command_blob_shards_modify.go

@@ -73,7 +73,7 @@ func parseShardSpec(shards string) ([]int, error) {
 
 		v, err := strconv.Atoi(p)
 		if err != nil || v < 0 {
-			return nil, errors.Errorf("invalid shard specification")
+			return nil, errors.New("invalid shard specification")
 		}
 
 		result = append(result, v)
@@ -98,7 +98,7 @@ func (c *commandBlobShardsModify) applyParameterChangesFromFlags(p *sharded.Para
 	if c.defaultShardSpec != "" {
 		v, err := parseShardSpec(c.defaultShardSpec)
 		if err != nil {
-			return errors.Errorf("invalid --default-shards")
+			return errors.New("invalid --default-shards")
 		}
 
 		p.DefaultShards = v

+ 1 - 1
cli/command_cache_set.go

@@ -132,7 +132,7 @@ func (c *commandCacheSetParams) run(ctx context.Context, _ repo.RepositoryWriter
 	}
 
 	if changed == 0 {
-		return errors.Errorf("no changes")
+		return errors.New("no changes")
 	}
 
 	//nolint:wrapcheck

+ 1 - 1
cli/command_index_epoch_list.go

@@ -29,7 +29,7 @@ func (c *commandIndexEpochList) run(ctx context.Context, rep repo.DirectReposito
 	}
 
 	if !ok {
-		return errors.Errorf("epoch manager is not active")
+		return errors.New("epoch manager is not active")
 	}
 
 	snap, err := emgr.Current(ctx)

+ 1 - 1
cli/command_index_inspect.go

@@ -70,7 +70,7 @@ func (c *commandIndexInspect) runWithOutput(ctx context.Context, rep repo.Direct
 			}
 		}
 	default:
-		return errors.Errorf("must pass either --all, --active or provide a list of blob IDs to inspect")
+		return errors.New("must pass either --all, --active or provide a list of blob IDs to inspect")
 	}
 
 	return nil

+ 1 - 1
cli/command_logs_show.go

@@ -49,7 +49,7 @@ func (c *commandLogsShow) run(ctx context.Context, rep repo.DirectRepository) er
 	}
 
 	if len(sessions) == 0 {
-		return errors.Errorf("no logs found")
+		return errors.New("no logs found")
 	}
 
 	// by default show latest one

+ 1 - 1
cli/command_ls.go

@@ -89,7 +89,7 @@ func (c *commandList) listDirectory(ctx context.Context, d fs.Directory, prefix,
 func (c *commandList) printDirectoryEntry(ctx context.Context, e fs.Entry, prefix, indent string) error {
 	hoid, ok := e.(object.HasObjectID)
 	if !ok {
-		return errors.Errorf("entry without object ID")
+		return errors.New("entry without object ID")
 	}
 
 	objectID := hoid.ObjectID()

+ 1 - 1
cli/command_maintenance_set.go

@@ -190,7 +190,7 @@ func (c *commandMaintenanceSet) run(ctx context.Context, rep repo.DirectReposito
 	}
 
 	if !changedParams && !changedSchedule {
-		return errors.Errorf("no changes specified")
+		return errors.New("no changes specified")
 	}
 
 	blobCfg, err := rep.FormatManager().BlobCfgBlob(ctx)

+ 1 - 1
cli/command_repository_connect_from_config.go

@@ -63,7 +63,7 @@ func (c *storageFromConfigFlags) connectToStorageFromConfigFile(ctx context.Cont
 	}
 
 	if cfg.Storage == nil {
-		return nil, errors.Errorf("connection file does not specify blob storage connection parameters, kopia server connections are not supported")
+		return nil, errors.New("connection file does not specify blob storage connection parameters, kopia server connections are not supported")
 	}
 
 	//nolint:wrapcheck

+ 1 - 1
cli/command_repository_create.go

@@ -100,7 +100,7 @@ func (c *commandRepositoryCreate) newRepositoryOptionsFromFlags() *repo.NewRepos
 }
 
 func (c *commandRepositoryCreate) ensureEmpty(ctx context.Context, s blob.Storage) error {
-	hasDataError := errors.Errorf("has data")
+	hasDataError := errors.New("has data")
 
 	err := s.ListBlobs(ctx, "", func(_ blob.Metadata) error {
 		return hasDataError

+ 1 - 1
cli/command_repository_set_client.go

@@ -113,7 +113,7 @@ func (c *commandRepositorySetClient) run(ctx context.Context, rep repo.Repositor
 	}
 
 	if !anyChange {
-		return errors.Errorf("no changes")
+		return errors.New("no changes")
 	}
 
 	//nolint:wrapcheck

+ 1 - 1
cli/command_repository_set_parameters.go

@@ -192,7 +192,7 @@ func (c *commandRepositorySetParameters) run(ctx context.Context, rep repo.Direc
 		if c.indexFormatVersion > mp.IndexVersion {
 			setIntParameter(ctx, c.indexFormatVersion, "index format version", &mp.IndexVersion, &anyChange)
 		} else {
-			return errors.Errorf("index format version can only be upgraded")
+			return errors.New("index format version can only be upgraded")
 		}
 	}
 

+ 4 - 4
cli/command_repository_sync.go

@@ -69,7 +69,7 @@ func (c *commandRepositorySyncTo) setup(svc advancedAppServices, parent commandP
 
 				dr, ok := rep.(repo.DirectRepository)
 				if !ok {
-					return errors.Errorf("sync only supports directly-connected repositories")
+					return errors.New("sync only supports directly-connected repositories")
 				}
 
 				return c.runSyncWithStorage(ctx, dr.BlobReader(), st)
@@ -356,7 +356,7 @@ func (c *commandRepositorySyncTo) ensureRepositoriesHaveSameFormatBlob(ctx conte
 		// target does not have format blob, save it there first.
 		if errors.Is(err, blob.ErrBlobNotFound) {
 			if c.repositorySyncDestinationMustExist {
-				return errors.Errorf("destination repository does not have a format blob")
+				return errors.New("destination repository does not have a format blob")
 			}
 
 			return errors.Wrap(dst.PutBlob(ctx, format.KopiaRepositoryBlobID, srcData.Bytes(), blob.PutOptions{}), "error saving format blob")
@@ -379,7 +379,7 @@ func (c *commandRepositorySyncTo) ensureRepositoriesHaveSameFormatBlob(ctx conte
 		return nil
 	}
 
-	return errors.Errorf("destination repository contains incompatible data")
+	return errors.New("destination repository contains incompatible data")
 }
 
 func parseUniqueID(r gather.Bytes) (string, error) {
@@ -392,7 +392,7 @@ func parseUniqueID(r gather.Bytes) (string, error) {
 	}
 
 	if f.UniqueID == "" {
-		return "", errors.Errorf("unique ID not found")
+		return "", errors.New("unique ID not found")
 	}
 
 	return f.UniqueID, nil

+ 1 - 1
cli/command_repository_upgrade.go

@@ -426,7 +426,7 @@ func (c *commandRepositoryUpgrade) drainAllClients(ctx context.Context, rep repo
 
 		// TODO: this can get stuck
 		if !c.sleepWithContext(ctx, l.StatusPollInterval) {
-			return errors.Errorf("upgrade drain interrupted")
+			return errors.New("upgrade drain interrupted")
 		}
 	}
 

+ 2 - 2
cli/command_restore.go

@@ -242,7 +242,7 @@ func (c *commandRestore) constructTargetPairs(rep repo.Repository) error {
 	}
 
 	// Some undefined mixture of placeholders and other arguments.
-	return errors.Errorf("restore requires a source and targetpath or placeholders")
+	return errors.New("restore requires a source and targetpath or placeholders")
 }
 
 func (c *commandRestore) restoreOutput(ctx context.Context, rep repo.Repository) (restore.Output, error) {
@@ -473,7 +473,7 @@ func (c *commandRestore) tryToConvertPathToID(ctx context.Context, rep repo.Repo
 	}
 
 	if si.Path == "" {
-		return "", errors.Errorf("the source must contain a path element")
+		return "", errors.New("the source must contain a path element")
 	}
 
 	manifestIDs, err := findSnapshotsForSource(ctx, rep, si, map[string]string{})

+ 1 - 1
cli/command_server.go

@@ -76,7 +76,7 @@ func (c *commandServer) setup(svc advancedAppServices, parent commandParent) {
 
 func (c *serverClientFlags) serverAPIClientOptions() (apiclient.Options, error) {
 	if c.serverAddress == "" {
-		return apiclient.Options{}, errors.Errorf("missing server address")
+		return apiclient.Options{}, errors.New("missing server address")
 	}
 
 	return apiclient.Options{

+ 1 - 1
cli/command_server_source_manager_action.go

@@ -39,7 +39,7 @@ func (c *commandServerSourceManagerAction) triggerActionOnMatchingSources(ctx co
 
 	if !c.all {
 		if c.source == "" {
-			return errors.Errorf("must specify source or --all")
+			return errors.New("must specify source or --all")
 		}
 
 		absPath, err := filepath.Abs(c.source)

+ 1 - 1
cli/command_server_start.go

@@ -322,7 +322,7 @@ func (c *commandServerStart) getAuthenticator(ctx context.Context) (auth.Authent
 	switch {
 	case c.serverStartWithoutPassword:
 		if !c.serverStartInsecure {
-			return nil, errors.Errorf("--without-password specified without --insecure, refusing to start server")
+			return nil, errors.New("--without-password specified without --insecure, refusing to start server")
 		}
 
 		return nil, nil

+ 1 - 1
cli/command_server_tls.go

@@ -162,7 +162,7 @@ func (c *commandServerStart) startServerWithOptionalTLSAndListener(ctx context.C
 
 	default:
 		if !c.serverStartInsecure {
-			return errors.Errorf("TLS not configured. To start server without encryption pass --insecure")
+			return errors.New("TLS not configured. To start server without encryption pass --insecure")
 		}
 
 		fmt.Fprintf(c.out.stderr(), "SERVER ADDRESS: %shttp://%v\n", udsPfx, httpServer.Addr) //nolint:errcheck

+ 2 - 2
cli/command_snapshot_copy_move_history.go

@@ -178,13 +178,13 @@ func (c *commandSnapshotCopyMoveHistory) getCopySourceAndDestination(rep repo.Re
 	if di.Path != "" && si.Path == "" {
 		// it is illegal to specify source without path, but destination with a path
 		// as it would result in multiple individual paths being squished together.
-		return si, di, errors.Errorf("path specified on destination but not source")
+		return si, di, errors.New("path specified on destination but not source")
 	}
 
 	if di.UserName != "" && si.UserName == "" {
 		// it is illegal to specify source without username, but destination with a username
 		// as it would result in multiple individual paths being squished together.
-		return si, di, errors.Errorf("username specified on destination but not source")
+		return si, di, errors.New("username specified on destination but not source")
 	}
 
 	return si, di, nil

+ 1 - 1
cli/command_snapshot_fix_remove_files.go

@@ -54,7 +54,7 @@ func (c *commandSnapshotFixRemoveFiles) rewriteEntry(ctx context.Context, dirRel
 
 func (c *commandSnapshotFixRemoveFiles) run(ctx context.Context, rep repo.RepositoryWriter) error {
 	if len(c.removeObjectIDs)+len(c.removeFilesByName) == 0 {
-		return errors.Errorf("must specify files to remove")
+		return errors.New("must specify files to remove")
 	}
 
 	return c.common.rewriteMatchingSnapshots(ctx, rep, c.rewriteEntry)

+ 1 - 1
cli/command_snapshot_pin.go

@@ -27,7 +27,7 @@ func (c *commandSnapshotPin) setup(svc appServices, parent commandParent) {
 
 func (c *commandSnapshotPin) run(ctx context.Context, rep repo.RepositoryWriter) error {
 	if len(c.addPins)+len(c.removePins) == 0 {
-		return errors.Errorf("must specify --add and/or --remove")
+		return errors.New("must specify --add and/or --remove")
 	}
 
 	for _, id := range c.snapshotIDs {

+ 1 - 1
cli/command_user_add_set.go

@@ -93,7 +93,7 @@ func (c *commandServerUserAddSet) runServerUserAddSet(ctx context.Context, rep r
 	}
 
 	if !changed && !c.isNew {
-		return errors.Errorf("no change")
+		return errors.New("no change")
 	}
 
 	if err := user.SetUserProfile(ctx, rep, up); err != nil {

+ 1 - 1
cli/config.go

@@ -54,7 +54,7 @@ func (c *App) openRepository(ctx context.Context, required bool) (repo.Repositor
 			return nil, nil
 		}
 
-		return nil, errors.Errorf("repository is not connected. See https://kopia.io/docs/repositories/")
+		return nil, errors.New("repository is not connected. See https://kopia.io/docs/repositories/")
 	}
 
 	c.maybePrintUpdateNotification(ctx)

+ 2 - 2
cli/observability_flags.go

@@ -169,7 +169,7 @@ func (c *observabilityFlags) maybeStartMetricsPusher(ctx context.Context) error
 
 		parts := strings.SplitN(g, ":", nParts)
 		if len(parts) != nParts {
-			return errors.Errorf("grouping must be name:value")
+			return errors.New("grouping must be name:value")
 		}
 
 		name := parts[0]
@@ -196,7 +196,7 @@ func (c *observabilityFlags) maybeStartMetricsPusher(ctx context.Context) error
 
 func (c *observabilityFlags) maybeStartTraceExporter(ctx context.Context) error {
 	if c.enableJaeger {
-		return errors.Errorf("Flag '--enable-jaeger-collector' is no longer supported, use '--otlp' instead. See https://github.com/kopia/kopia/pull/3264 for more information")
+		return errors.New("Flag '--enable-jaeger-collector' is no longer supported, use '--otlp' instead. See https://github.com/kopia/kopia/pull/3264 for more information")
 	}
 
 	if !c.otlpTrace {

+ 1 - 1
cli/storage_filesystem.go

@@ -46,7 +46,7 @@ func (c *storageFilesystemFlags) Connect(ctx context.Context, isCreate bool, for
 	fso.Path = ospath.ResolveUserFriendlyPath(fso.Path, false)
 
 	if !ospath.IsAbs(fso.Path) {
-		return nil, errors.Errorf("filesystem repository path must be absolute")
+		return nil, errors.New("filesystem repository path must be absolute")
 	}
 
 	if v := c.connectOwnerUID; v != "" {

+ 1 - 1
cli/storage_s3.go

@@ -55,7 +55,7 @@ func (c *storageS3Flags) Setup(svc StorageProviderServices, cmd *kingpin.CmdClau
 
 func (c *storageS3Flags) preActionLoadPEMPath(_ *kingpin.ParseContext) error {
 	if len(c.s3options.RootCA) > 0 {
-		return errors.Errorf("root-ca-pem-base64 and root-ca-pem-path are mutually exclusive")
+		return errors.New("root-ca-pem-base64 and root-ca-pem-path are mutually exclusive")
 	}
 
 	data, err := os.ReadFile(c.rootCaPemPath) //#nosec

+ 2 - 2
cli/storage_sftp.go

@@ -86,7 +86,7 @@ func (c *storageSFTPFlags) getOptions(formatVersion int) (*sftp.Options, error)
 			sftpo.Keyfile = a
 
 		default:
-			return nil, errors.Errorf("must provide either --sftp-password, --keyfile or --key-data")
+			return nil, errors.New("must provide either --sftp-password, --keyfile or --key-data")
 		}
 
 		switch {
@@ -100,7 +100,7 @@ func (c *storageSFTPFlags) getOptions(formatVersion int) (*sftp.Options, error)
 
 			sftpo.KnownHostsFile = a
 		default:
-			return nil, errors.Errorf("must provide either --known-hosts or --known-hosts-data")
+			return nil, errors.New("must provide either --known-hosts or --known-hosts-data")
 		}
 	}
 

+ 1 - 1
cli/update_check.go

@@ -161,7 +161,7 @@ func (c *App) maybeCheckForUpdates(ctx context.Context) (string, error) {
 	if v := os.Getenv(c.EnvName(checkForUpdatesEnvar)); v != "" {
 		// see if environment variable is set to false.
 		if b, err := strconv.ParseBool(v); err == nil && !b {
-			return "", errors.Errorf("update check disabled")
+			return "", errors.New("update check disabled")
 		}
 	}
 

+ 1 - 1
fs/entry.go

@@ -13,7 +13,7 @@ import (
 const ModBits = os.ModePerm | os.ModeSetgid | os.ModeSetuid | os.ModeSticky
 
 // ErrUnknown is returned by ErrorEntry.ErrorInfo() to indicate that type of an entry is unknown.
-var ErrUnknown = errors.Errorf("unknown or unsupported entry type")
+var ErrUnknown = errors.New("unknown or unsupported entry type")
 
 // Entry represents a filesystem entry, which can be Directory, File, or Symlink.
 type Entry interface {

+ 2 - 2
fs/ignorefs/ignorefs.go

@@ -85,7 +85,7 @@ func isCorrectCacheDirSignature(ctx context.Context, f fs.File) error {
 	)
 
 	if f.Size() < int64(validSignatureLen) {
-		return errors.Errorf("cache dir marker file too short")
+		return errors.New("cache dir marker file too short")
 	}
 
 	r, err := f.Open(ctx)
@@ -102,7 +102,7 @@ func isCorrectCacheDirSignature(ctx context.Context, f fs.File) error {
 	}
 
 	if string(sig) != validSignature {
-		return errors.Errorf("invalid cache dir marker file signature")
+		return errors.New("invalid cache dir marker file signature")
 	}
 
 	return nil

+ 4 - 4
internal/acl/acl.go

@@ -73,7 +73,7 @@ type valueValidatorFunc func(v string) error
 
 func nonEmptyString(v string) error {
 	if v == "" {
-		return errors.Errorf("must be non-empty")
+		return errors.New("must be non-empty")
 	}
 
 	return nil
@@ -119,12 +119,12 @@ var allowedLabelsForType = map[string]map[string]valueValidatorFunc{
 // Validate validates entry.
 func (e *Entry) Validate() error {
 	if e == nil {
-		return errors.Errorf("nil acl")
+		return errors.New("nil acl")
 	}
 
 	parts := strings.Split(e.User, "@")
 	if len(parts) != 2 { //nolint:mnd
-		return errors.Errorf("user must be 'username@hostname' possibly including wildcards")
+		return errors.New("user must be 'username@hostname' possibly including wildcards")
 	}
 
 	typ := e.Target[manifest.TypeLabelKey]
@@ -153,7 +153,7 @@ func (e *Entry) Validate() error {
 	}
 
 	if accessLevelToString[e.Access] == "" {
-		return errors.Errorf("valid access level must be specified")
+		return errors.New("valid access level must be specified")
 	}
 
 	return nil

+ 1 - 1
internal/apiclient/apiclient.go

@@ -73,7 +73,7 @@ func (c *KopiaAPIClient) FetchCSRFTokenForTesting(ctx context.Context) error {
 
 	match := re.FindSubmatch(b)
 	if match == nil {
-		return errors.Errorf("CSRF token not found")
+		return errors.New("CSRF token not found")
 	}
 
 	c.CSRFToken = string(match[1])

+ 1 - 1
internal/bigmap/bigmap_internal.go

@@ -435,7 +435,7 @@ func newInternalMapWithOptions(ctx context.Context, hasValues bool, opts *Option
 	tablewSizeIndex := opts.InitialSizeLogarithm - minSizeLogarithm
 
 	if tablewSizeIndex < 1 {
-		return nil, errors.Errorf("invalid initial size")
+		return nil, errors.New("invalid initial size")
 	}
 
 	m := &internalMap{

+ 2 - 2
internal/blobcrypto/blob_crypto_test.go

@@ -60,11 +60,11 @@ func TestBlobCrypto(t *testing.T) {
 type badEncryptor struct{}
 
 func (badEncryptor) Encrypt(input gather.Bytes, contentID []byte, output *gather.WriteBuffer) error {
-	return errors.Errorf("some error")
+	return errors.New("some error")
 }
 
 func (badEncryptor) Decrypt(input gather.Bytes, contentID []byte, output *gather.WriteBuffer) error {
-	return errors.Errorf("some error")
+	return errors.New("some error")
 }
 
 func (badEncryptor) Overhead() int { return 0 }

+ 1 - 1
internal/cache/cache_storage_test.go

@@ -28,7 +28,7 @@ func TestNewStorageOrNil(t *testing.T) {
 	_, err = NewStorageOrNil(ctx, "relative/path/to/cache/dir", 1000, "subdir")
 	require.Error(t, err)
 
-	someError := errors.Errorf("some error")
+	someError := errors.New("some error")
 
 	oldMkdirAll := mkdirAll
 

+ 3 - 3
internal/cache/content_cache_test.go

@@ -195,8 +195,8 @@ func verifyContentCache(t *testing.T, cc cache.ContentCache, cacheStorage blob.S
 			{"xf0f0f3", "no-such-content", 0, -1, nil, blob.ErrBlobNotFound},
 			{"xf0f0f4", "no-such-content", 10, 5, nil, blob.ErrBlobNotFound},
 			{"f0f0f5", "content-1", 7, 3, []byte{8, 9, 10}, nil},
-			{"xf0f0f6", "content-1", 11, 10, nil, errors.Errorf("invalid offset: 11: invalid blob offset or length")},
-			{"xf0f0f6", "content-1", -1, 5, nil, errors.Errorf("invalid offset: -1: invalid blob offset or length")},
+			{"xf0f0f6", "content-1", 11, 10, nil, errors.New("invalid offset: 11: invalid blob offset or length")},
+			{"xf0f0f6", "content-1", -1, 5, nil, errors.New("invalid offset: -1: invalid blob offset or length")},
 		}
 
 		var v gather.WriteBuffer
@@ -356,5 +356,5 @@ type withoutTouchBlob struct {
 }
 
 func (c withoutTouchBlob) TouchBlob(ctx context.Context, blobID blob.ID, threshold time.Duration) (time.Time, error) {
-	return time.Time{}, errors.Errorf("TouchBlob not implemented")
+	return time.Time{}, errors.New("TouchBlob not implemented")
 }

+ 9 - 9
internal/cache/persistent_lru_cache_test.go

@@ -87,7 +87,7 @@ func TestPersistentLRUCache(t *testing.T) {
 	}, nil, clock.Now)
 	require.NoError(t, err)
 
-	someError := errors.Errorf("some error")
+	someError := errors.New("some error")
 
 	var tmp2 gather.WriteBuffer
 	defer tmp2.Close()
@@ -126,7 +126,7 @@ func TestPersistentLRUCache_Invalid(t *testing.T) {
 
 	ctx := testlogging.ContextWithLevel(t, testlogging.LevelInfo)
 
-	someError := errors.Errorf("some error")
+	someError := errors.New("some error")
 
 	st := blobtesting.NewMapStorage(blobtesting.DataMap{}, nil, nil)
 	fs := blobtesting.NewFaultyStorage(st)
@@ -144,7 +144,7 @@ func TestPersistentLRUCache_GetDeletesInvalidBlob(t *testing.T) {
 
 	ctx := testlogging.ContextWithLevel(t, testlogging.LevelInfo)
 
-	someError := errors.Errorf("some error")
+	someError := errors.New("some error")
 
 	data := blobtesting.DataMap{}
 
@@ -176,7 +176,7 @@ func TestPersistentLRUCache_PutIgnoresStorageFailure(t *testing.T) {
 
 	ctx := testlogging.ContextWithLevel(t, testlogging.LevelInfo)
 
-	someError := errors.Errorf("some error")
+	someError := errors.New("some error")
 
 	data := blobtesting.DataMap{}
 
@@ -222,7 +222,7 @@ func TestPersistentLRUCache_SweepMinSweepAge(t *testing.T) {
 	time.Sleep(1 * time.Second)
 
 	// simulate error during final sweep
-	fs.AddFault(blobtesting.MethodListBlobs).ErrorInstead(errors.Errorf("some error"))
+	fs.AddFault(blobtesting.MethodListBlobs).ErrorInstead(errors.New("some error"))
 	pc.Close(ctx)
 
 	// both keys are retained since we're under min sweep age
@@ -248,14 +248,14 @@ func TestPersistentLRUCache_SweepIgnoresErrors(t *testing.T) {
 	require.NoError(t, err)
 
 	// ignore delete errors forever
-	fs.AddFault(blobtesting.MethodDeleteBlob).ErrorInstead(errors.Errorf("some delete error")).Repeat(1e6)
+	fs.AddFault(blobtesting.MethodDeleteBlob).ErrorInstead(errors.New("some delete error")).Repeat(1e6)
 
 	pc.Put(ctx, "key", gather.FromSlice([]byte{1, 2, 3}))
 	pc.Put(ctx, "key2", gather.FromSlice(bytes.Repeat([]byte{1, 2, 3}, 10)))
 	time.Sleep(500 * time.Millisecond)
 
 	// simulate error during sweep
-	fs.AddFaults(blobtesting.MethodListBlobs, fault.New().ErrorInstead(errors.Errorf("some error")))
+	fs.AddFaults(blobtesting.MethodListBlobs, fault.New().ErrorInstead(errors.New("some error")))
 
 	time.Sleep(500 * time.Millisecond)
 
@@ -286,7 +286,7 @@ func TestPersistentLRUCache_Sweep1(t *testing.T) {
 	time.Sleep(1 * time.Second)
 
 	// simulate error during final sweep
-	fs.AddFaults(blobtesting.MethodListBlobs, fault.New().ErrorInstead(errors.Errorf("some error")))
+	fs.AddFaults(blobtesting.MethodListBlobs, fault.New().ErrorInstead(errors.New("some error")))
 	pc.Close(ctx)
 }
 
@@ -305,7 +305,7 @@ func TestPersistentLRUCacheNil(t *testing.T) {
 
 	called := false
 
-	dummyError := errors.Errorf("dummy error")
+	dummyError := errors.New("dummy error")
 
 	require.ErrorIs(t, pc.GetOrLoad(ctx, "key", func(output *gather.WriteBuffer) error {
 		called = true

+ 1 - 1
internal/dirutil/mssubdirall_test.go

@@ -59,7 +59,7 @@ func TestMkSubdirAll(t *testing.T) {
 		}
 	}
 
-	osi.mkdirErr = errors.Errorf("some error")
+	osi.mkdirErr = errors.New("some error")
 
 	require.ErrorIs(t, dirutil.MkSubdirAll(osi, td, filepath.Join(td, "somedir4"), 0o755), osi.mkdirErr)
 }

+ 4 - 4
internal/epoch/epoch_manager.go

@@ -35,7 +35,7 @@ type ParametersProvider interface {
 
 // ErrVerySlowIndexWrite is returned by WriteIndex if a write takes more than 2 epochs (usually >48h).
 // This is theoretically possible with laptops going to sleep, etc.
-var ErrVerySlowIndexWrite = errors.Errorf("extremely slow index write - index write took more than two epochs")
+var ErrVerySlowIndexWrite = errors.New("extremely slow index write - index write took more than two epochs")
 
 // Parameters encapsulates all parameters that influence the behavior of epoch manager.
 //
@@ -451,7 +451,7 @@ func (e *Manager) refreshLocked(ctx context.Context) error {
 	nextDelayTime := initiaRefreshAttemptSleep
 
 	if !p.Enabled {
-		return errors.Errorf("epoch manager not enabled")
+		return errors.New("epoch manager not enabled")
 	}
 
 	for err := e.refreshAttemptLocked(ctx); err != nil; err = e.refreshAttemptLocked(ctx) {
@@ -775,7 +775,7 @@ func (e *Manager) GetCompleteIndexSet(ctx context.Context, maxEpoch int) ([]blob
 	}
 }
 
-var errWriteIndexTryAgain = errors.Errorf("try again")
+var errWriteIndexTryAgain = errors.New("try again")
 
 // WriteIndex writes new index blob by picking the appropriate prefix based on current epoch.
 func (e *Manager) WriteIndex(ctx context.Context, dataShards map[blob.ID]blob.Bytes) ([]blob.Metadata, error) {
@@ -1015,7 +1015,7 @@ func (e *Manager) generateRangeCheckpointFromCommittedState(ctx context.Context,
 	}
 
 	if e.timeFunc().After(cs.ValidUntil) {
-		return errors.Errorf("not generating full checkpoint - the committed state is no longer valid")
+		return errors.New("not generating full checkpoint - the committed state is no longer valid")
 	}
 
 	if err := e.compact(ctx, blob.IDsFromMetadata(completeSet), rangeCheckpointBlobPrefix(minEpoch, maxEpoch)); err != nil {

+ 4 - 4
internal/epoch/epoch_manager_test.go

@@ -80,7 +80,7 @@ func (te *epochManagerTestEnv) interruptedCompaction(ctx context.Context, _ []bl
 	te.st.PutBlob(ctx, blob.ID(fmt.Sprintf("%v%016x-s%v-c3", prefix, sess, rand.Int63())), gather.FromSlice([]byte("dummy")), blob.PutOptions{})
 	te.st.PutBlob(ctx, blob.ID(fmt.Sprintf("%v%016x-s%v-c3", prefix, sess, rand.Int63())), gather.FromSlice([]byte("dummy")), blob.PutOptions{})
 
-	return errors.Errorf("failed for some reason")
+	return errors.New("failed for some reason")
 }
 
 func newTestEnv(t *testing.T) *epochManagerTestEnv {
@@ -319,7 +319,7 @@ func TestIndexEpochManager_DeletionFailing(t *testing.T) {
 
 	te.faultyStorage.
 		AddFault(blobtesting.MethodDeleteBlob).
-		ErrorInstead(errors.Errorf("something bad happened")).
+		ErrorInstead(errors.New("something bad happened")).
 		Repeat(200)
 
 	// set up test environment in which compactions never succeed for whatever reason.
@@ -772,7 +772,7 @@ func TestInvalid_WriteIndex(t *testing.T) {
 	defer cancel()
 
 	// on first write, advance time enough to lose current context and go to the next epoch.
-	te.faultyStorage.AddFault(blobtesting.MethodListBlobs).Repeat(100).Before(cancel).ErrorInstead(errors.Errorf("canceled"))
+	te.faultyStorage.AddFault(blobtesting.MethodListBlobs).Repeat(100).Before(cancel).ErrorInstead(errors.New("canceled"))
 
 	_, err := te.writeIndexFiles(ctx,
 		newFakeIndexWithEntries(1),
@@ -793,7 +793,7 @@ func TestInvalid_ForceAdvanceEpoch(t *testing.T) {
 	require.ErrorIs(t, err, ctx.Err())
 
 	ctx = testlogging.Context(t)
-	someError := errors.Errorf("failed")
+	someError := errors.New("failed")
 	te.faultyStorage.AddFault(blobtesting.MethodPutBlob).ErrorInstead(someError)
 
 	err = te.mgr.forceAdvanceEpoch(ctx)

+ 3 - 3
internal/gather/gather_bytes.go

@@ -14,7 +14,7 @@ var (
 	//nolint:gochecknoglobals
 	invalidSliceBuf = []byte(uuid.NewString())
 	// ErrInvalidOffset checkable error for supplying an invalid offset.
-	ErrInvalidOffset = errors.Errorf("invalid offset")
+	ErrInvalidOffset = errors.New("invalid offset")
 )
 
 // Bytes represents a sequence of bytes split into slices.
@@ -42,7 +42,7 @@ func (b *Bytes) AppendSectionTo(w io.Writer, offset, size int) error {
 	b.assertValid()
 
 	if offset < 0 {
-		return errors.Errorf("invalid offset")
+		return errors.New("invalid offset")
 	}
 
 	// find the index of starting slice
@@ -237,7 +237,7 @@ func (b *bytesReadSeekCloser) Seek(offset int64, whence int) (int64, error) {
 	}
 
 	if newOffset < 0 || newOffset > b.b.Length() {
-		return 0, errors.Errorf("invalid seek")
+		return 0, errors.New("invalid seek")
 	}
 
 	b.offset = newOffset

+ 1 - 1
internal/gather/gather_bytes_test.go

@@ -122,7 +122,7 @@ func TestGatherBytes(t *testing.T) {
 
 			require.Equal(t, tmp.ToByteSlice(), b.ToByteSlice())
 
-			someError := errors.Errorf("some error")
+			someError := errors.New("some error")
 
 			// WriteTo propagates error
 			if b.Length() > 0 {

+ 1 - 1
internal/impossible/impossible_test.go

@@ -12,7 +12,7 @@ import (
 func TestImpossible(t *testing.T) {
 	impossible.PanicOnError(nil)
 
-	someErr := errors.Errorf("some error")
+	someErr := errors.New("some error")
 	require.PanicsWithError(t, someErr.Error(), func() {
 		impossible.PanicOnError(someErr)
 	})

+ 1 - 1
internal/mount/mount_net_use.go

@@ -17,7 +17,7 @@ import (
 // Directory mounts a given directory under a provided drive letter.
 func Directory(ctx context.Context, entry fs.Directory, driveLetter string, _ Options) (Controller, error) {
 	if !isValidWindowsDriveOrAsterisk(driveLetter) {
-		return nil, errors.Errorf("must be a valid drive letter or asterisk")
+		return nil, errors.New("must be a valid drive letter or asterisk")
 	}
 
 	c, err := DirectoryWebDAV(ctx, entry)

+ 1 - 1
internal/mount/mount_unsupported.go

@@ -15,5 +15,5 @@ import (
 //
 //nolint:revive
 func Directory(ctx context.Context, entry fs.Directory, mountPoint string, mountOptions Options) (Controller, error) {
-	return nil, errors.Errorf("mounting is not supported")
+	return nil, errors.New("mounting is not supported")
 }

+ 2 - 2
internal/passwordpersist/passwordpersist.go

@@ -10,10 +10,10 @@ import (
 )
 
 // ErrPasswordNotFound is returned when a password cannot be found in a persistent storage.
-var ErrPasswordNotFound = errors.Errorf("password not found")
+var ErrPasswordNotFound = errors.New("password not found")
 
 // ErrUnsupported is returned when a password storage is not supported.
-var ErrUnsupported = errors.Errorf("password storage not supported")
+var ErrUnsupported = errors.New("password storage not supported")
 
 var log = logging.Module("passwordpersist")
 

+ 1 - 1
internal/repodiag/blob_writer_test.go

@@ -41,7 +41,7 @@ func TestDiagWriter(t *testing.T) {
 	<-closeCalled2
 
 	// simulate write failure
-	someErr := errors.Errorf("some error")
+	someErr := errors.New("some error")
 	fs.AddFault(blobtesting.MethodPutBlob).ErrorInstead(someErr)
 
 	closeCalled3 := make(chan struct{})

+ 1 - 1
internal/repotesting/reconnectable_storage.go

@@ -59,7 +59,7 @@ func (s reconnectableStorage) ConnectionInfo() blob.ConnectionInfo {
 // New creates new reconnectable storage.
 func New(ctx context.Context, opt *ReconnectableStorageOptions, isCreate bool) (blob.Storage, error) {
 	if opt.UUID == "" {
-		return nil, errors.Errorf("missing UUID")
+		return nil, errors.New("missing UUID")
 	}
 
 	v, ok := reconnectableStorageByUUID.Load(opt.UUID)

+ 1 - 1
internal/retry/retry_test.go

@@ -39,7 +39,7 @@ func TestRetry(t *testing.T) {
 			}
 			return 4, nil
 		}, 4, nil},
-		{"retriable-never-succeeds", func() (int, error) { return 0, errRetriable }, 0, errors.Errorf("unable to complete retriable-never-succeeds despite 3 retries")},
+		{"retriable-never-succeeds", func() (int, error) { return 0, errRetriable }, 0, errors.New("unable to complete retriable-never-succeeds despite 3 retries")},
 	}
 
 	ctx := testlogging.Context(t)

+ 1 - 1
internal/server/api_error.go

@@ -32,7 +32,7 @@ func accessDeniedError() *apiError {
 }
 
 func repositoryNotWritableError() *apiError {
-	return internalServerError(errors.Errorf("repository is not writable"))
+	return internalServerError(errors.New("repository is not writable"))
 }
 
 func internalServerError(err error) *apiError {

+ 1 - 1
internal/server/api_estimate.go

@@ -147,7 +147,7 @@ func handleEstimate(ctx context.Context, rc requestContext) (interface{}, *apiEr
 
 	task, ok := rc.srv.taskManager().GetTask(taskID)
 	if !ok {
-		return nil, internalServerError(errors.Errorf("task not found"))
+		return nil, internalServerError(errors.New("task not found"))
 	}
 
 	return task, nil

+ 1 - 1
internal/server/api_restore.go

@@ -118,7 +118,7 @@ func handleRestore(ctx context.Context, rc requestContext) (interface{}, *apiErr
 
 	task, ok := rc.srv.taskManager().GetTask(taskID)
 	if !ok {
-		return nil, internalServerError(errors.Errorf("task not found"))
+		return nil, internalServerError(errors.New("task not found"))
 	}
 
 	return task, nil

+ 1 - 1
internal/server/api_snapshots.go

@@ -93,7 +93,7 @@ func handleDeleteSnapshots(ctx context.Context, rc requestContext) (interface{},
 
 			for _, sn := range snaps {
 				if sn.Source != req.SourceInfo {
-					return errors.Errorf("source info does not match snapshot source")
+					return errors.New("source info does not match snapshot source")
 				}
 			}
 

+ 3 - 3
internal/server/grpc_session.go

@@ -186,10 +186,10 @@ func handleSessionRequest(ctx context.Context, dw repo.DirectRepositoryWriter, a
 		respond(handleApplyRetentionPolicyRequest(ctx, dw, authz, usernameAtHostname, inner.ApplyRetentionPolicy))
 
 	case *grpcapi.SessionRequest_InitializeSession:
-		respond(errorResponse(errors.Errorf("InitializeSession must be the first request in a session")))
+		respond(errorResponse(errors.New("InitializeSession must be the first request in a session")))
 
 	default:
-		respond(errorResponse(errors.Errorf("unhandled session request")))
+		respond(errorResponse(errors.New("unhandled session request")))
 	}
 }
 
@@ -546,7 +546,7 @@ func (s *Server) handleInitialSessionHandshake(srv grpcapi.KopiaRepository_Sessi
 
 	ir := initializeReq.GetInitializeSession()
 	if ir == nil {
-		return repo.WriteSessionOptions{}, errors.Errorf("missing initialization request")
+		return repo.WriteSessionOptions{}, errors.New("missing initialization request")
 	}
 
 	scc := dr.ContentReader().SupportsContentCompression()

+ 3 - 3
internal/server/server.go

@@ -848,7 +848,7 @@ func (s *Server) InitRepositoryAsync(ctx context.Context, mode string, initializ
 
 		if cctx.Err() != nil {
 			// context canceled
-			return errors.Errorf("operation has been canceled")
+			return errors.New("operation has been canceled")
 		}
 
 		if err != nil {
@@ -1044,11 +1044,11 @@ func (s *Server) refreshScheduler(reason string) {
 // The server will manage sources for a given username@hostname.
 func New(ctx context.Context, options *Options) (*Server, error) {
 	if options.Authorizer == nil {
-		return nil, errors.Errorf("missing authorizer")
+		return nil, errors.New("missing authorizer")
 	}
 
 	if options.PasswordPersist == nil {
-		return nil, errors.Errorf("missing password persistence")
+		return nil, errors.New("missing password persistence")
 	}
 
 	if options.AuthCookieSigningKey == "" {

+ 1 - 1
internal/server/server_maintenance_test.go

@@ -78,7 +78,7 @@ func TestServerMaintenance(t *testing.T) {
 	}, 3*time.Second, 10*time.Millisecond)
 
 	ts.mu.Lock()
-	ts.err = errors.Errorf("some error")
+	ts.err = errors.New("some error")
 	ts.mu.Unlock()
 
 	mm.trigger()

+ 3 - 3
internal/server/server_test.go

@@ -190,7 +190,7 @@ func remoteRepositoryTest(ctx context.Context, t *testing.T, rep repo.Repository
 		require.NoError(t, w.Flush(ctx))
 
 		if uploaded == 0 {
-			return errors.Errorf("did not report uploaded bytes")
+			return errors.New("did not report uploaded bytes")
 		}
 
 		uploaded = 0
@@ -198,7 +198,7 @@ func remoteRepositoryTest(ctx context.Context, t *testing.T, rep repo.Repository
 		require.NoError(t, w.Flush(ctx))
 
 		if uploaded != 0 {
-			return errors.Errorf("unexpected upload when writing duplicate object")
+			return errors.New("unexpected upload when writing duplicate object")
 		}
 
 		if result != result2 {
@@ -218,7 +218,7 @@ func remoteRepositoryTest(ctx context.Context, t *testing.T, rep repo.Repository
 
 		_, err = ow.Result()
 		if err == nil {
-			return errors.Errorf("unexpected success writing object with 'm' prefix")
+			return errors.New("unexpected success writing object with 'm' prefix")
 		}
 
 		manifestID, err = snapshot.SaveSnapshot(ctx, w, &snapshot.Manifest{

+ 1 - 1
internal/uitask/uitask_test.go

@@ -185,7 +185,7 @@ func testUITaskInternal(t *testing.T, ctx context.Context, m *uitask.Manager) {
 			t.Fatalf("unexpected summary: %v", diff)
 		}
 
-		return errors.Errorf("some error")
+		return errors.New("some error")
 	})
 
 	verifyTaskList(t, m, map[string]uitask.Status{

+ 3 - 3
internal/user/user_manager.go

@@ -135,11 +135,11 @@ var validUsernameRegexp = regexp.MustCompile(`^[a-z0-9\-_.]+@[a-z0-9\-_.]+$`)
 // ValidateUsername returns an error if the given username is invalid.
 func ValidateUsername(name string) error {
 	if name == "" {
-		return errors.Errorf("username is required")
+		return errors.New("username is required")
 	}
 
 	if !validUsernameRegexp.MatchString(name) {
-		return errors.Errorf("username must be specified as lowercase 'user@hostname'")
+		return errors.New("username must be specified as lowercase 'user@hostname'")
 	}
 
 	return nil
@@ -167,7 +167,7 @@ func SetUserProfile(ctx context.Context, w repo.RepositoryWriter, p *Profile) er
 // DeleteUserProfile removes user profile with a given username.
 func DeleteUserProfile(ctx context.Context, w repo.RepositoryWriter, username string) error {
 	if username == "" {
-		return errors.Errorf("username is required")
+		return errors.New("username is required")
 	}
 
 	manifests, err := w.FindManifests(ctx, map[string]string{

+ 1 - 1
repo/blob/azure/azure_storage.go

@@ -404,7 +404,7 @@ func New(ctx context.Context, opt *Options, isCreate bool) (blob.Storage, error)
 		service, serviceErr = azblob.NewClient(fmt.Sprintf("https://%s/", storageHostname), cred, nil)
 
 	default:
-		return nil, errors.Errorf("one of the storage key, SAS token or client secret must be provided")
+		return nil, errors.New("one of the storage key, SAS token or client secret must be provided")
 	}
 
 	if serviceErr != nil {

+ 1 - 1
repo/blob/azure/azure_versioned.go

@@ -22,7 +22,7 @@ type versionMetadataCallback func(versionMetadata) error
 
 func (az *azPointInTimeStorage) getVersionedBlobMeta(it *azblobmodels.BlobItem) (*versionMetadata, error) {
 	if it.VersionID == nil {
-		return nil, errors.Errorf("versionID is nil. Versioning must be enabled on the container for PIT")
+		return nil, errors.New("versionID is nil. Versioning must be enabled on the container for PIT")
 	}
 
 	bm := az.getBlobMeta(it)

+ 2 - 2
repo/blob/filesystem/filesystem_storage.go

@@ -43,7 +43,7 @@ type fsImpl struct {
 	osi osInterface
 }
 
-var errRetriableInvalidLength = errors.Errorf("invalid length (retriable)")
+var errRetriableInvalidLength = errors.New("invalid length (retriable)")
 
 func (fs *fsImpl) isRetriable(err error) bool {
 	if err == nil {
@@ -115,7 +115,7 @@ func (fs *fsImpl) GetBlobFromPath(ctx context.Context, dirPath, path string, off
 				}
 			}
 
-			return errors.Errorf("invalid length")
+			return errors.New("invalid length")
 		}
 
 		return nil

+ 1 - 1
repo/blob/filesystem/osinterface_mock_other_test.go

@@ -12,7 +12,7 @@ import (
 
 func (osi *mockOS) Stat(fname string) (fs.FileInfo, error) {
 	if osi.statRemainingErrors.Add(-1) >= 0 {
-		return nil, &os.PathError{Op: "stat", Err: errors.Errorf("underlying problem")}
+		return nil, &os.PathError{Op: "stat", Err: errors.New("underlying problem")}
 	}
 
 	return osi.osInterface.Stat(fname)

+ 11 - 11
repo/blob/filesystem/osinterface_mock_test.go

@@ -10,7 +10,7 @@ import (
 	"github.com/pkg/errors"
 )
 
-var errNonRetriable = errors.Errorf("some non-retriable error")
+var errNonRetriable = errors.New("some non-retriable error")
 
 type mockOS struct {
 	readFileRemainingErrors             atomic.Int32
@@ -53,7 +53,7 @@ func (osi *mockOS) Open(fname string) (osReadFile, error) {
 
 func (osi *mockOS) Rename(oldname, newname string) error {
 	if osi.renameRemainingErrors.Add(-1) >= 0 {
-		return &os.LinkError{Op: "rename", Old: oldname, New: newname, Err: errors.Errorf("underlying problem")}
+		return &os.LinkError{Op: "rename", Old: oldname, New: newname, Err: errors.New("underlying problem")}
 	}
 
 	return osi.osInterface.Rename(oldname, newname)
@@ -63,7 +63,7 @@ func (osi *mockOS) IsPathSeparator(c byte) bool { return os.IsPathSeparator(c) }
 
 func (osi *mockOS) ReadDir(dirname string) ([]fs.DirEntry, error) {
 	if osi.readDirRemainingErrors.Add(-1) >= 0 {
-		return nil, &os.PathError{Op: "readdir", Err: errors.Errorf("underlying problem")}
+		return nil, &os.PathError{Op: "readdir", Err: errors.New("underlying problem")}
 	}
 
 	if osi.readDirRemainingNonRetriableErrors.Add(-1) >= 0 {
@@ -88,7 +88,7 @@ func (osi *mockOS) ReadDir(dirname string) ([]fs.DirEntry, error) {
 
 func (osi *mockOS) Remove(fname string) error {
 	if osi.removeRemainingRetriableErrors.Add(-1) >= 0 {
-		return &os.PathError{Op: "unlink", Err: errors.Errorf("underlying problem")}
+		return &os.PathError{Op: "unlink", Err: errors.New("underlying problem")}
 	}
 
 	if osi.removeRemainingNonRetriableErrors.Add(-1) >= 0 {
@@ -100,7 +100,7 @@ func (osi *mockOS) Remove(fname string) error {
 
 func (osi *mockOS) Chtimes(fname string, atime, mtime time.Time) error {
 	if osi.chtimesRemainingErrors.Add(-1) >= 0 {
-		return &os.PathError{Op: "chtimes", Err: errors.Errorf("underlying problem")}
+		return &os.PathError{Op: "chtimes", Err: errors.New("underlying problem")}
 	}
 
 	return osi.osInterface.Chtimes(fname, atime, mtime)
@@ -108,7 +108,7 @@ func (osi *mockOS) Chtimes(fname string, atime, mtime time.Time) error {
 
 func (osi *mockOS) Chown(fname string, uid, gid int) error {
 	if osi.chownRemainingErrors.Add(-1) >= 0 {
-		return &os.PathError{Op: "chown", Err: errors.Errorf("underlying problem")}
+		return &os.PathError{Op: "chown", Err: errors.New("underlying problem")}
 	}
 
 	return osi.osInterface.Chown(fname, uid, gid)
@@ -116,7 +116,7 @@ func (osi *mockOS) Chown(fname string, uid, gid int) error {
 
 func (osi *mockOS) CreateNewFile(fname string, perm os.FileMode) (osWriteFile, error) {
 	if osi.createNewFileRemainingErrors.Add(-1) >= 0 {
-		return nil, &os.PathError{Op: "create", Err: errors.Errorf("underlying problem")}
+		return nil, &os.PathError{Op: "create", Err: errors.New("underlying problem")}
 	}
 
 	wf, err := osi.osInterface.CreateNewFile(fname, perm)
@@ -137,7 +137,7 @@ func (osi *mockOS) CreateNewFile(fname string, perm os.FileMode) (osWriteFile, e
 
 func (osi *mockOS) Mkdir(fname string, mode os.FileMode) error {
 	if osi.mkdirAllRemainingErrors.Add(-1) >= 0 {
-		return &os.PathError{Op: "mkdir", Err: errors.Errorf("underlying problem")}
+		return &os.PathError{Op: "mkdir", Err: errors.New("underlying problem")}
 	}
 
 	return osi.osInterface.Mkdir(fname, mode)
@@ -152,7 +152,7 @@ type readFailureFile struct {
 }
 
 func (f readFailureFile) Read(b []byte) (int, error) {
-	return 0, &os.PathError{Op: "read", Err: errors.Errorf("underlying problem")}
+	return 0, &os.PathError{Op: "read", Err: errors.New("underlying problem")}
 }
 
 type writeFailureFile struct {
@@ -160,7 +160,7 @@ type writeFailureFile struct {
 }
 
 func (f writeFailureFile) Write(b []byte) (int, error) {
-	return 0, &os.PathError{Op: "write", Err: errors.Errorf("underlying problem")}
+	return 0, &os.PathError{Op: "write", Err: errors.New("underlying problem")}
 }
 
 type writeCloseFailureFile struct {
@@ -168,7 +168,7 @@ type writeCloseFailureFile struct {
 }
 
 func (f writeCloseFailureFile) Close() error {
-	return &os.PathError{Op: "close", Err: errors.Errorf("underlying problem")}
+	return &os.PathError{Op: "close", Err: errors.New("underlying problem")}
 }
 
 type mockDirEntryInfoError struct {

+ 1 - 1
repo/blob/filesystem/osinterface_mock_unix_test.go

@@ -13,7 +13,7 @@ import (
 
 func (osi *mockOS) Stat(fname string) (fs.FileInfo, error) {
 	if osi.statRemainingErrors.Add(-1) >= 0 {
-		return nil, &os.PathError{Op: "stat", Err: errors.Errorf("underlying problem")}
+		return nil, &os.PathError{Op: "stat", Err: errors.New("underlying problem")}
 	}
 
 	if osi.eStaleRemainingErrors.Add(-1) >= 0 {

+ 1 - 1
repo/blob/rclone/rclone_storage.go

@@ -240,7 +240,7 @@ func (r *rcloneStorage) runRCloneAndWaitForServerAddress(ctx context.Context, c
 		return rcloneURLs{}, err
 
 	case <-time.After(startupTimeout):
-		return rcloneURLs{}, errors.Errorf("timed out waiting for rclone to start")
+		return rcloneURLs{}, errors.New("timed out waiting for rclone to start")
 	}
 }
 

+ 1 - 1
repo/blob/readonly/readonly_storage.go

@@ -10,7 +10,7 @@ import (
 )
 
 // ErrReadonly returns an error indicating that storage is read only.
-var ErrReadonly = errors.Errorf("storage is read-only")
+var ErrReadonly = errors.New("storage is read-only")
 
 // readonlyStorage prevents all mutations on the underlying storage.
 type readonlyStorage struct {

+ 1 - 1
repo/blob/s3/s3_storage.go

@@ -314,7 +314,7 @@ func getCustomTransport(opt *Options) (*http.Transport, error) {
 		rootcas := x509.NewCertPool()
 
 		if ok := rootcas.AppendCertsFromPEM(opt.RootCA); !ok {
-			return nil, errors.Errorf("cannot parse provided CA")
+			return nil, errors.New("cannot parse provided CA")
 		}
 
 		transport.TLSClientConfig.RootCAs = rootcas

+ 2 - 2
repo/blob/sftp/sftp_storage.go

@@ -379,7 +379,7 @@ func getHostKeyCallback(opt *Options) (ssh.HostKeyCallback, error) {
 	}
 
 	if f := opt.knownHostsFile(); !ospath.IsAbs(f) {
-		return nil, errors.Errorf("known hosts path must be absolute")
+		return nil, errors.New("known hosts path must be absolute")
 	}
 
 	//nolint:wrapcheck
@@ -400,7 +400,7 @@ func getSigner(opt *Options) (ssh.Signer, error) {
 		var err error
 
 		if f := opt.Keyfile; !ospath.IsAbs(f) {
-			return nil, errors.Errorf("key file path must be absolute")
+			return nil, errors.New("key file path must be absolute")
 		}
 
 		privateKeyData, err = os.ReadFile(opt.Keyfile)

+ 2 - 2
repo/blob/storage.go

@@ -17,10 +17,10 @@ import (
 var log = logging.Module("blob")
 
 // ErrSetTimeUnsupported is returned by implementations of Storage that don't support SetTime.
-var ErrSetTimeUnsupported = errors.Errorf("SetTime is not supported")
+var ErrSetTimeUnsupported = errors.New("SetTime is not supported")
 
 // ErrInvalidRange is returned when the requested blob offset or length is invalid.
-var ErrInvalidRange = errors.Errorf("invalid blob offset or length")
+var ErrInvalidRange = errors.New("invalid blob offset or length")
 
 // InvalidCredentialsErrStr is the error string returned by the provider
 // when a token has expired.

+ 1 - 1
repo/blob/storage_test.go

@@ -90,7 +90,7 @@ func TestIterateAllPrefixesInParallel(t *testing.T) {
 
 	require.ElementsMatch(t, []blob.ID{"foo", "bar", "boo"}, got)
 
-	errDummy := errors.Errorf("dummy")
+	errDummy := errors.New("dummy")
 
 	require.ErrorIs(t, errDummy, blob.IterateAllPrefixesInParallel(ctx, 10, st, []blob.ID{
 		"b",

+ 8 - 8
repo/blob/storagemetrics/storage_metrics_test.go

@@ -16,7 +16,7 @@ import (
 
 func TestStorageMetrics_PutBlob(t *testing.T) {
 	ctx := testlogging.Context(t)
-	someError := errors.Errorf("foo")
+	someError := errors.New("foo")
 	st := blobtesting.NewMapStorage(blobtesting.DataMap{}, nil, nil)
 
 	fs := blobtesting.NewFaultyStorage(st)
@@ -42,7 +42,7 @@ func TestStorageMetrics_PutBlob(t *testing.T) {
 
 func TestStorageMetrics_GetBlob(t *testing.T) {
 	ctx := testlogging.Context(t)
-	someError := errors.Errorf("foo")
+	someError := errors.New("foo")
 	st := blobtesting.NewMapStorage(blobtesting.DataMap{}, nil, nil)
 
 	require.NoError(t, st.PutBlob(ctx, "someBlob", gather.FromSlice([]byte{1, 2, 3, 4, 5}), blob.PutOptions{}))
@@ -84,7 +84,7 @@ func TestStorageMetrics_GetBlob(t *testing.T) {
 
 func TestStorageMetrics_GetMetadata(t *testing.T) {
 	ctx := testlogging.Context(t)
-	someError := errors.Errorf("foo")
+	someError := errors.New("foo")
 	st := blobtesting.NewMapStorage(blobtesting.DataMap{}, nil, nil)
 
 	require.NoError(t, st.PutBlob(ctx, "someBlob", gather.FromSlice([]byte{1, 2, 3, 4, 5}), blob.PutOptions{}))
@@ -116,7 +116,7 @@ func TestStorageMetrics_GetMetadata(t *testing.T) {
 
 func TestStorageMetrics_GetCapacity(t *testing.T) {
 	ctx := testlogging.Context(t)
-	someError := errors.Errorf("foo")
+	someError := errors.New("foo")
 	st := blobtesting.NewMapStorage(blobtesting.DataMap{}, nil, nil)
 
 	fs := blobtesting.NewFaultyStorage(st)
@@ -146,7 +146,7 @@ func TestStorageMetrics_GetCapacity(t *testing.T) {
 
 func TestStorageMetrics_DeleteBlob(t *testing.T) {
 	ctx := testlogging.Context(t)
-	someError := errors.Errorf("foo")
+	someError := errors.New("foo")
 	st := blobtesting.NewMapStorage(blobtesting.DataMap{}, nil, nil)
 
 	require.NoError(t, st.PutBlob(ctx, "someBlob", gather.FromSlice([]byte{1, 2, 3, 4, 5}), blob.PutOptions{}))
@@ -178,7 +178,7 @@ func TestStorageMetrics_DeleteBlob(t *testing.T) {
 
 func TestStorageMetrics_Close(t *testing.T) {
 	ctx := testlogging.Context(t)
-	someError := errors.Errorf("foo")
+	someError := errors.New("foo")
 	st := blobtesting.NewMapStorage(blobtesting.DataMap{}, nil, nil)
 
 	fs := blobtesting.NewFaultyStorage(st)
@@ -208,7 +208,7 @@ func TestStorageMetrics_Close(t *testing.T) {
 
 func TestStorageMetrics_FlushCaches(t *testing.T) {
 	ctx := testlogging.Context(t)
-	someError := errors.Errorf("foo")
+	someError := errors.New("foo")
 	st := blobtesting.NewMapStorage(blobtesting.DataMap{}, nil, nil)
 
 	fs := blobtesting.NewFaultyStorage(st)
@@ -238,7 +238,7 @@ func TestStorageMetrics_FlushCaches(t *testing.T) {
 
 func TestStorageMetrics_ListBlobs(t *testing.T) {
 	ctx := testlogging.Context(t)
-	someError := errors.Errorf("foo")
+	someError := errors.New("foo")
 	st := blobtesting.NewMapStorage(blobtesting.DataMap{}, nil, nil)
 	require.NoError(t, st.PutBlob(ctx, "someBlob1", gather.FromSlice([]byte{1, 2, 3, 4, 5}), blob.PutOptions{}))
 	require.NoError(t, st.PutBlob(ctx, "someBlob2", gather.FromSlice([]byte{1, 2, 3, 4, 5}), blob.PutOptions{}))

+ 1 - 1
repo/blob/throttling/throttling_semaphore.go

@@ -46,7 +46,7 @@ func (s *semaphore) SetLimit(limit int) error {
 	defer s.mu.Unlock()
 
 	if limit < 0 {
-		return errors.Errorf("invalid limit")
+		return errors.New("invalid limit")
 	}
 
 	if limit > 0 {

+ 1 - 1
repo/blob/throttling/token_bucket.go

@@ -87,7 +87,7 @@ func (b *tokenBucket) SetLimit(maxTokens float64) error {
 	defer b.mu.Unlock()
 
 	if maxTokens < 0 {
-		return errors.Errorf("limit cannot be negative")
+		return errors.New("limit cannot be negative")
 	}
 
 	b.maxTokens = maxTokens

+ 2 - 2
repo/connect.go

@@ -22,7 +22,7 @@ type ConnectOptions struct {
 
 // ErrRepositoryNotInitialized is returned when attempting to connect to repository that has not
 // been initialized.
-var ErrRepositoryNotInitialized = errors.Errorf("repository not initialized in the provided storage")
+var ErrRepositoryNotInitialized = errors.New("repository not initialized in the provided storage")
 
 // Connect connects to the repository in the specified storage and persists the configuration and credentials in the file provided.
 func Connect(ctx context.Context, configFile string, st blob.Storage, password string, opt *ConnectOptions) error {
@@ -89,7 +89,7 @@ func Disconnect(ctx context.Context, configFile string) error {
 
 	if cfg.Caching != nil && cfg.Caching.CacheDirectory != "" {
 		if !ospath.IsAbs(cfg.Caching.CacheDirectory) {
-			return errors.Errorf("cache directory was not absolute, refusing to delete")
+			return errors.New("cache directory was not absolute, refusing to delete")
 		}
 
 		if err = os.RemoveAll(cfg.Caching.CacheDirectory); err != nil {

+ 1 - 1
repo/content/committed_content_index_disk_cache.go

@@ -159,7 +159,7 @@ func writeTempFileAtomic(dirname string, data []byte) (string, error) {
 	}
 
 	if err := tf.Close(); err != nil {
-		return "", errors.Errorf("can't close tmp file")
+		return "", errors.New("can't close tmp file")
 	}
 
 	return tf.Name(), nil

+ 1 - 1
repo/content/content_manager_lock_free.go

@@ -39,7 +39,7 @@ func (sm *SharedManager) maybeCompressAndEncryptDataForPacking(data gather.Bytes
 	//nolint:nestif
 	if comp != NoCompression {
 		if mp.IndexVersion < index.Version2 {
-			return NoCompression, errors.Errorf("compression is not enabled for this repository")
+			return NoCompression, errors.New("compression is not enabled for this repository")
 		}
 
 		var tmp gather.WriteBuffer

+ 1 - 1
repo/content/content_manager_test.go

@@ -1250,7 +1250,7 @@ func (s *contentManagerSuite) TestHandleWriteErrors(t *testing.T) {
 				}
 			} else {
 				if cnt > 0 {
-					result = append(result, fault.New().Repeat(cnt-1).ErrorInstead(errors.Errorf("some write error")))
+					result = append(result, fault.New().Repeat(cnt-1).ErrorInstead(errors.New("some write error")))
 				}
 			}
 		}

+ 5 - 5
repo/content/index/id.go

@@ -26,7 +26,7 @@ func (p IDPrefix) ValidateSingle() error {
 		}
 	}
 
-	return errors.Errorf("invalid prefix, must be empty or a single letter between 'g' and 'z'")
+	return errors.New("invalid prefix, must be empty or a single letter between 'g' and 'z'")
 }
 
 // ID is an identifier of content in content-addressable storage.
@@ -158,11 +158,11 @@ func IDFromHash(prefix IDPrefix, hash []byte) (ID, error) {
 	var id ID
 
 	if len(hash) > len(id.data) {
-		return EmptyID, errors.Errorf("hash too long")
+		return EmptyID, errors.New("hash too long")
 	}
 
 	if len(hash) == 0 {
-		return EmptyID, errors.Errorf("hash too short")
+		return EmptyID, errors.New("hash too short")
 	}
 
 	if err := prefix.ValidateSingle(); err != nil {
@@ -193,14 +193,14 @@ func ParseID(s string) (ID, error) {
 		id.prefix = s[0]
 
 		if id.prefix < 'g' || id.prefix > 'z' {
-			return id, errors.Errorf("invalid content prefix")
+			return id, errors.New("invalid content prefix")
 		}
 
 		s = s[1:]
 	}
 
 	if len(s) > 2*len(id.data) {
-		return id, errors.Errorf("hash too long")
+		return id, errors.New("hash too long")
 	}
 
 	n, err := hex.Decode(id.data[:], []byte(s))

+ 1 - 1
repo/content/index/index_builder.go

@@ -185,7 +185,7 @@ func (b Builder) shard(maxShardSize int) []Builder {
 // Returns shard bytes and function to clean up after the shards have been written.
 func (b Builder) BuildShards(indexVersion int, stable bool, shardSize int) ([]gather.Bytes, func(), error) {
 	if shardSize == 0 {
-		return nil, nil, errors.Errorf("invalid shard size")
+		return nil, nil, errors.New("invalid shard size")
 	}
 
 	var (

+ 3 - 3
repo/content/index/index_v1.go

@@ -340,11 +340,11 @@ func (b *indexBuilderV1) writeEntry(w io.Writer, it Info, entry []byte) error {
 	}
 
 	if it.CompressionHeaderID != 0 {
-		return errors.Errorf("compression not supported in index v1")
+		return errors.New("compression not supported in index v1")
 	}
 
 	if it.EncryptionKeyID != 0 {
-		return errors.Errorf("encryption key ID not supported in index v1")
+		return errors.New("encryption key ID not supported in index v1")
 	}
 
 	if err := b.formatEntry(entry, it); err != nil {
@@ -411,7 +411,7 @@ func v1ReadHeader(data []byte) (v1HeaderInfo, error) {
 	}
 
 	if hi.keySize <= 1 || hi.valueSize < 0 || hi.entryCount < 0 {
-		return v1HeaderInfo{}, errors.Errorf("invalid header")
+		return v1HeaderInfo{}, errors.New("invalid header")
 	}
 
 	return hi, nil

+ 5 - 5
repo/content/index/index_v2.go

@@ -697,12 +697,12 @@ func openV2PackIndex(data []byte, closer func() error) (Index, error) {
 	}
 
 	if hi.keySize <= 1 || hi.entrySize < v2EntryMinLength || hi.entrySize > v2EntryMaxLength || hi.entryCount < 0 || hi.formatCount > v2MaxFormatCount {
-		return nil, errors.Errorf("invalid header")
+		return nil, errors.New("invalid header")
 	}
 
 	hi.entryStride = int64(hi.keySize + hi.entrySize)
 	if hi.entryStride > v2MaxEntrySize {
-		return nil, errors.Errorf("invalid header - entry stride too big")
+		return nil, errors.New("invalid header - entry stride too big")
 	}
 
 	hi.entriesOffset = v2IndexHeaderSize
@@ -712,7 +712,7 @@ func openV2PackIndex(data []byte, closer func() error) (Index, error) {
 	// pre-read formats section
 	formatsBuf, err := safeSlice(data, hi.formatsOffset, int(hi.formatCount)*v2FormatInfoSize)
 	if err != nil {
-		return nil, errors.Errorf("unable to read formats section")
+		return nil, errors.New("unable to read formats section")
 	}
 
 	packIDs := make([]blob.ID, hi.packCount)
@@ -720,7 +720,7 @@ func openV2PackIndex(data []byte, closer func() error) (Index, error) {
 	for i := range int(hi.packCount) { //nolint:gosec
 		buf, err := safeSlice(data, hi.packsOffset+int64(v2PackInfoSize*i), v2PackInfoSize)
 		if err != nil {
-			return nil, errors.Errorf("unable to read pack blob IDs section - 1")
+			return nil, errors.New("unable to read pack blob IDs section - 1")
 		}
 
 		nameLength := int(buf[0])
@@ -728,7 +728,7 @@ func openV2PackIndex(data []byte, closer func() error) (Index, error) {
 
 		nameBuf, err := safeSliceString(data, int64(nameOffset), nameLength)
 		if err != nil {
-			return nil, errors.Errorf("unable to read pack blob IDs section - 2")
+			return nil, errors.New("unable to read pack blob IDs section - 2")
 		}
 
 		packIDs[i] = blob.ID(nameBuf)

+ 2 - 2
repo/content/index/merged_test.go

@@ -60,7 +60,7 @@ func TestMerged(t *testing.T) {
 	fmt.Println("=========== START")
 
 	// error is propagated.
-	someErr := errors.Errorf("some error")
+	someErr := errors.New("some error")
 	require.ErrorIs(t, m.Iterate(AllIDs, func(i Info) error {
 		if i.ContentID == mustParseID(t, "aabbcc") {
 			return someErr
@@ -160,7 +160,7 @@ func (i failingIndex) GetInfo(contentID ID, result *Info) (bool, error) {
 }
 
 func TestMergedGetInfoError(t *testing.T) {
-	someError := errors.Errorf("some error")
+	someError := errors.New("some error")
 
 	m := Merged{failingIndex{nil, someError}}
 

+ 2 - 2
repo/content/indexblob/index_blob_encryption_test.go

@@ -68,14 +68,14 @@ func TestEncryptedBlobManager(t *testing.T) {
 
 	require.ErrorIs(t, ebm.GetEncryptedBlob(ctx, "no-such-blob", &tmp), blob.ErrBlobNotFound)
 
-	someError := errors.Errorf("some error")
+	someError := errors.New("some error")
 
 	fs.AddFault(blobtesting.MethodPutBlob).ErrorInstead(someError)
 
 	_, err = ebm.EncryptAndWriteBlob(ctx, gather.FromSlice([]byte{1, 2, 3, 4}), "x", "session1")
 	require.ErrorIs(t, err, someError)
 
-	someError2 := errors.Errorf("some error 2")
+	someError2 := errors.New("some error 2")
 
 	ebm.crypter = blobcrypto.StaticCrypter{Hash: hf, Encryption: failingEncryptor{nil, someError2}}
 

+ 3 - 3
repo/format/blobcfg_blob.go

@@ -31,11 +31,11 @@ func (r *BlobStorageConfiguration) IsRetentionEnabled() bool {
 // Validate validates the blob config parameters.
 func (r *BlobStorageConfiguration) Validate() error {
 	if (r.RetentionMode == "") != (r.RetentionPeriod == 0) {
-		return errors.Errorf("both retention mode and period must be provided when setting blob retention properties")
+		return errors.New("both retention mode and period must be provided when setting blob retention properties")
 	}
 
 	if r.RetentionPeriod != 0 && r.RetentionPeriod < 24*time.Hour {
-		return errors.Errorf("invalid retention-period, the minimum required is 1-day and there is no maximum limit")
+		return errors.New("invalid retention-period, the minimum required is 1-day and there is no maximum limit")
 	}
 
 	return nil
@@ -78,7 +78,7 @@ func deserializeBlobCfgBytes(j *KopiaRepositoryJSON, encryptedBlobCfgBytes, form
 	case aes256GcmEncryption:
 		plainText, err = decryptRepositoryBlobBytesAes256Gcm(encryptedBlobCfgBytes, formatEncryptionKey, j.UniqueID)
 		if err != nil {
-			return BlobStorageConfiguration{}, errors.Errorf("unable to decrypt repository blobcfg blob")
+			return BlobStorageConfiguration{}, errors.New("unable to decrypt repository blobcfg blob")
 		}
 
 	default:

+ 1 - 1
repo/format/content_format.go

@@ -80,7 +80,7 @@ func (v *MutableParameters) Validate() error {
 	}
 
 	if v.IndexVersion < 0 || v.IndexVersion > index.Version2 {
-		return errors.Errorf("invalid index version, supported versions are 1 & 2")
+		return errors.New("invalid index version, supported versions are 1 & 2")
 	}
 
 	if err := v.EpochParameters.Validate(); err != nil {

+ 1 - 1
repo/format/format_blob.go

@@ -31,7 +31,7 @@ const (
 const KopiaRepositoryBlobID = "kopia.repository"
 
 // ErrInvalidPassword is returned when repository password is invalid.
-var ErrInvalidPassword = errors.Errorf("invalid repository password") // +checklocksignore
+var ErrInvalidPassword = errors.New("invalid repository password") // +checklocksignore
 
 //nolint:gochecknoglobals
 var (

+ 1 - 1
repo/format/format_change_password.go

@@ -15,7 +15,7 @@ func (m *Manager) ChangePassword(ctx context.Context, newPassword string) error
 	defer m.mu.Unlock()
 
 	if !m.repoConfig.EnablePasswordChange {
-		return errors.Errorf("password changes are not supported for repositories created using Kopia v0.8 or older")
+		return errors.New("password changes are not supported for repositories created using Kopia v0.8 or older")
 	}
 
 	newFormatEncryptionKey, err := m.j.DeriveFormatEncryptionKeyFromPassword(newPassword)

+ 4 - 4
repo/format/format_manager.go

@@ -138,7 +138,7 @@ func (m *Manager) refresh(ctx context.Context) error {
 
 	b, err = addFormatBlobChecksumAndLength(b)
 	if err != nil {
-		return errors.Errorf("unable to add checksum")
+		return errors.New("unable to add checksum")
 	}
 
 	// use old key, if present to avoid deriving it, which is expensive
@@ -309,7 +309,7 @@ func (m *Manager) LoadedTime() time.Time {
 // +checklocks:m.mu
 func (m *Manager) updateRepoConfigLocked(ctx context.Context) error {
 	if err := m.j.EncryptRepositoryConfig(m.repoConfig, m.formatEncryptionKey); err != nil {
-		return errors.Errorf("unable to encrypt format bytes")
+		return errors.New("unable to encrypt format bytes")
 	}
 
 	if err := m.j.WriteKopiaRepositoryBlob(ctx, m.blobs, m.blobCfgBlob); err != nil {
@@ -420,7 +420,7 @@ func NewManagerWithCache(
 }
 
 // ErrAlreadyInitialized indicates that repository has already been initialized.
-var ErrAlreadyInitialized = errors.Errorf("repository already initialized")
+var ErrAlreadyInitialized = errors.New("repository already initialized")
 
 // Initialize initializes the format blob in a given storage.
 func Initialize(ctx context.Context, st blob.Storage, formatBlob *KopiaRepositoryJSON, repoConfig *RepositoryConfig, blobcfg BlobStorageConfiguration, password string) error {
@@ -439,7 +439,7 @@ func Initialize(ctx context.Context, st blob.Storage, formatBlob *KopiaRepositor
 
 	err = st.GetBlob(ctx, KopiaBlobCfgBlobID, 0, -1, &tmp)
 	if err == nil {
-		return errors.Errorf("possible corruption: blobcfg blob exists, but format blob is not found")
+		return errors.New("possible corruption: blobcfg blob exists, but format blob is not found")
 	}
 
 	if !errors.Is(err, blob.ErrBlobNotFound) {

+ 1 - 1
repo/format/format_manager_test.go

@@ -22,7 +22,7 @@ import (
 )
 
 var (
-	errSomeError = errors.Errorf("some error")
+	errSomeError = errors.New("some error")
 
 	cf = format.ContentFormat{
 		MutableParameters: format.MutableParameters{

+ 1 - 1
repo/format/format_set_parameters.go

@@ -31,7 +31,7 @@ func (m *Manager) SetParameters(
 	m.repoConfig.RequiredFeatures = requiredFeatures
 
 	if err := m.j.EncryptRepositoryConfig(m.repoConfig, m.formatEncryptionKey); err != nil {
-		return errors.Errorf("unable to encrypt format bytes")
+		return errors.New("unable to encrypt format bytes")
 	}
 
 	if err := m.j.WriteBlobCfgBlob(ctx, m.blobs, blobcfg, m.formatEncryptionKey); err != nil {

+ 1 - 1
repo/format/repository_config.go

@@ -29,7 +29,7 @@ func (f *KopiaRepositoryJSON) decryptRepositoryConfig(masterKey []byte) (*Reposi
 	case aes256GcmEncryption:
 		plainText, err := decryptRepositoryBlobBytesAes256Gcm(f.EncryptedFormatBytes, masterKey, f.UniqueID)
 		if err != nil {
-			return nil, errors.Errorf("unable to decrypt repository format")
+			return nil, errors.New("unable to decrypt repository format")
 		}
 
 		var erc EncryptedRepositoryConfig

+ 2 - 2
repo/grpc_repository_client.go

@@ -722,7 +722,7 @@ func (r *grpcRepositoryClient) WriteContent(ctx context.Context, data gather.Byt
 
 	// we will be writing asynchronously and server will reject this write, fail early.
 	if prefix == manifest.ContentPrefix {
-		return content.EmptyID, errors.Errorf("writing manifest contents not allowed")
+		return content.EmptyID, errors.New("writing manifest contents not allowed")
 	}
 
 	var hashOutput [128]byte
@@ -867,7 +867,7 @@ func baseURLToURI(baseURL string) (uri string, err error) {
 	}
 
 	if u.Scheme != "kopia" && u.Scheme != "https" && u.Scheme != "unix+https" {
-		return "", errors.Errorf("invalid server address, must be 'https://host:port' or 'unix+https://<path>")
+		return "", errors.New("invalid server address, must be 'https://host:port' or 'unix+https://<path>")
 	}
 
 	uri = net.JoinHostPort(u.Hostname(), u.Port())

+ 1 - 1
repo/local_config.go

@@ -21,7 +21,7 @@ import (
 const configDirMode = 0o700
 
 // ErrCannotWriteToRepoConnectionWithPermissiveCacheLoading error to indicate.
-var ErrCannotWriteToRepoConnectionWithPermissiveCacheLoading = errors.Errorf("cannot write to repo connection with permissive cache loading")
+var ErrCannotWriteToRepoConnectionWithPermissiveCacheLoading = errors.New("cannot write to repo connection with permissive cache loading")
 
 // ClientOptions contains client-specific options that are persisted in local configuration file.
 type ClientOptions struct {

+ 1 - 1
repo/maintenance/content_rewrite.go

@@ -39,7 +39,7 @@ type contentInfoOrError struct {
 // blobs and index entries to point at them.
 func RewriteContents(ctx context.Context, rep repo.DirectRepositoryWriter, opt *RewriteContentsOptions, safety SafetyParameters) error {
 	if opt == nil {
-		return errors.Errorf("missing options")
+		return errors.New("missing options")
 	}
 
 	if opt.ShortPacks {

+ 1 - 1
repo/maintenance/maintenance_schedule.go

@@ -135,7 +135,7 @@ func GetSchedule(ctx context.Context, rep repo.DirectRepository) (*Schedule, err
 	v := tmp.ToByteSlice()
 
 	if len(v) < c.NonceSize() {
-		return nil, errors.Errorf("invalid schedule blob")
+		return nil, errors.New("invalid schedule blob")
 	}
 
 	j, err := c.Open(nil, v[0:c.NonceSize()], v[c.NonceSize():], maintenanceScheduleAEADExtraData)

+ 1 - 1
repo/manifest/manifest_manager.go

@@ -70,7 +70,7 @@ type Manager struct {
 // Put serializes the provided payload to JSON and persists it. Returns unique identifier that represents the manifest.
 func (m *Manager) Put(ctx context.Context, labels map[string]string, payload interface{}) (ID, error) {
 	if labels[TypeLabelKey] == "" {
-		return "", errors.Errorf("'type' label is required")
+		return "", errors.New("'type' label is required")
 	}
 
 	random := make([]byte, manifestIDLength)

Một số tệp đã không được hiển thị bởi vì quá nhiều tập tin thay đổi trong này khác