
Dependency update and cleanup

Achilleas Koutsou 5 years ago
parent
commit 885ecca43b
100 changed files with 6197 additions and 4299 deletions
  1. +2 -3  go.mod
  2. +10 -11  go.sum
  3. +0 -49  vendor/gitea.com/xorm/builder/cond_if.go
  4. +0 -116  vendor/gitea.com/xorm/core/README.md
  5. +0 -1  vendor/gitea.com/xorm/core/benchmark.sh
  6. +0 -95  vendor/gitea.com/xorm/core/cache.go
  7. +0 -166  vendor/gitea.com/xorm/core/column.go
  8. +0 -12  vendor/gitea.com/xorm/core/converstion.go
  9. +0 -225  vendor/gitea.com/xorm/core/db.go
  10. +0 -320  vendor/gitea.com/xorm/core/dialect.go
  11. +0 -31  vendor/gitea.com/xorm/core/driver.go
  12. +0 -12  vendor/gitea.com/xorm/core/error.go
  13. +0 -68  vendor/gitea.com/xorm/core/filter.go
  14. +0 -13  vendor/gitea.com/xorm/core/go.mod
  15. +0 -23  vendor/gitea.com/xorm/core/go.sum
  16. +0 -35  vendor/gitea.com/xorm/core/ilogger.go
  17. +0 -71  vendor/gitea.com/xorm/core/index.go
  18. +0 -258  vendor/gitea.com/xorm/core/mapper.go
  19. +0 -30  vendor/gitea.com/xorm/core/pk.go
  20. +0 -338  vendor/gitea.com/xorm/core/rows.go
  21. +0 -66  vendor/gitea.com/xorm/core/scan.go
  22. +0 -165  vendor/gitea.com/xorm/core/stmt.go
  23. +0 -154  vendor/gitea.com/xorm/core/table.go
  24. +0 -153  vendor/gitea.com/xorm/core/tx.go
  25. +0 -323  vendor/gitea.com/xorm/core/type.go
  26. +0 -1253  vendor/gitea.com/xorm/xorm/dialect_postgres.go
  27. +0 -19  vendor/gitea.com/xorm/xorm/go.mod
  28. +0 -2  vendor/github.com/G-Node/gig/README.md
  29. +3 -0  vendor/github.com/G-Node/git-module/.gitignore
  30. +18 -0  vendor/github.com/G-Node/git-module/.travis.yml
  31. +4 -2  vendor/github.com/G-Node/git-module/README.md
  32. +1 -13  vendor/github.com/G-Node/git-module/commit.go
  33. +9 -9  vendor/github.com/G-Node/git-module/commit_archive.go
  34. +77 -0  vendor/github.com/G-Node/git-module/deprecated.go
  35. +0 -6  vendor/github.com/G-Node/git-module/git.go
  36. +9 -0  vendor/github.com/G-Node/git-module/go.mod
  37. +16 -0  vendor/github.com/G-Node/git-module/go.sum
  38. +31 -0  vendor/github.com/G-Node/git-module/remote.go
  39. +31 -27  vendor/github.com/G-Node/git-module/repo.go
  40. +6 -12  vendor/github.com/G-Node/git-module/repo_branch.go
  41. +14 -42  vendor/github.com/G-Node/git-module/repo_commit.go
  42. +24 -24  vendor/github.com/G-Node/git-module/repo_diff.go
  43. +4 -4  vendor/github.com/G-Node/git-module/repo_object.go
  44. +1 -1  vendor/github.com/G-Node/git-module/repo_pull.go
  45. +8 -8  vendor/github.com/G-Node/git-module/repo_tag.go
  46. +1 -1  vendor/github.com/G-Node/git-module/sha1.go
  47. +10 -10  vendor/github.com/G-Node/git-module/tree.go
  48. +2 -2  vendor/github.com/G-Node/git-module/tree_blob.go
  49. +9 -9  vendor/github.com/G-Node/git-module/tree_entry.go
  50. +4 -4  vendor/github.com/G-Node/git-module/utils.go
  51. +0 -29  vendor/github.com/G-Node/go-annex/LICENSE
  52. +0 -5  vendor/github.com/G-Node/go-annex/util.go
  53. +0 -65  vendor/github.com/G-Node/godML/odml/odml.go
  54. +7 -1  vendor/github.com/G-Node/libgin/LICENSE
  55. +2 -7  vendor/github.com/G-Node/libgin/libgin/annex/add.go
  56. +4 -5  vendor/github.com/G-Node/libgin/libgin/annex/file.go
  57. +12 -0  vendor/github.com/G-Node/libgin/libgin/annex/util.go
  58. +1 -1  vendor/github.com/G-Node/libgin/libgin/dex.go
  59. +0 -0  vendor/github.com/G-Node/libgin/libgin/gig/delta.go
  60. +0 -0  vendor/github.com/G-Node/libgin/libgin/gig/objects.go
  61. +0 -0  vendor/github.com/G-Node/libgin/libgin/gig/pack.go
  62. +0 -0  vendor/github.com/G-Node/libgin/libgin/gig/parse.go
  63. +0 -0  vendor/github.com/G-Node/libgin/libgin/gig/refs.go
  64. +0 -0  vendor/github.com/G-Node/libgin/libgin/gig/repo.go
  65. +0 -0  vendor/github.com/G-Node/libgin/libgin/gig/util.go
  66. +0 -0  vendor/github.com/G-Node/libgin/libgin/gig/walk.go
  67. +0 -0  vendor/github.com/G-Node/libgin/libgin/gig/write.go
  68. +20 -0  vendor/github.com/beorn7/perks/LICENSE
  69. +2388 -0  vendor/github.com/beorn7/perks/quantile/exampledata.txt
  70. +316 -0  vendor/github.com/beorn7/perks/quantile/stream.go
  71. +1 -0  vendor/github.com/boombuler/barcode/.gitignore
  72. +21 -0  vendor/github.com/boombuler/barcode/LICENSE
  73. +53 -0  vendor/github.com/boombuler/barcode/README.md
  74. +42 -0  vendor/github.com/boombuler/barcode/barcode.go
  75. +1 -0  vendor/github.com/boombuler/barcode/go.mod
  76. +66 -0  vendor/github.com/boombuler/barcode/qr/alphanumeric.go
  77. +23 -0  vendor/github.com/boombuler/barcode/qr/automatic.go
  78. +59 -0  vendor/github.com/boombuler/barcode/qr/blocks.go
  79. +416 -0  vendor/github.com/boombuler/barcode/qr/encoder.go
  80. +29 -0  vendor/github.com/boombuler/barcode/qr/errorcorrection.go
  81. +56 -0  vendor/github.com/boombuler/barcode/qr/numeric.go
  82. +166 -0  vendor/github.com/boombuler/barcode/qr/qrcode.go
  83. +27 -0  vendor/github.com/boombuler/barcode/qr/unicode.go
  84. +310 -0  vendor/github.com/boombuler/barcode/qr/versioninfo.go
  85. +134 -0  vendor/github.com/boombuler/barcode/scaledbarcode.go
  86. +57 -0  vendor/github.com/boombuler/barcode/utils/base1dcode.go
  87. +119 -0  vendor/github.com/boombuler/barcode/utils/bitlist.go
  88. +65 -0  vendor/github.com/boombuler/barcode/utils/galoisfield.go
  89. +103 -0  vendor/github.com/boombuler/barcode/utils/gfpoly.go
  90. +44 -0  vendor/github.com/boombuler/barcode/utils/reedsolomon.go
  91. +19 -0  vendor/github.com/boombuler/barcode/utils/runeint.go
  92. +202 -0  vendor/github.com/bradfitz/gomemcache/LICENSE
  93. +687 -0  vendor/github.com/bradfitz/gomemcache/memcache/memcache.go
  94. +129 -0  vendor/github.com/bradfitz/gomemcache/memcache/selector.go
  95. +8 -0  vendor/github.com/cespare/xxhash/v2/.travis.yml
  96. +22 -0  vendor/github.com/cespare/xxhash/v2/LICENSE.txt
  97. +55 -0  vendor/github.com/cespare/xxhash/v2/README.md
  98. +3 -0  vendor/github.com/cespare/xxhash/v2/go.mod
  99. +0 -0  vendor/github.com/cespare/xxhash/v2/go.sum
  100. +236 -0  vendor/github.com/cespare/xxhash/v2/xxhash.go

+ 2 - 3
go.mod

@@ -3,9 +3,8 @@ module github.com/G-Node/gogs
 go 1.13
 
 require (
-	github.com/G-Node/git-module v0.8.4-0.20191218161021-3fd4d7aaf932
-	github.com/G-Node/go-annex v0.1.0
-	github.com/G-Node/libgin v0.0.0-20191216094436-47f8aadc0067
+	github.com/G-Node/git-module v0.8.4-gnode
+	github.com/G-Node/libgin v0.3.1
 	github.com/bgentry/speakeasy v0.1.0 // indirect
 	github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0
 	github.com/dustinkirkland/golang-petname v0.0.0-20191129215211-8e5a1ed0cff0
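
A go.mod change of this shape is normally produced with the standard Go module tooling rather than edited by hand. A minimal sketch of the equivalent commands, assuming a checkout of this repository with vendoring enabled (the exact invocation behind this commit is not recorded):

```sh
# Bump the two G-Node requirements to the versions referenced above.
go get github.com/G-Node/git-module@v0.8.4-gnode
go get github.com/G-Node/libgin@v0.3.1

# Drop requirements that are no longer imported (the go-annex requirement
# is removed above) and prune stale go.sum entries.
go mod tidy

# Regenerate vendor/ to match go.mod: packages that are no longer referenced
# are deleted and newly required ones are copied in.
go mod vendor
```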

+ 10 - 11
go.sum

@@ -2,16 +2,14 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/G-Node/gig v0.0.0-20171025133355-6d784b40b534 h1:ZdsP0JR5T1lDcsXSeARuI83++uHNoSZiN6kaMgf1EPM=
 github.com/G-Node/gig v0.0.0-20171025133355-6d784b40b534/go.mod h1:H+82VbQUp9AzlbEiZl3bri3mlPlN2q6CxlIh0CyJWnc=
-github.com/G-Node/git-module v0.8.4-0.20191218161021-3fd4d7aaf932 h1:dZrf6z91eDajQqj8vdmOkKXZtCc3NGpkBikXwZGOZYM=
 github.com/G-Node/git-module v0.8.4-0.20191218161021-3fd4d7aaf932/go.mod h1:VYqGwMiaOacy+pch//bmTW0HnfVxZA6VnFSvzOOwpX0=
-github.com/G-Node/go-annex v0.0.0-20190628200220-9382e24e7eb6 h1:XyZqJ0xWFiSEs/sIYyKGG03aWcVMzIVlavkb6Lph1ao=
-github.com/G-Node/go-annex v0.0.0-20190628200220-9382e24e7eb6/go.mod h1:y9O/u0tJBzrDM4NqgkyzI5YoZMi7P0o+6llu141LA0s=
-github.com/G-Node/go-annex v0.1.0 h1:JI0sJX9KAMPdjEVVR5CGF9hGoQc9EtQJdEU+hHcZchE=
-github.com/G-Node/go-annex v0.1.0/go.mod h1:lskjkgAQ7vasN8gZ+MKHIBWVjjfV5kkz4+Leh8rZgMg=
-github.com/G-Node/libgin v0.0.0-20191216094436-47f8aadc0067 h1:JXz3L3qJZiS0sHilcJFTyfx/e/6d3INXibDFArGytZQ=
+github.com/G-Node/git-module v0.8.4-gnode h1:BNtyp/2J+zisHObMqrxazJ2fb2ASPu30uN8sNfliSgI=
+github.com/G-Node/git-module v0.8.4-gnode/go.mod h1:TdKR+8dChXtB7Hw3xS4Bfn5QQenPnihuoWx/vnSnb1k=
 github.com/G-Node/libgin v0.0.0-20191216094436-47f8aadc0067/go.mod h1:2yLXQnNbwjH8mslxnzU8Kb+d7c2Zqo8DIgR6Pgp7lCg=
+github.com/G-Node/libgin v0.3.0/go.mod h1:VjulCBq7k/kgf4Eabk2f4w9SDNowWhLnK+yZvy5Nppk=
+github.com/G-Node/libgin v0.3.1 h1:R6yDRkJ0DlUgD7Y4vZrdKX0DzaTDMYfSbWRm+hw3k3I=
+github.com/G-Node/libgin v0.3.1/go.mod h1:2znNE//YIHBGGXeYCHf0q5+IiOalliAbpG3nGZq9hKY=
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -38,12 +36,10 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSY
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76/go.mod h1:vYwsqCOLxGiisLwp9rITslkFNpZD5rz43tf41QFkTWY=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
 github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0 h1:epsH3lb7KVbXHYk7LYGN5EiE0MxcevHU85CKITJ0wUY=
 github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
-github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo=
 github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/dustinkirkland/golang-petname v0.0.0-20191129215211-8e5a1ed0cff0 h1:90Ly+6UfUypEF6vvvW5rQIv9opIL8CbmW9FT20LDQoY=
 github.com/dustinkirkland/golang-petname v0.0.0-20191129215211-8e5a1ed0cff0/go.mod h1:V+Qd57rJe8gd4eiGzZyg4h54VLHmYVVw54iMnlAMrF8=
@@ -92,6 +88,7 @@ github.com/gogs/go-gogs-client v0.0.0-20190710002546-4c3c18947c15 h1:tgEyCCe4+o8
 github.com/gogs/go-gogs-client v0.0.0-20190710002546-4c3c18947c15/go.mod h1:fR6z1Ie6rtF7kl/vBYMfgD5/G5B1blui7z426/sj2DU=
 github.com/gogs/go-libravatar v0.0.0-20191106065024-33a75213d0a0 h1:K02vod+sn3M1OOkdqi2tPxN2+xESK4qyITVQ3JkGEv4=
 github.com/gogs/go-libravatar v0.0.0-20191106065024-33a75213d0a0/go.mod h1:Zas3BtO88pk1cwUfEYlvnl/CRwh0ybDxRWSwRjG8I3w=
+github.com/gogs/minwinsvc v0.0.0-20170301035411-95be6356811a h1:8DZwxETOVWIinYxDK+i6L+rMb7eGATGaakD6ZucfHVk=
 github.com/gogs/minwinsvc v0.0.0-20170301035411-95be6356811a/go.mod h1:TUIZ+29jodWQ8Gk6Pvtg4E09aMsc3C/VLZiVYfUhWQU=
 github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
 github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
@@ -117,6 +114,7 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4=
 github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@@ -172,6 +170,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/msteinert/pam v0.0.0-20190215180659-f29b9f28d6f9 h1:ZivaaKmjs9q90zi6I4gTLW6tbVGtlBjellr3hMYaly0=
 github.com/msteinert/pam v0.0.0-20190215180659-f29b9f28d6f9/go.mod h1:np1wUFZ6tyoke22qDJZY40URn9Ae51gX7ljIWXN5TJs=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ=
@@ -235,7 +234,6 @@ github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1
 github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=
 github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
 github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
-github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8=
 github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
@@ -245,7 +243,6 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
 github.com/unknwon/cae v1.0.0 h1:i39lOFaBXZxhGjQOy/RNbi8uzettCs6OQxpR0xXohGU=
@@ -327,6 +324,7 @@ google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMt
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -334,6 +332,7 @@ google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRn
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
 gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
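
The pattern in this go.sum hunk follows from how Go records module checksums: every module gets a `<module> <version>/go.mod h1:...` line for its go.mod file alone, plus a `<module> <version> h1:...` line for the full module contents when the module is actually needed for the build (`h1:` names the hash scheme, a SHA-256-based directory hash). Two entries from the diff above, shown here as an illustration:

```
github.com/G-Node/libgin v0.3.1 h1:R6yDRkJ0DlUgD7Y4vZrdKX0DzaTDMYfSbWRm+hw3k3I=
github.com/G-Node/libgin v0.3.1/go.mod h1:2znNE//YIHBGGXeYCHf0q5+IiOalliAbpG3nGZq9hKY=
```

This is why `go mod tidy` can remove the bare content-hash lines for modules such as go-spew, docker and the superseded G-Node module versions while keeping their `/go.mod` entries: those modules still appear in the dependency graph but are no longer compiled, whereas dependencies that are now built and vendored (minwinsvc, msteinert/pam, appengine v1.6.1) gain an `h1:` content hash.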

+ 0 - 49
vendor/gitea.com/xorm/builder/cond_if.go

@@ -1,49 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package builder
-
-type condIf struct {
-	condition bool
-	condTrue  Cond
-	condFalse Cond
-}
-
-var _ Cond = condIf{}
-
-// If returns Cond via condition
-func If(condition bool, condTrue Cond, condFalse ...Cond) Cond {
-	var c = condIf{
-		condition: condition,
-		condTrue:  condTrue,
-	}
-	if len(condFalse) > 0 {
-		c.condFalse = condFalse[0]
-	}
-	return c
-}
-
-func (condIf condIf) WriteTo(w Writer) error {
-	if condIf.condition {
-		return condIf.condTrue.WriteTo(w)
-	} else if condIf.condFalse != nil {
-		return condIf.condFalse.WriteTo(w)
-	}
-	return nil
-}
-
-func (condIf condIf) And(conds ...Cond) Cond {
-	return And(condIf, And(conds...))
-}
-
-func (condIf condIf) Or(conds ...Cond) Cond {
-	return Or(condIf, Or(conds...))
-}
-
-func (condIf condIf) IsValid() bool {
-	if condIf.condition {
-		return condIf.condTrue != nil
-	}
-	return condIf.condFalse != nil
-}

+ 0 - 116
vendor/gitea.com/xorm/core/README.md

@@ -1,116 +0,0 @@
-Core is a lightweight wrapper of sql.DB.
-
-[![Build Status](https://drone.gitea.com/api/badges/xorm/core/status.svg)](https://drone.gitea.com/xorm/core)
-
-# Open
-```Go
-db, _ := core.Open(db, connstr)
-```
-
-# SetMapper
-```Go
-db.SetMapper(SameMapper())
-```
-
-## Scan usage
-
-### Scan
-```Go
-rows, _ := db.Query()
-for rows.Next() {
-    rows.Scan()
-}
-```
-
-### ScanMap
-```Go
-rows, _ := db.Query()
-for rows.Next() {
-    rows.ScanMap()
-```
-
-### ScanSlice
-
-You can use `[]string`, `[][]byte`, `[]interface{}`, `[]*string`, `[]sql.NullString` to ScanSclice. Notice, slice's length should be equal or less than select columns.
-
-```Go
-rows, _ := db.Query()
-cols, _ := rows.Columns()
-for rows.Next() {
-    var s = make([]string, len(cols))
-    rows.ScanSlice(&s)
-}
-```
-
-```Go
-rows, _ := db.Query()
-cols, _ := rows.Columns()
-for rows.Next() {
-    var s = make([]*string, len(cols))
-    rows.ScanSlice(&s)
-}
-```
-
-### ScanStruct
-```Go
-rows, _ := db.Query()
-for rows.Next() {
-    rows.ScanStructByName()
-    rows.ScanStructByIndex()
-}
-```
-
-## Query usage
-```Go
-rows, err := db.Query("select * from table where name = ?", name)
-
-user = User{
-    Name:"lunny",
-}
-rows, err := db.QueryStruct("select * from table where name = ?Name",
-            &user)
-
-var user = map[string]interface{}{
-    "name": "lunny",
-}
-rows, err = db.QueryMap("select * from table where name = ?name",
-            &user)
-```
-
-## QueryRow usage
-```Go
-row := db.QueryRow("select * from table where name = ?", name)
-
-user = User{
-    Name:"lunny",
-}
-row := db.QueryRowStruct("select * from table where name = ?Name",
-            &user)
-
-var user = map[string]interface{}{
-    "name": "lunny",
-}
-row = db.QueryRowMap("select * from table where name = ?name",
-            &user)
-```
-
-## Exec usage
-```Go
-db.Exec("insert into user (`name`, title, age, alias, nick_name,created) values (?,?,?,?,?,?)", name, title, age, alias...)
-
-user = User{
-    Name:"lunny",
-    Title:"test",
-    Age: 18,
-}
-result, err = db.ExecStruct("insert into user (`name`, title, age, alias, nick_name,created) values (?Name,?Title,?Age,?Alias,?NickName,?Created)",
-            &user)
-
-var user = map[string]interface{}{
-    "Name": "lunny",
-    "Title": "test",
-    "Age": 18,
-}
-result, err = db.ExecMap("insert into user (`name`, title, age, alias, nick_name,created) values (?Name,?Title,?Age,?Alias,?NickName,?Created)",
-            &user)
-```

+ 0 - 1
vendor/gitea.com/xorm/core/benchmark.sh

@@ -1 +0,0 @@
-go test -v -bench=. -run=XXX

+ 0 - 95
vendor/gitea.com/xorm/core/cache.go

@@ -1,95 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package core
-
-import (
-	"bytes"
-	"encoding/gob"
-	"errors"
-	"fmt"
-	"strings"
-	"time"
-)
-
-const (
-	// CacheExpired is default cache expired time
-	CacheExpired = 60 * time.Minute
-	// CacheMaxMemory is not use now
-	CacheMaxMemory = 256
-	// CacheGcInterval represents interval time to clear all expired nodes
-	CacheGcInterval = 10 * time.Minute
-	// CacheGcMaxRemoved represents max nodes removed when gc
-	CacheGcMaxRemoved = 20
-)
-
-// list all the errors
-var (
-	ErrCacheMiss = errors.New("xorm/cache: key not found")
-	ErrNotStored = errors.New("xorm/cache: not stored")
-)
-
-// CacheStore is a interface to store cache
-type CacheStore interface {
-	// key is primary key or composite primary key
-	// value is struct's pointer
-	// key format : <tablename>-p-<pk1>-<pk2>...
-	Put(key string, value interface{}) error
-	Get(key string) (interface{}, error)
-	Del(key string) error
-}
-
-// Cacher is an interface to provide cache
-// id format : u-<pk1>-<pk2>...
-type Cacher interface {
-	GetIds(tableName, sql string) interface{}
-	GetBean(tableName string, id string) interface{}
-	PutIds(tableName, sql string, ids interface{})
-	PutBean(tableName string, id string, obj interface{})
-	DelIds(tableName, sql string)
-	DelBean(tableName string, id string)
-	ClearIds(tableName string)
-	ClearBeans(tableName string)
-}
-
-func encodeIds(ids []PK) (string, error) {
-	buf := new(bytes.Buffer)
-	enc := gob.NewEncoder(buf)
-	err := enc.Encode(ids)
-
-	return buf.String(), err
-}
-
-func decodeIds(s string) ([]PK, error) {
-	pks := make([]PK, 0)
-
-	dec := gob.NewDecoder(strings.NewReader(s))
-	err := dec.Decode(&pks)
-
-	return pks, err
-}
-
-// GetCacheSql returns cacher PKs via SQL
-func GetCacheSql(m Cacher, tableName, sql string, args interface{}) ([]PK, error) {
-	bytes := m.GetIds(tableName, GenSqlKey(sql, args))
-	if bytes == nil {
-		return nil, errors.New("Not Exist")
-	}
-	return decodeIds(bytes.(string))
-}
-
-// PutCacheSql puts cacher SQL and PKs
-func PutCacheSql(m Cacher, ids []PK, tableName, sql string, args interface{}) error {
-	bytes, err := encodeIds(ids)
-	if err != nil {
-		return err
-	}
-	m.PutIds(tableName, GenSqlKey(sql, args), bytes)
-	return nil
-}
-
-// GenSqlKey generates cache key
-func GenSqlKey(sql string, args interface{}) string {
-	return fmt.Sprintf("%v-%v", sql, args)
-}

+ 0 - 166
vendor/gitea.com/xorm/core/column.go

@@ -1,166 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package core
-
-import (
-	"fmt"
-	"reflect"
-	"strings"
-	"time"
-)
-
-const (
-	TWOSIDES = iota + 1
-	ONLYTODB
-	ONLYFROMDB
-)
-
-// Column defines database column
-type Column struct {
-	Name            string
-	TableName       string
-	FieldName       string
-	SQLType         SQLType
-	IsJSON          bool
-	Length          int
-	Length2         int
-	Nullable        bool
-	Default         string
-	Indexes         map[string]int
-	IsPrimaryKey    bool
-	IsAutoIncrement bool
-	MapType         int
-	IsCreated       bool
-	IsUpdated       bool
-	IsDeleted       bool
-	IsCascade       bool
-	IsVersion       bool
-	DefaultIsEmpty  bool
-	EnumOptions     map[string]int
-	SetOptions      map[string]int
-	DisableTimeZone bool
-	TimeZone        *time.Location // column specified time zone
-	Comment         string
-}
-
-// NewColumn creates a new column
-func NewColumn(name, fieldName string, sqlType SQLType, len1, len2 int, nullable bool) *Column {
-	return &Column{
-		Name:            name,
-		TableName:       "",
-		FieldName:       fieldName,
-		SQLType:         sqlType,
-		Length:          len1,
-		Length2:         len2,
-		Nullable:        nullable,
-		Default:         "",
-		Indexes:         make(map[string]int),
-		IsPrimaryKey:    false,
-		IsAutoIncrement: false,
-		MapType:         TWOSIDES,
-		IsCreated:       false,
-		IsUpdated:       false,
-		IsDeleted:       false,
-		IsCascade:       false,
-		IsVersion:       false,
-		DefaultIsEmpty:  false,
-		EnumOptions:     make(map[string]int),
-		Comment:         "",
-	}
-}
-
-// String generate column description string according dialect
-func (col *Column) String(d Dialect) string {
-	sql := d.QuoteStr() + col.Name + d.QuoteStr() + " "
-
-	sql += d.SqlType(col) + " "
-
-	if col.IsPrimaryKey {
-		sql += "PRIMARY KEY "
-		if col.IsAutoIncrement {
-			sql += d.AutoIncrStr() + " "
-		}
-	}
-
-	if col.Default != "" {
-		sql += "DEFAULT " + col.Default + " "
-	}
-
-	if d.ShowCreateNull() {
-		if col.Nullable {
-			sql += "NULL "
-		} else {
-			sql += "NOT NULL "
-		}
-	}
-
-	return sql
-}
-
-// StringNoPk generate column description string according dialect without primary keys
-func (col *Column) StringNoPk(d Dialect) string {
-	sql := d.QuoteStr() + col.Name + d.QuoteStr() + " "
-
-	sql += d.SqlType(col) + " "
-
-	if col.Default != "" {
-		sql += "DEFAULT " + col.Default + " "
-	}
-
-	if d.ShowCreateNull() {
-		if col.Nullable {
-			sql += "NULL "
-		} else {
-			sql += "NOT NULL "
-		}
-	}
-
-	return sql
-}
-
-// ValueOf returns column's filed of struct's value
-func (col *Column) ValueOf(bean interface{}) (*reflect.Value, error) {
-	dataStruct := reflect.Indirect(reflect.ValueOf(bean))
-	return col.ValueOfV(&dataStruct)
-}
-
-// ValueOfV returns column's filed of struct's value accept reflevt value
-func (col *Column) ValueOfV(dataStruct *reflect.Value) (*reflect.Value, error) {
-	var fieldValue reflect.Value
-	fieldPath := strings.Split(col.FieldName, ".")
-
-	if dataStruct.Type().Kind() == reflect.Map {
-		keyValue := reflect.ValueOf(fieldPath[len(fieldPath)-1])
-		fieldValue = dataStruct.MapIndex(keyValue)
-		return &fieldValue, nil
-	} else if dataStruct.Type().Kind() == reflect.Interface {
-		structValue := reflect.ValueOf(dataStruct.Interface())
-		dataStruct = &structValue
-	}
-
-	level := len(fieldPath)
-	fieldValue = dataStruct.FieldByName(fieldPath[0])
-	for i := 0; i < level-1; i++ {
-		if !fieldValue.IsValid() {
-			break
-		}
-		if fieldValue.Kind() == reflect.Struct {
-			fieldValue = fieldValue.FieldByName(fieldPath[i+1])
-		} else if fieldValue.Kind() == reflect.Ptr {
-			if fieldValue.IsNil() {
-				fieldValue.Set(reflect.New(fieldValue.Type().Elem()))
-			}
-			fieldValue = fieldValue.Elem().FieldByName(fieldPath[i+1])
-		} else {
-			return nil, fmt.Errorf("field %v is not valid", col.FieldName)
-		}
-	}
-
-	if !fieldValue.IsValid() {
-		return nil, fmt.Errorf("field %v is not valid", col.FieldName)
-	}
-
-	return &fieldValue, nil
-}

+ 0 - 12
vendor/gitea.com/xorm/core/converstion.go

@@ -1,12 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package core
-
-// Conversion is an interface. A type implements Conversion will according
-// the custom method to fill into database and retrieve from database.
-type Conversion interface {
-	FromDB([]byte) error
-	ToDB() ([]byte, error)
-}

+ 0 - 225
vendor/gitea.com/xorm/core/db.go

@@ -1,225 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package core
-
-import (
-	"context"
-	"database/sql"
-	"database/sql/driver"
-	"fmt"
-	"reflect"
-	"regexp"
-	"sync"
-)
-
-var (
-	DefaultCacheSize = 200
-)
-
-func MapToSlice(query string, mp interface{}) (string, []interface{}, error) {
-	vv := reflect.ValueOf(mp)
-	if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map {
-		return "", []interface{}{}, ErrNoMapPointer
-	}
-
-	args := make([]interface{}, 0, len(vv.Elem().MapKeys()))
-	var err error
-	query = re.ReplaceAllStringFunc(query, func(src string) string {
-		v := vv.Elem().MapIndex(reflect.ValueOf(src[1:]))
-		if !v.IsValid() {
-			err = fmt.Errorf("map key %s is missing", src[1:])
-		} else {
-			args = append(args, v.Interface())
-		}
-		return "?"
-	})
-
-	return query, args, err
-}
-
-func StructToSlice(query string, st interface{}) (string, []interface{}, error) {
-	vv := reflect.ValueOf(st)
-	if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct {
-		return "", []interface{}{}, ErrNoStructPointer
-	}
-
-	args := make([]interface{}, 0)
-	var err error
-	query = re.ReplaceAllStringFunc(query, func(src string) string {
-		fv := vv.Elem().FieldByName(src[1:]).Interface()
-		if v, ok := fv.(driver.Valuer); ok {
-			var value driver.Value
-			value, err = v.Value()
-			if err != nil {
-				return "?"
-			}
-			args = append(args, value)
-		} else {
-			args = append(args, fv)
-		}
-		return "?"
-	})
-	if err != nil {
-		return "", []interface{}{}, err
-	}
-	return query, args, nil
-}
-
-type cacheStruct struct {
-	value reflect.Value
-	idx   int
-}
-
-// DB is a wrap of sql.DB with extra contents
-type DB struct {
-	*sql.DB
-	Mapper            IMapper
-	reflectCache      map[reflect.Type]*cacheStruct
-	reflectCacheMutex sync.RWMutex
-}
-
-// Open opens a database
-func Open(driverName, dataSourceName string) (*DB, error) {
-	db, err := sql.Open(driverName, dataSourceName)
-	if err != nil {
-		return nil, err
-	}
-	return &DB{
-		DB:           db,
-		Mapper:       NewCacheMapper(&SnakeMapper{}),
-		reflectCache: make(map[reflect.Type]*cacheStruct),
-	}, nil
-}
-
-// FromDB creates a DB from a sql.DB
-func FromDB(db *sql.DB) *DB {
-	return &DB{
-		DB:           db,
-		Mapper:       NewCacheMapper(&SnakeMapper{}),
-		reflectCache: make(map[reflect.Type]*cacheStruct),
-	}
-}
-
-func (db *DB) reflectNew(typ reflect.Type) reflect.Value {
-	db.reflectCacheMutex.Lock()
-	defer db.reflectCacheMutex.Unlock()
-	cs, ok := db.reflectCache[typ]
-	if !ok || cs.idx+1 > DefaultCacheSize-1 {
-		cs = &cacheStruct{reflect.MakeSlice(reflect.SliceOf(typ), DefaultCacheSize, DefaultCacheSize), 0}
-		db.reflectCache[typ] = cs
-	} else {
-		cs.idx = cs.idx + 1
-	}
-	return cs.value.Index(cs.idx).Addr()
-}
-
-// QueryContext overwrites sql.DB.QueryContext
-func (db *DB) QueryContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
-	rows, err := db.DB.QueryContext(ctx, query, args...)
-	if err != nil {
-		if rows != nil {
-			rows.Close()
-		}
-		return nil, err
-	}
-	return &Rows{rows, db}, nil
-}
-
-// Query overwrites sql.DB.Query
-func (db *DB) Query(query string, args ...interface{}) (*Rows, error) {
-	return db.QueryContext(context.Background(), query, args...)
-}
-
-// QueryMapContext executes query with parameters via map and context
-func (db *DB) QueryMapContext(ctx context.Context, query string, mp interface{}) (*Rows, error) {
-	query, args, err := MapToSlice(query, mp)
-	if err != nil {
-		return nil, err
-	}
-	return db.QueryContext(ctx, query, args...)
-}
-
-// QueryMap executes query with parameters via map
-func (db *DB) QueryMap(query string, mp interface{}) (*Rows, error) {
-	return db.QueryMapContext(context.Background(), query, mp)
-}
-
-func (db *DB) QueryStructContext(ctx context.Context, query string, st interface{}) (*Rows, error) {
-	query, args, err := StructToSlice(query, st)
-	if err != nil {
-		return nil, err
-	}
-	return db.QueryContext(ctx, query, args...)
-}
-
-func (db *DB) QueryStruct(query string, st interface{}) (*Rows, error) {
-	return db.QueryStructContext(context.Background(), query, st)
-}
-
-func (db *DB) QueryRowContext(ctx context.Context, query string, args ...interface{}) *Row {
-	rows, err := db.QueryContext(ctx, query, args...)
-	if err != nil {
-		return &Row{nil, err}
-	}
-	return &Row{rows, nil}
-}
-
-func (db *DB) QueryRow(query string, args ...interface{}) *Row {
-	return db.QueryRowContext(context.Background(), query, args...)
-}
-
-func (db *DB) QueryRowMapContext(ctx context.Context, query string, mp interface{}) *Row {
-	query, args, err := MapToSlice(query, mp)
-	if err != nil {
-		return &Row{nil, err}
-	}
-	return db.QueryRowContext(ctx, query, args...)
-}
-
-func (db *DB) QueryRowMap(query string, mp interface{}) *Row {
-	return db.QueryRowMapContext(context.Background(), query, mp)
-}
-
-func (db *DB) QueryRowStructContext(ctx context.Context, query string, st interface{}) *Row {
-	query, args, err := StructToSlice(query, st)
-	if err != nil {
-		return &Row{nil, err}
-	}
-	return db.QueryRowContext(ctx, query, args...)
-}
-
-func (db *DB) QueryRowStruct(query string, st interface{}) *Row {
-	return db.QueryRowStructContext(context.Background(), query, st)
-}
-
-var (
-	re = regexp.MustCompile(`[?](\w+)`)
-)
-
-// insert into (name) values (?)
-// insert into (name) values (?name)
-func (db *DB) ExecMapContext(ctx context.Context, query string, mp interface{}) (sql.Result, error) {
-	query, args, err := MapToSlice(query, mp)
-	if err != nil {
-		return nil, err
-	}
-	return db.DB.ExecContext(ctx, query, args...)
-}
-
-func (db *DB) ExecMap(query string, mp interface{}) (sql.Result, error) {
-	return db.ExecMapContext(context.Background(), query, mp)
-}
-
-func (db *DB) ExecStructContext(ctx context.Context, query string, st interface{}) (sql.Result, error) {
-	query, args, err := StructToSlice(query, st)
-	if err != nil {
-		return nil, err
-	}
-	return db.DB.ExecContext(ctx, query, args...)
-}
-
-func (db *DB) ExecStruct(query string, st interface{}) (sql.Result, error) {
-	return db.ExecStructContext(context.Background(), query, st)
-}

+ 0 - 320
vendor/gitea.com/xorm/core/dialect.go

@@ -1,320 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package core
-
-import (
-	"fmt"
-	"strings"
-	"time"
-)
-
-type DbType string
-
-type Uri struct {
-	DbType  DbType
-	Proto   string
-	Host    string
-	Port    string
-	DbName  string
-	User    string
-	Passwd  string
-	Charset string
-	Laddr   string
-	Raddr   string
-	Timeout time.Duration
-	Schema  string
-}
-
-// a dialect is a driver's wrapper
-type Dialect interface {
-	SetLogger(logger ILogger)
-	Init(*DB, *Uri, string, string) error
-	URI() *Uri
-	DB() *DB
-	DBType() DbType
-	SqlType(*Column) string
-	FormatBytes(b []byte) string
-
-	DriverName() string
-	DataSourceName() string
-
-	QuoteStr() string
-	IsReserved(string) bool
-	Quote(string) string
-	AndStr() string
-	OrStr() string
-	EqStr() string
-	RollBackStr() string
-	AutoIncrStr() string
-
-	SupportInsertMany() bool
-	SupportEngine() bool
-	SupportCharset() bool
-	SupportDropIfExists() bool
-	IndexOnTable() bool
-	ShowCreateNull() bool
-
-	IndexCheckSql(tableName, idxName string) (string, []interface{})
-	TableCheckSql(tableName string) (string, []interface{})
-
-	IsColumnExist(tableName string, colName string) (bool, error)
-
-	CreateTableSql(table *Table, tableName, storeEngine, charset string) string
-	DropTableSql(tableName string) string
-	CreateIndexSql(tableName string, index *Index) string
-	DropIndexSql(tableName string, index *Index) string
-
-	ModifyColumnSql(tableName string, col *Column) string
-
-	ForUpdateSql(query string) string
-
-	//CreateTableIfNotExists(table *Table, tableName, storeEngine, charset string) error
-	//MustDropTable(tableName string) error
-
-	GetColumns(tableName string) ([]string, map[string]*Column, error)
-	GetTables() ([]*Table, error)
-	GetIndexes(tableName string) (map[string]*Index, error)
-
-	Filters() []Filter
-	SetParams(params map[string]string)
-}
-
-func OpenDialect(dialect Dialect) (*DB, error) {
-	return Open(dialect.DriverName(), dialect.DataSourceName())
-}
-
-// Base represents a basic dialect and all real dialects could embed this struct
-type Base struct {
-	db             *DB
-	dialect        Dialect
-	driverName     string
-	dataSourceName string
-	logger         ILogger
-	*Uri
-}
-
-func (b *Base) DB() *DB {
-	return b.db
-}
-
-func (b *Base) SetLogger(logger ILogger) {
-	b.logger = logger
-}
-
-func (b *Base) Init(db *DB, dialect Dialect, uri *Uri, drivername, dataSourceName string) error {
-	b.db, b.dialect, b.Uri = db, dialect, uri
-	b.driverName, b.dataSourceName = drivername, dataSourceName
-	return nil
-}
-
-func (b *Base) URI() *Uri {
-	return b.Uri
-}
-
-func (b *Base) DBType() DbType {
-	return b.Uri.DbType
-}
-
-func (b *Base) FormatBytes(bs []byte) string {
-	return fmt.Sprintf("0x%x", bs)
-}
-
-func (b *Base) DriverName() string {
-	return b.driverName
-}
-
-func (b *Base) ShowCreateNull() bool {
-	return true
-}
-
-func (b *Base) DataSourceName() string {
-	return b.dataSourceName
-}
-
-func (b *Base) AndStr() string {
-	return "AND"
-}
-
-func (b *Base) OrStr() string {
-	return "OR"
-}
-
-func (b *Base) EqStr() string {
-	return "="
-}
-
-func (db *Base) RollBackStr() string {
-	return "ROLL BACK"
-}
-
-func (db *Base) SupportDropIfExists() bool {
-	return true
-}
-
-func (db *Base) DropTableSql(tableName string) string {
-	quote := db.dialect.Quote
-	return fmt.Sprintf("DROP TABLE IF EXISTS %s", quote(tableName))
-}
-
-func (db *Base) HasRecords(query string, args ...interface{}) (bool, error) {
-	db.LogSQL(query, args)
-	rows, err := db.DB().Query(query, args...)
-	if err != nil {
-		return false, err
-	}
-	defer rows.Close()
-
-	if rows.Next() {
-		return true, nil
-	}
-	return false, nil
-}
-
-func (db *Base) IsColumnExist(tableName, colName string) (bool, error) {
-	query := "SELECT `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ? AND `COLUMN_NAME` = ?"
-	query = strings.Replace(query, "`", db.dialect.QuoteStr(), -1)
-	return db.HasRecords(query, db.DbName, tableName, colName)
-}
-
-/*
-func (db *Base) CreateTableIfNotExists(table *Table, tableName, storeEngine, charset string) error {
-	sql, args := db.dialect.TableCheckSql(tableName)
-	rows, err := db.DB().Query(sql, args...)
-	if db.Logger != nil {
-		db.Logger.Info("[sql]", sql, args)
-	}
-	if err != nil {
-		return err
-	}
-	defer rows.Close()
-
-	if rows.Next() {
-		return nil
-	}
-
-	sql = db.dialect.CreateTableSql(table, tableName, storeEngine, charset)
-	_, err = db.DB().Exec(sql)
-	if db.Logger != nil {
-		db.Logger.Info("[sql]", sql)
-	}
-	return err
-}*/
-
-func (db *Base) CreateIndexSql(tableName string, index *Index) string {
-	quote := db.dialect.Quote
-	var unique string
-	var idxName string
-	if index.Type == UniqueType {
-		unique = " UNIQUE"
-	}
-	idxName = index.XName(tableName)
-	return fmt.Sprintf("CREATE%s INDEX %v ON %v (%v)", unique,
-		quote(idxName), quote(tableName),
-		quote(strings.Join(index.Cols, quote(","))))
-}
-
-func (db *Base) DropIndexSql(tableName string, index *Index) string {
-	quote := db.dialect.Quote
-	var name string
-	if index.IsRegular {
-		name = index.XName(tableName)
-	} else {
-		name = index.Name
-	}
-	return fmt.Sprintf("DROP INDEX %v ON %s", quote(name), quote(tableName))
-}
-
-func (db *Base) ModifyColumnSql(tableName string, col *Column) string {
-	return fmt.Sprintf("alter table %s MODIFY COLUMN %s", tableName, col.StringNoPk(db.dialect))
-}
-
-func (b *Base) CreateTableSql(table *Table, tableName, storeEngine, charset string) string {
-	var sql string
-	sql = "CREATE TABLE IF NOT EXISTS "
-	if tableName == "" {
-		tableName = table.Name
-	}
-
-	sql += b.dialect.Quote(tableName)
-	sql += " ("
-
-	if len(table.ColumnsSeq()) > 0 {
-		pkList := table.PrimaryKeys
-
-		for _, colName := range table.ColumnsSeq() {
-			col := table.GetColumn(colName)
-			if col.IsPrimaryKey && len(pkList) == 1 {
-				sql += col.String(b.dialect)
-			} else {
-				sql += col.StringNoPk(b.dialect)
-			}
-			sql = strings.TrimSpace(sql)
-			if b.DriverName() == MYSQL && len(col.Comment) > 0 {
-				sql += " COMMENT '" + col.Comment + "'"
-			}
-			sql += ", "
-		}
-
-		if len(pkList) > 1 {
-			sql += "PRIMARY KEY ( "
-			sql += b.dialect.Quote(strings.Join(pkList, b.dialect.Quote(",")))
-			sql += " ), "
-		}
-
-		sql = sql[:len(sql)-2]
-	}
-	sql += ")"
-
-	if b.dialect.SupportEngine() && storeEngine != "" {
-		sql += " ENGINE=" + storeEngine
-	}
-	if b.dialect.SupportCharset() {
-		if len(charset) == 0 {
-			charset = b.dialect.URI().Charset
-		}
-		if len(charset) > 0 {
-			sql += " DEFAULT CHARSET " + charset
-		}
-	}
-
-	return sql
-}
-
-func (b *Base) ForUpdateSql(query string) string {
-	return query + " FOR UPDATE"
-}
-
-func (b *Base) LogSQL(sql string, args []interface{}) {
-	if b.logger != nil && b.logger.IsShowSQL() {
-		if len(args) > 0 {
-			b.logger.Infof("[SQL] %v %v", sql, args)
-		} else {
-			b.logger.Infof("[SQL] %v", sql)
-		}
-	}
-}
-
-func (b *Base) SetParams(params map[string]string) {
-}
-
-var (
-	dialects = map[string]func() Dialect{}
-)
-
-// RegisterDialect register database dialect
-func RegisterDialect(dbName DbType, dialectFunc func() Dialect) {
-	if dialectFunc == nil {
-		panic("core: Register dialect is nil")
-	}
-	dialects[strings.ToLower(string(dbName))] = dialectFunc // !nashtsai! allow override dialect
-}
-
-// QueryDialect query if registed database dialect
-func QueryDialect(dbName DbType) Dialect {
-	if d, ok := dialects[strings.ToLower(string(dbName))]; ok {
-		return d()
-	}
-	return nil
-}

+ 0 - 31
vendor/gitea.com/xorm/core/driver.go

@@ -1,31 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package core
-
-type Driver interface {
-	Parse(string, string) (*Uri, error)
-}
-
-var (
-	drivers = map[string]Driver{}
-)
-
-func RegisterDriver(driverName string, driver Driver) {
-	if driver == nil {
-		panic("core: Register driver is nil")
-	}
-	if _, dup := drivers[driverName]; dup {
-		panic("core: Register called twice for driver " + driverName)
-	}
-	drivers[driverName] = driver
-}
-
-func QueryDriver(driverName string) Driver {
-	return drivers[driverName]
-}
-
-func RegisteredDriverSize() int {
-	return len(drivers)
-}

+ 0 - 12
vendor/gitea.com/xorm/core/error.go

@@ -1,12 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package core
-
-import "errors"
-
-var (
-	ErrNoMapPointer    = errors.New("mp should be a map's pointer")
-	ErrNoStructPointer = errors.New("mp should be a struct's pointer")
-)

+ 0 - 68
vendor/gitea.com/xorm/core/filter.go

@@ -1,68 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package core
-
-import (
-	"fmt"
-	"strings"
-)
-
-// Filter is an interface to filter SQL
-type Filter interface {
-	Do(sql string, dialect Dialect, table *Table) string
-}
-
-// QuoteFilter filter SQL replace ` to database's own quote character
-type QuoteFilter struct {
-}
-
-func (s *QuoteFilter) Do(sql string, dialect Dialect, table *Table) string {
-	return strings.Replace(sql, "`", dialect.QuoteStr(), -1)
-}
-
-// IdFilter filter SQL replace (id) to primary key column name
-type IdFilter struct {
-}
-
-type Quoter struct {
-	dialect Dialect
-}
-
-func NewQuoter(dialect Dialect) *Quoter {
-	return &Quoter{dialect}
-}
-
-func (q *Quoter) Quote(content string) string {
-	return q.dialect.QuoteStr() + content + q.dialect.QuoteStr()
-}
-
-func (i *IdFilter) Do(sql string, dialect Dialect, table *Table) string {
-	quoter := NewQuoter(dialect)
-	if table != nil && len(table.PrimaryKeys) == 1 {
-		sql = strings.Replace(sql, " `(id)` ", " "+quoter.Quote(table.PrimaryKeys[0])+" ", -1)
-		sql = strings.Replace(sql, " "+quoter.Quote("(id)")+" ", " "+quoter.Quote(table.PrimaryKeys[0])+" ", -1)
-		return strings.Replace(sql, " (id) ", " "+quoter.Quote(table.PrimaryKeys[0])+" ", -1)
-	}
-	return sql
-}
-
-// SeqFilter filter SQL replace ?, ? ... to $1, $2 ...
-type SeqFilter struct {
-	Prefix string
-	Start  int
-}
-
-func (s *SeqFilter) Do(sql string, dialect Dialect, table *Table) string {
-	segs := strings.Split(sql, "?")
-	size := len(segs)
-	res := ""
-	for i, c := range segs {
-		if i < size-1 {
-			res += c + fmt.Sprintf("%s%v", s.Prefix, i+s.Start)
-		}
-	}
-	res += segs[size-1]
-	return res
-}

+ 0 - 13
vendor/gitea.com/xorm/core/go.mod

@@ -1,13 +0,0 @@
-module xorm.io/core
-
-require (
-	github.com/go-sql-driver/mysql v1.4.1
-	github.com/golang/protobuf v1.3.1 // indirect
-	github.com/mattn/go-sqlite3 v1.10.0
-	golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 // indirect
-	golang.org/x/net v0.0.0-20190603091049-60506f45cf65 // indirect
-	golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed // indirect
-	golang.org/x/text v0.3.2 // indirect
-	golang.org/x/tools v0.0.0-20190606050223-4d9ae51c2468 // indirect
-	google.golang.org/appengine v1.6.0 // indirect
-)

+ 0 - 23
vendor/gitea.com/xorm/core/go.sum

@@ -1,23 +0,0 @@
-github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
-github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
-github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190606050223-4d9ae51c2468/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=

+ 0 - 35
vendor/gitea.com/xorm/core/ilogger.go

@@ -1,35 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package core
-
-type LogLevel int
-
-const (
-	// !nashtsai! following level also match syslog.Priority value
-	LOG_DEBUG LogLevel = iota
-	LOG_INFO
-	LOG_WARNING
-	LOG_ERR
-	LOG_OFF
-	LOG_UNKNOWN
-)
-
-// logger interface
-type ILogger interface {
-	Debug(v ...interface{})
-	Debugf(format string, v ...interface{})
-	Error(v ...interface{})
-	Errorf(format string, v ...interface{})
-	Info(v ...interface{})
-	Infof(format string, v ...interface{})
-	Warn(v ...interface{})
-	Warnf(format string, v ...interface{})
-
-	Level() LogLevel
-	SetLevel(l LogLevel)
-
-	ShowSQL(show ...bool)
-	IsShowSQL() bool
-}

+ 0 - 71
vendor/gitea.com/xorm/core/index.go

@@ -1,71 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package core
-
-import (
-	"fmt"
-	"strings"
-)
-
-const (
-	IndexType = iota + 1
-	UniqueType
-)
-
-// database index
-type Index struct {
-	IsRegular bool
-	Name      string
-	Type      int
-	Cols      []string
-}
-
-func (index *Index) XName(tableName string) string {
-	if !strings.HasPrefix(index.Name, "UQE_") &&
-		!strings.HasPrefix(index.Name, "IDX_") {
-		tableName = strings.Replace(tableName, `"`, "", -1)
-		tableName = strings.Replace(tableName, `.`, "_", -1)
-		if index.Type == UniqueType {
-			return fmt.Sprintf("UQE_%v_%v", tableName, index.Name)
-		}
-		return fmt.Sprintf("IDX_%v_%v", tableName, index.Name)
-	}
-	return index.Name
-}
-
-// add columns which will be composite index
-func (index *Index) AddColumn(cols ...string) {
-	for _, col := range cols {
-		index.Cols = append(index.Cols, col)
-	}
-}
-
-func (index *Index) Equal(dst *Index) bool {
-	if index.Type != dst.Type {
-		return false
-	}
-	if len(index.Cols) != len(dst.Cols) {
-		return false
-	}
-
-	for i := 0; i < len(index.Cols); i++ {
-		var found bool
-		for j := 0; j < len(dst.Cols); j++ {
-			if index.Cols[i] == dst.Cols[j] {
-				found = true
-				break
-			}
-		}
-		if !found {
-			return false
-		}
-	}
-	return true
-}
-
-// new an index
-func NewIndex(name string, indexType int) *Index {
-	return &Index{true, name, indexType, make([]string, 0)}
-}

+ 0 - 258
vendor/gitea.com/xorm/core/mapper.go

@@ -1,258 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package core
-
-import (
-	"strings"
-	"sync"
-)
-
-// name translation between struct, fields names and table, column names
-type IMapper interface {
-	Obj2Table(string) string
-	Table2Obj(string) string
-}
-
-type CacheMapper struct {
-	oriMapper      IMapper
-	obj2tableCache map[string]string
-	obj2tableMutex sync.RWMutex
-	table2objCache map[string]string
-	table2objMutex sync.RWMutex
-}
-
-func NewCacheMapper(mapper IMapper) *CacheMapper {
-	return &CacheMapper{oriMapper: mapper, obj2tableCache: make(map[string]string),
-		table2objCache: make(map[string]string),
-	}
-}
-
-func (m *CacheMapper) Obj2Table(o string) string {
-	m.obj2tableMutex.RLock()
-	t, ok := m.obj2tableCache[o]
-	m.obj2tableMutex.RUnlock()
-	if ok {
-		return t
-	}
-
-	t = m.oriMapper.Obj2Table(o)
-	m.obj2tableMutex.Lock()
-	m.obj2tableCache[o] = t
-	m.obj2tableMutex.Unlock()
-	return t
-}
-
-func (m *CacheMapper) Table2Obj(t string) string {
-	m.table2objMutex.RLock()
-	o, ok := m.table2objCache[t]
-	m.table2objMutex.RUnlock()
-	if ok {
-		return o
-	}
-
-	o = m.oriMapper.Table2Obj(t)
-	m.table2objMutex.Lock()
-	m.table2objCache[t] = o
-	m.table2objMutex.Unlock()
-	return o
-}
-
-// SameMapper implements IMapper and provides same name between struct and
-// database table
-type SameMapper struct {
-}
-
-func (m SameMapper) Obj2Table(o string) string {
-	return o
-}
-
-func (m SameMapper) Table2Obj(t string) string {
-	return t
-}
-
-// SnakeMapper implements IMapper and provides name transaltion between
-// struct and database table
-type SnakeMapper struct {
-}
-
-func snakeCasedName(name string) string {
-	newstr := make([]rune, 0)
-	for idx, chr := range name {
-		if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
-			if idx > 0 {
-				newstr = append(newstr, '_')
-			}
-			chr -= ('A' - 'a')
-		}
-		newstr = append(newstr, chr)
-	}
-
-	return string(newstr)
-}
-
-func (mapper SnakeMapper) Obj2Table(name string) string {
-	return snakeCasedName(name)
-}
-
-func titleCasedName(name string) string {
-	newstr := make([]rune, 0)
-	upNextChar := true
-
-	name = strings.ToLower(name)
-
-	for _, chr := range name {
-		switch {
-		case upNextChar:
-			upNextChar = false
-			if 'a' <= chr && chr <= 'z' {
-				chr -= ('a' - 'A')
-			}
-		case chr == '_':
-			upNextChar = true
-			continue
-		}
-
-		newstr = append(newstr, chr)
-	}
-
-	return string(newstr)
-}
-
-func (mapper SnakeMapper) Table2Obj(name string) string {
-	return titleCasedName(name)
-}
-
-// GonicMapper implements IMapper. It will consider initialisms when mapping names.
-// E.g. id -> ID, user -> User and to table names: UserID -> user_id, MyUID -> my_uid
-type GonicMapper map[string]bool
-
-func isASCIIUpper(r rune) bool {
-	return 'A' <= r && r <= 'Z'
-}
-
-func toASCIIUpper(r rune) rune {
-	if 'a' <= r && r <= 'z' {
-		r -= ('a' - 'A')
-	}
-	return r
-}
-
-func gonicCasedName(name string) string {
-	newstr := make([]rune, 0, len(name)+3)
-	for idx, chr := range name {
-		if isASCIIUpper(chr) && idx > 0 {
-			if !isASCIIUpper(newstr[len(newstr)-1]) {
-				newstr = append(newstr, '_')
-			}
-		}
-
-		if !isASCIIUpper(chr) && idx > 1 {
-			l := len(newstr)
-			if isASCIIUpper(newstr[l-1]) && isASCIIUpper(newstr[l-2]) {
-				newstr = append(newstr, newstr[l-1])
-				newstr[l-1] = '_'
-			}
-		}
-
-		newstr = append(newstr, chr)
-	}
-	return strings.ToLower(string(newstr))
-}
-
-func (mapper GonicMapper) Obj2Table(name string) string {
-	return gonicCasedName(name)
-}
-
-func (mapper GonicMapper) Table2Obj(name string) string {
-	newstr := make([]rune, 0)
-
-	name = strings.ToLower(name)
-	parts := strings.Split(name, "_")
-
-	for _, p := range parts {
-		_, isInitialism := mapper[strings.ToUpper(p)]
-		for i, r := range p {
-			if i == 0 || isInitialism {
-				r = toASCIIUpper(r)
-			}
-			newstr = append(newstr, r)
-		}
-	}
-
-	return string(newstr)
-}
-
-// A GonicMapper that contains a list of common initialisms taken from golang/lint
-var LintGonicMapper = GonicMapper{
-	"API":   true,
-	"ASCII": true,
-	"CPU":   true,
-	"CSS":   true,
-	"DNS":   true,
-	"EOF":   true,
-	"GUID":  true,
-	"HTML":  true,
-	"HTTP":  true,
-	"HTTPS": true,
-	"ID":    true,
-	"IP":    true,
-	"JSON":  true,
-	"LHS":   true,
-	"QPS":   true,
-	"RAM":   true,
-	"RHS":   true,
-	"RPC":   true,
-	"SLA":   true,
-	"SMTP":  true,
-	"SSH":   true,
-	"TLS":   true,
-	"TTL":   true,
-	"UI":    true,
-	"UID":   true,
-	"UUID":  true,
-	"URI":   true,
-	"URL":   true,
-	"UTF8":  true,
-	"VM":    true,
-	"XML":   true,
-	"XSRF":  true,
-	"XSS":   true,
-}
-
-// provide prefix table name support
-type PrefixMapper struct {
-	Mapper IMapper
-	Prefix string
-}
-
-func (mapper PrefixMapper) Obj2Table(name string) string {
-	return mapper.Prefix + mapper.Mapper.Obj2Table(name)
-}
-
-func (mapper PrefixMapper) Table2Obj(name string) string {
-	return mapper.Mapper.Table2Obj(name[len(mapper.Prefix):])
-}
-
-func NewPrefixMapper(mapper IMapper, prefix string) PrefixMapper {
-	return PrefixMapper{mapper, prefix}
-}
-
-// provide suffix table name support
-type SuffixMapper struct {
-	Mapper IMapper
-	Suffix string
-}
-
-func (mapper SuffixMapper) Obj2Table(name string) string {
-	return mapper.Mapper.Obj2Table(name) + mapper.Suffix
-}
-
-func (mapper SuffixMapper) Table2Obj(name string) string {
-	return mapper.Mapper.Table2Obj(name[:len(name)-len(mapper.Suffix)])
-}
-
-func NewSuffixMapper(mapper IMapper, suffix string) SuffixMapper {
-	return SuffixMapper{mapper, suffix}
-}

+ 0 - 30
vendor/gitea.com/xorm/core/pk.go

@@ -1,30 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package core
-
-import (
-	"bytes"
-	"encoding/gob"
-)
-
-type PK []interface{}
-
-func NewPK(pks ...interface{}) *PK {
-	p := PK(pks)
-	return &p
-}
-
-func (p *PK) ToString() (string, error) {
-	buf := new(bytes.Buffer)
-	enc := gob.NewEncoder(buf)
-	err := enc.Encode(*p)
-	return buf.String(), err
-}
-
-func (p *PK) FromString(content string) error {
-	dec := gob.NewDecoder(bytes.NewBufferString(content))
-	err := dec.Decode(p)
-	return err
-}

+ 0 - 338
vendor/gitea.com/xorm/core/rows.go

@@ -1,338 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package core
-
-import (
-	"database/sql"
-	"errors"
-	"reflect"
-	"sync"
-)
-
-type Rows struct {
-	*sql.Rows
-	db *DB
-}
-
-func (rs *Rows) ToMapString() ([]map[string]string, error) {
-	cols, err := rs.Columns()
-	if err != nil {
-		return nil, err
-	}
-
-	var results = make([]map[string]string, 0, 10)
-	for rs.Next() {
-		var record = make(map[string]string, len(cols))
-		err = rs.ScanMap(&record)
-		if err != nil {
-			return nil, err
-		}
-		results = append(results, record)
-	}
-	return results, nil
-}
-
-// scan data to a struct's pointer according field index
-func (rs *Rows) ScanStructByIndex(dest ...interface{}) error {
-	if len(dest) == 0 {
-		return errors.New("at least one struct")
-	}
-
-	vvvs := make([]reflect.Value, len(dest))
-	for i, s := range dest {
-		vv := reflect.ValueOf(s)
-		if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct {
-			return errors.New("dest should be a struct's pointer")
-		}
-
-		vvvs[i] = vv.Elem()
-	}
-
-	cols, err := rs.Columns()
-	if err != nil {
-		return err
-	}
-	newDest := make([]interface{}, len(cols))
-
-	var i = 0
-	for _, vvv := range vvvs {
-		for j := 0; j < vvv.NumField(); j++ {
-			newDest[i] = vvv.Field(j).Addr().Interface()
-			i = i + 1
-		}
-	}
-
-	return rs.Rows.Scan(newDest...)
-}
-
-var (
-	fieldCache      = make(map[reflect.Type]map[string]int)
-	fieldCacheMutex sync.RWMutex
-)
-
-func fieldByName(v reflect.Value, name string) reflect.Value {
-	t := v.Type()
-	fieldCacheMutex.RLock()
-	cache, ok := fieldCache[t]
-	fieldCacheMutex.RUnlock()
-	if !ok {
-		cache = make(map[string]int)
-		for i := 0; i < v.NumField(); i++ {
-			cache[t.Field(i).Name] = i
-		}
-		fieldCacheMutex.Lock()
-		fieldCache[t] = cache
-		fieldCacheMutex.Unlock()
-	}
-
-	if i, ok := cache[name]; ok {
-		return v.Field(i)
-	}
-
-	return reflect.Zero(t)
-}
-
-// ScanStructByName scans row data into a struct pointer according to field name
-func (rs *Rows) ScanStructByName(dest interface{}) error {
-	vv := reflect.ValueOf(dest)
-	if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct {
-		return errors.New("dest should be a struct's pointer")
-	}
-
-	cols, err := rs.Columns()
-	if err != nil {
-		return err
-	}
-
-	newDest := make([]interface{}, len(cols))
-	var v EmptyScanner
-	for j, name := range cols {
-		f := fieldByName(vv.Elem(), rs.db.Mapper.Table2Obj(name))
-		if f.IsValid() {
-			newDest[j] = f.Addr().Interface()
-		} else {
-			newDest[j] = &v
-		}
-	}
-
-	return rs.Rows.Scan(newDest...)
-}
-
-// ScanSlice scans row data into a slice pointer; the slice's length should equal the number of columns
-func (rs *Rows) ScanSlice(dest interface{}) error {
-	vv := reflect.ValueOf(dest)
-	if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Slice {
-		return errors.New("dest should be a slice's pointer")
-	}
-
-	vvv := vv.Elem()
-	cols, err := rs.Columns()
-	if err != nil {
-		return err
-	}
-
-	newDest := make([]interface{}, len(cols))
-
-	for j := 0; j < len(cols); j++ {
-		if j >= vvv.Len() {
-			newDest[j] = reflect.New(vvv.Type().Elem()).Interface()
-		} else {
-			newDest[j] = vvv.Index(j).Addr().Interface()
-		}
-	}
-
-	err = rs.Rows.Scan(newDest...)
-	if err != nil {
-		return err
-	}
-
-	srcLen := vvv.Len()
-	for i := srcLen; i < len(cols); i++ {
-		vvv = reflect.Append(vvv, reflect.ValueOf(newDest[i]).Elem())
-	}
-	return nil
-}
-
-// ScanMap scans row data into a map pointer
-func (rs *Rows) ScanMap(dest interface{}) error {
-	vv := reflect.ValueOf(dest)
-	if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map {
-		return errors.New("dest should be a map's pointer")
-	}
-
-	cols, err := rs.Columns()
-	if err != nil {
-		return err
-	}
-
-	newDest := make([]interface{}, len(cols))
-	vvv := vv.Elem()
-
-	for i := range cols {
-		newDest[i] = rs.db.reflectNew(vvv.Type().Elem()).Interface()
-	}
-
-	err = rs.Rows.Scan(newDest...)
-	if err != nil {
-		return err
-	}
-
-	for i, name := range cols {
-		vname := reflect.ValueOf(name)
-		vvv.SetMapIndex(vname, reflect.ValueOf(newDest[i]).Elem())
-	}
-
-	return nil
-}
-
-type Row struct {
-	rows *Rows
-	// One of these two will be non-nil:
-	err error // deferred error for easy chaining
-}
-
-// ErrorRow returns a Row carrying the given error
-func ErrorRow(err error) *Row {
-	return &Row{
-		err: err,
-	}
-}
-
-// NewRow creates a Row from rows and a deferred error
-func NewRow(rows *Rows, err error) *Row {
-	return &Row{rows, err}
-}
-
-func (row *Row) Columns() ([]string, error) {
-	if row.err != nil {
-		return nil, row.err
-	}
-	return row.rows.Columns()
-}
-
-func (row *Row) Scan(dest ...interface{}) error {
-	if row.err != nil {
-		return row.err
-	}
-	defer row.rows.Close()
-
-	for _, dp := range dest {
-		if _, ok := dp.(*sql.RawBytes); ok {
-			return errors.New("sql: RawBytes isn't allowed on Row.Scan")
-		}
-	}
-
-	if !row.rows.Next() {
-		if err := row.rows.Err(); err != nil {
-			return err
-		}
-		return sql.ErrNoRows
-	}
-	err := row.rows.Scan(dest...)
-	if err != nil {
-		return err
-	}
-	// Make sure the query can be processed to completion with no errors.
-	return row.rows.Close()
-}
-
-func (row *Row) ScanStructByName(dest interface{}) error {
-	if row.err != nil {
-		return row.err
-	}
-	defer row.rows.Close()
-
-	if !row.rows.Next() {
-		if err := row.rows.Err(); err != nil {
-			return err
-		}
-		return sql.ErrNoRows
-	}
-	err := row.rows.ScanStructByName(dest)
-	if err != nil {
-		return err
-	}
-	// Make sure the query can be processed to completion with no errors.
-	return row.rows.Close()
-}
-
-func (row *Row) ScanStructByIndex(dest interface{}) error {
-	if row.err != nil {
-		return row.err
-	}
-	defer row.rows.Close()
-
-	if !row.rows.Next() {
-		if err := row.rows.Err(); err != nil {
-			return err
-		}
-		return sql.ErrNoRows
-	}
-	err := row.rows.ScanStructByIndex(dest)
-	if err != nil {
-		return err
-	}
-	// Make sure the query can be processed to completion with no errors.
-	return row.rows.Close()
-}
-
-// ScanSlice scans row data into a slice pointer; the slice's length should equal the number of columns
-func (row *Row) ScanSlice(dest interface{}) error {
-	if row.err != nil {
-		return row.err
-	}
-	defer row.rows.Close()
-
-	if !row.rows.Next() {
-		if err := row.rows.Err(); err != nil {
-			return err
-		}
-		return sql.ErrNoRows
-	}
-	err := row.rows.ScanSlice(dest)
-	if err != nil {
-		return err
-	}
-
-	// Make sure the query can be processed to completion with no errors.
-	return row.rows.Close()
-}
-
-// ScanMap scans row data into a map pointer
-func (row *Row) ScanMap(dest interface{}) error {
-	if row.err != nil {
-		return row.err
-	}
-	defer row.rows.Close()
-
-	if !row.rows.Next() {
-		if err := row.rows.Err(); err != nil {
-			return err
-		}
-		return sql.ErrNoRows
-	}
-	err := row.rows.ScanMap(dest)
-	if err != nil {
-		return err
-	}
-
-	// Make sure the query can be processed to completion with no errors.
-	return row.rows.Close()
-}
-
-func (row *Row) ToMapString() (map[string]string, error) {
-	cols, err := row.Columns()
-	if err != nil {
-		return nil, err
-	}
-
-	var record = make(map[string]string, len(cols))
-	err = row.ScanMap(&record)
-	if err != nil {
-		return nil, err
-	}
-
-	return record, nil
-}

+ 0 - 66
vendor/gitea.com/xorm/core/scan.go

@@ -1,66 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package core
-
-import (
-	"database/sql/driver"
-	"fmt"
-	"time"
-)
-
-type NullTime time.Time
-
-var (
-	_ driver.Valuer = NullTime{}
-)
-
-func (ns *NullTime) Scan(value interface{}) error {
-	if value == nil {
-		return nil
-	}
-	return convertTime(ns, value)
-}
-
-// Value implements the driver Valuer interface.
-func (ns NullTime) Value() (driver.Value, error) {
-	if (time.Time)(ns).IsZero() {
-		return nil, nil
-	}
-	return (time.Time)(ns).Format("2006-01-02 15:04:05"), nil
-}
-
-func convertTime(dest *NullTime, src interface{}) error {
-	// Common cases, without reflect.
-	switch s := src.(type) {
-	case string:
-		t, err := time.Parse("2006-01-02 15:04:05", s)
-		if err != nil {
-			return err
-		}
-		*dest = NullTime(t)
-		return nil
-	case []uint8:
-		t, err := time.Parse("2006-01-02 15:04:05", string(s))
-		if err != nil {
-			return err
-		}
-		*dest = NullTime(t)
-		return nil
-	case time.Time:
-		*dest = NullTime(s)
-		return nil
-	case nil:
-	default:
-		return fmt.Errorf("unsupported driver -> Scan pair: %T -> %T", src, dest)
-	}
-	return nil
-}
-
-type EmptyScanner struct {
-}
-
-func (EmptyScanner) Scan(src interface{}) error {
-	return nil
-}

+ 0 - 165
vendor/gitea.com/xorm/core/stmt.go

@@ -1,165 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package core
-
-import (
-	"context"
-	"database/sql"
-	"errors"
-	"reflect"
-)
-
-type Stmt struct {
-	*sql.Stmt
-	db    *DB
-	names map[string]int
-}
-
-func (db *DB) PrepareContext(ctx context.Context, query string) (*Stmt, error) {
-	names := make(map[string]int)
-	var i int
-	query = re.ReplaceAllStringFunc(query, func(src string) string {
-		names[src[1:]] = i
-		i++
-		return "?"
-	})
-
-	stmt, err := db.DB.PrepareContext(ctx, query)
-	if err != nil {
-		return nil, err
-	}
-	return &Stmt{stmt, db, names}, nil
-}
-
-func (db *DB) Prepare(query string) (*Stmt, error) {
-	return db.PrepareContext(context.Background(), query)
-}
-
-func (s *Stmt) ExecMapContext(ctx context.Context, mp interface{}) (sql.Result, error) {
-	vv := reflect.ValueOf(mp)
-	if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map {
-		return nil, errors.New("mp should be a map's pointer")
-	}
-
-	args := make([]interface{}, len(s.names))
-	for k, i := range s.names {
-		args[i] = vv.Elem().MapIndex(reflect.ValueOf(k)).Interface()
-	}
-	return s.Stmt.ExecContext(ctx, args...)
-}
-
-func (s *Stmt) ExecMap(mp interface{}) (sql.Result, error) {
-	return s.ExecMapContext(context.Background(), mp)
-}
-
-func (s *Stmt) ExecStructContext(ctx context.Context, st interface{}) (sql.Result, error) {
-	vv := reflect.ValueOf(st)
-	if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct {
-		return nil, errors.New("st should be a struct's pointer")
-	}
-
-	args := make([]interface{}, len(s.names))
-	for k, i := range s.names {
-		args[i] = vv.Elem().FieldByName(k).Interface()
-	}
-	return s.Stmt.ExecContext(ctx, args...)
-}
-
-func (s *Stmt) ExecStruct(st interface{}) (sql.Result, error) {
-	return s.ExecStructContext(context.Background(), st)
-}
-
-func (s *Stmt) QueryContext(ctx context.Context, args ...interface{}) (*Rows, error) {
-	rows, err := s.Stmt.QueryContext(ctx, args...)
-	if err != nil {
-		return nil, err
-	}
-	return &Rows{rows, s.db}, nil
-}
-
-func (s *Stmt) Query(args ...interface{}) (*Rows, error) {
-	return s.QueryContext(context.Background(), args...)
-}
-
-func (s *Stmt) QueryMapContext(ctx context.Context, mp interface{}) (*Rows, error) {
-	vv := reflect.ValueOf(mp)
-	if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map {
-		return nil, errors.New("mp should be a map's pointer")
-	}
-
-	args := make([]interface{}, len(s.names))
-	for k, i := range s.names {
-		args[i] = vv.Elem().MapIndex(reflect.ValueOf(k)).Interface()
-	}
-
-	return s.QueryContext(ctx, args...)
-}
-
-func (s *Stmt) QueryMap(mp interface{}) (*Rows, error) {
-	return s.QueryMapContext(context.Background(), mp)
-}
-
-func (s *Stmt) QueryStructContext(ctx context.Context, st interface{}) (*Rows, error) {
-	vv := reflect.ValueOf(st)
-	if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct {
-		return nil, errors.New("st should be a struct's pointer")
-	}
-
-	args := make([]interface{}, len(s.names))
-	for k, i := range s.names {
-		args[i] = vv.Elem().FieldByName(k).Interface()
-	}
-
-	return s.Query(args...)
-}
-
-func (s *Stmt) QueryStruct(st interface{}) (*Rows, error) {
-	return s.QueryStructContext(context.Background(), st)
-}
-
-func (s *Stmt) QueryRowContext(ctx context.Context, args ...interface{}) *Row {
-	rows, err := s.QueryContext(ctx, args...)
-	return &Row{rows, err}
-}
-
-func (s *Stmt) QueryRow(args ...interface{}) *Row {
-	return s.QueryRowContext(context.Background(), args...)
-}
-
-func (s *Stmt) QueryRowMapContext(ctx context.Context, mp interface{}) *Row {
-	vv := reflect.ValueOf(mp)
-	if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map {
-		return &Row{nil, errors.New("mp should be a map's pointer")}
-	}
-
-	args := make([]interface{}, len(s.names))
-	for k, i := range s.names {
-		args[i] = vv.Elem().MapIndex(reflect.ValueOf(k)).Interface()
-	}
-
-	return s.QueryRowContext(ctx, args...)
-}
-
-func (s *Stmt) QueryRowMap(mp interface{}) *Row {
-	return s.QueryRowMapContext(context.Background(), mp)
-}
-
-func (s *Stmt) QueryRowStructContext(ctx context.Context, st interface{}) *Row {
-	vv := reflect.ValueOf(st)
-	if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct {
-		return &Row{nil, errors.New("st should be a struct's pointer")}
-	}
-
-	args := make([]interface{}, len(s.names))
-	for k, i := range s.names {
-		args[i] = vv.Elem().FieldByName(k).Interface()
-	}
-
-	return s.QueryRowContext(ctx, args...)
-}
-
-func (s *Stmt) QueryRowStruct(st interface{}) *Row {
-	return s.QueryRowStructContext(context.Background(), st)
-}
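
For reference, a minimal sketch of the named-placeholder flow the deleted Stmt code implements: Prepare rewrites each named token (e.g. ?name) to "?" and records its position, and ExecMap then binds arguments by key from a map pointer. The core.Open call, the sqlite3 driver, and the user table are illustrative assumptions, not part of this diff:

package main

import (
	"log"

	"gitea.com/xorm/core" // vendored package removed by this commit
	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := core.Open("sqlite3", "./test.db")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec(`CREATE TABLE IF NOT EXISTS user (name TEXT, age INTEGER)`); err != nil {
		log.Fatal(err)
	}
	// "?name" and "?age" are replaced with "?" at prepare time; their positions
	// are remembered so ExecMap can order the arguments correctly.
	stmt, err := db.Prepare("INSERT INTO user (name, age) VALUES (?name, ?age)")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := stmt.ExecMap(&map[string]interface{}{"name": "alice", "age": 30}); err != nil {
		log.Fatal(err)
	}
}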

+ 0 - 154
vendor/gitea.com/xorm/core/table.go

@@ -1,154 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package core
-
-import (
-	"reflect"
-	"strings"
-)
-
-// Table represents a database table
-type Table struct {
-	Name          string
-	Type          reflect.Type
-	columnsSeq    []string
-	columnsMap    map[string][]*Column
-	columns       []*Column
-	Indexes       map[string]*Index
-	PrimaryKeys   []string
-	AutoIncrement string
-	Created       map[string]bool
-	Updated       string
-	Deleted       string
-	Version       string
-	Cacher        Cacher
-	StoreEngine   string
-	Charset       string
-	Comment       string
-}
-
-func (table *Table) Columns() []*Column {
-	return table.columns
-}
-
-func (table *Table) ColumnsSeq() []string {
-	return table.columnsSeq
-}
-
-func NewEmptyTable() *Table {
-	return NewTable("", nil)
-}
-
-func NewTable(name string, t reflect.Type) *Table {
-	return &Table{Name: name, Type: t,
-		columnsSeq:  make([]string, 0),
-		columns:     make([]*Column, 0),
-		columnsMap:  make(map[string][]*Column),
-		Indexes:     make(map[string]*Index),
-		Created:     make(map[string]bool),
-		PrimaryKeys: make([]string, 0),
-	}
-}
-
-func (table *Table) columnsByName(name string) []*Column {
-	n := len(name)
-
-	for k := range table.columnsMap {
-		if len(k) != n {
-			continue
-		}
-		if strings.EqualFold(k, name) {
-			return table.columnsMap[k]
-		}
-	}
-	return nil
-}
-
-func (table *Table) GetColumn(name string) *Column {
-
-	cols := table.columnsByName(name)
-
-	if cols != nil {
-		return cols[0]
-	}
-
-	return nil
-}
-
-func (table *Table) GetColumnIdx(name string, idx int) *Column {
-	cols := table.columnsByName(name)
-
-	if cols != nil && idx < len(cols) {
-		return cols[idx]
-	}
-
-	return nil
-}
-
-// PKColumns returns the table's primary key columns, if any
-func (table *Table) PKColumns() []*Column {
-	columns := make([]*Column, len(table.PrimaryKeys))
-	for i, name := range table.PrimaryKeys {
-		columns[i] = table.GetColumn(name)
-	}
-	return columns
-}
-
-func (table *Table) ColumnType(name string) reflect.Type {
-	t, _ := table.Type.FieldByName(name)
-	return t.Type
-}
-
-func (table *Table) AutoIncrColumn() *Column {
-	return table.GetColumn(table.AutoIncrement)
-}
-
-func (table *Table) VersionColumn() *Column {
-	return table.GetColumn(table.Version)
-}
-
-func (table *Table) UpdatedColumn() *Column {
-	return table.GetColumn(table.Updated)
-}
-
-func (table *Table) DeletedColumn() *Column {
-	return table.GetColumn(table.Deleted)
-}
-
-// AddColumn adds a column to the table
-func (table *Table) AddColumn(col *Column) {
-	table.columnsSeq = append(table.columnsSeq, col.Name)
-	table.columns = append(table.columns, col)
-	colName := strings.ToLower(col.Name)
-	if c, ok := table.columnsMap[colName]; ok {
-		table.columnsMap[colName] = append(c, col)
-	} else {
-		table.columnsMap[colName] = []*Column{col}
-	}
-
-	if col.IsPrimaryKey {
-		table.PrimaryKeys = append(table.PrimaryKeys, col.Name)
-	}
-	if col.IsAutoIncrement {
-		table.AutoIncrement = col.Name
-	}
-	if col.IsCreated {
-		table.Created[col.Name] = true
-	}
-	if col.IsUpdated {
-		table.Updated = col.Name
-	}
-	if col.IsDeleted {
-		table.Deleted = col.Name
-	}
-	if col.IsVersion {
-		table.Version = col.Name
-	}
-}
-
-// AddIndex adds an index or a unique constraint to the table
-func (table *Table) AddIndex(index *Index) {
-	table.Indexes[index.Name] = index
-}
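
For reference, a minimal sketch of the bookkeeping the deleted AddColumn performs (column sequence, primary key list, auto-increment column); the User struct and the column literals are illustrative assumptions:

package main

import (
	"fmt"
	"reflect"

	"gitea.com/xorm/core" // vendored package removed by this commit
)

type User struct {
	Id   int64
	Name string
}

func main() {
	t := core.NewTable("user", reflect.TypeOf(User{}))

	id := &core.Column{Name: "id", SQLType: core.SQLType{Name: core.BigInt}}
	id.IsPrimaryKey = true
	id.IsAutoIncrement = true
	t.AddColumn(id) // records the column order, the PK list and the auto-increment column

	t.AddColumn(&core.Column{Name: "name", SQLType: core.SQLType{Name: core.Varchar, DefaultLength: 255}})

	fmt.Println(t.ColumnsSeq(), t.PrimaryKeys, t.AutoIncrement) // [id name] [id] id
}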

+ 0 - 153
vendor/gitea.com/xorm/core/tx.go

@@ -1,153 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package core
-
-import (
-	"context"
-	"database/sql"
-)
-
-type Tx struct {
-	*sql.Tx
-	db *DB
-}
-
-func (db *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) {
-	tx, err := db.DB.BeginTx(ctx, opts)
-	if err != nil {
-		return nil, err
-	}
-	return &Tx{tx, db}, nil
-}
-
-func (db *DB) Begin() (*Tx, error) {
-	tx, err := db.DB.Begin()
-	if err != nil {
-		return nil, err
-	}
-	return &Tx{tx, db}, nil
-}
-
-func (tx *Tx) PrepareContext(ctx context.Context, query string) (*Stmt, error) {
-	names := make(map[string]int)
-	var i int
-	query = re.ReplaceAllStringFunc(query, func(src string) string {
-		names[src[1:]] = i
-		i++
-		return "?"
-	})
-
-	stmt, err := tx.Tx.PrepareContext(ctx, query)
-	if err != nil {
-		return nil, err
-	}
-	return &Stmt{stmt, tx.db, names}, nil
-}
-
-func (tx *Tx) Prepare(query string) (*Stmt, error) {
-	return tx.PrepareContext(context.Background(), query)
-}
-
-func (tx *Tx) StmtContext(ctx context.Context, stmt *Stmt) *Stmt {
-	stmt.Stmt = tx.Tx.StmtContext(ctx, stmt.Stmt)
-	return stmt
-}
-
-func (tx *Tx) Stmt(stmt *Stmt) *Stmt {
-	return tx.StmtContext(context.Background(), stmt)
-}
-
-func (tx *Tx) ExecMapContext(ctx context.Context, query string, mp interface{}) (sql.Result, error) {
-	query, args, err := MapToSlice(query, mp)
-	if err != nil {
-		return nil, err
-	}
-	return tx.Tx.ExecContext(ctx, query, args...)
-}
-
-func (tx *Tx) ExecMap(query string, mp interface{}) (sql.Result, error) {
-	return tx.ExecMapContext(context.Background(), query, mp)
-}
-
-func (tx *Tx) ExecStructContext(ctx context.Context, query string, st interface{}) (sql.Result, error) {
-	query, args, err := StructToSlice(query, st)
-	if err != nil {
-		return nil, err
-	}
-	return tx.Tx.ExecContext(ctx, query, args...)
-}
-
-func (tx *Tx) ExecStruct(query string, st interface{}) (sql.Result, error) {
-	return tx.ExecStructContext(context.Background(), query, st)
-}
-
-func (tx *Tx) QueryContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
-	rows, err := tx.Tx.QueryContext(ctx, query, args...)
-	if err != nil {
-		return nil, err
-	}
-	return &Rows{rows, tx.db}, nil
-}
-
-func (tx *Tx) Query(query string, args ...interface{}) (*Rows, error) {
-	return tx.QueryContext(context.Background(), query, args...)
-}
-
-func (tx *Tx) QueryMapContext(ctx context.Context, query string, mp interface{}) (*Rows, error) {
-	query, args, err := MapToSlice(query, mp)
-	if err != nil {
-		return nil, err
-	}
-	return tx.QueryContext(ctx, query, args...)
-}
-
-func (tx *Tx) QueryMap(query string, mp interface{}) (*Rows, error) {
-	return tx.QueryMapContext(context.Background(), query, mp)
-}
-
-func (tx *Tx) QueryStructContext(ctx context.Context, query string, st interface{}) (*Rows, error) {
-	query, args, err := StructToSlice(query, st)
-	if err != nil {
-		return nil, err
-	}
-	return tx.QueryContext(ctx, query, args...)
-}
-
-func (tx *Tx) QueryStruct(query string, st interface{}) (*Rows, error) {
-	return tx.QueryStructContext(context.Background(), query, st)
-}
-
-func (tx *Tx) QueryRowContext(ctx context.Context, query string, args ...interface{}) *Row {
-	rows, err := tx.QueryContext(ctx, query, args...)
-	return &Row{rows, err}
-}
-
-func (tx *Tx) QueryRow(query string, args ...interface{}) *Row {
-	return tx.QueryRowContext(context.Background(), query, args...)
-}
-
-func (tx *Tx) QueryRowMapContext(ctx context.Context, query string, mp interface{}) *Row {
-	query, args, err := MapToSlice(query, mp)
-	if err != nil {
-		return &Row{nil, err}
-	}
-	return tx.QueryRowContext(ctx, query, args...)
-}
-
-func (tx *Tx) QueryRowMap(query string, mp interface{}) *Row {
-	return tx.QueryRowMapContext(context.Background(), query, mp)
-}
-
-func (tx *Tx) QueryRowStructContext(ctx context.Context, query string, st interface{}) *Row {
-	query, args, err := StructToSlice(query, st)
-	if err != nil {
-		return &Row{nil, err}
-	}
-	return tx.QueryRowContext(ctx, query, args...)
-}
-
-func (tx *Tx) QueryRowStruct(query string, st interface{}) *Row {
-	return tx.QueryRowStructContext(context.Background(), query, st)
-}

+ 0 - 323
vendor/gitea.com/xorm/core/type.go

@@ -1,323 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package core
-
-import (
-	"reflect"
-	"sort"
-	"strings"
-	"time"
-)
-
-const (
-	POSTGRES = "postgres"
-	SQLITE   = "sqlite3"
-	MYSQL    = "mysql"
-	MSSQL    = "mssql"
-	ORACLE   = "oracle"
-)
-
-// xorm SQL types
-type SQLType struct {
-	Name           string
-	DefaultLength  int
-	DefaultLength2 int
-}
-
-const (
-	UNKNOW_TYPE = iota
-	TEXT_TYPE
-	BLOB_TYPE
-	TIME_TYPE
-	NUMERIC_TYPE
-)
-
-func (s *SQLType) IsType(st int) bool {
-	if t, ok := SqlTypes[s.Name]; ok && t == st {
-		return true
-	}
-	return false
-}
-
-func (s *SQLType) IsText() bool {
-	return s.IsType(TEXT_TYPE)
-}
-
-func (s *SQLType) IsBlob() bool {
-	return s.IsType(BLOB_TYPE)
-}
-
-func (s *SQLType) IsTime() bool {
-	return s.IsType(TIME_TYPE)
-}
-
-func (s *SQLType) IsNumeric() bool {
-	return s.IsType(NUMERIC_TYPE)
-}
-
-func (s *SQLType) IsJson() bool {
-	return s.Name == Json || s.Name == Jsonb
-}
-
-var (
-	Bit       = "BIT"
-	TinyInt   = "TINYINT"
-	SmallInt  = "SMALLINT"
-	MediumInt = "MEDIUMINT"
-	Int       = "INT"
-	Integer   = "INTEGER"
-	BigInt    = "BIGINT"
-
-	Enum = "ENUM"
-	Set  = "SET"
-
-	Char             = "CHAR"
-	Varchar          = "VARCHAR"
-	NChar            = "NCHAR"
-	NVarchar         = "NVARCHAR"
-	TinyText         = "TINYTEXT"
-	Text             = "TEXT"
-	NText            = "NTEXT"
-	Clob             = "CLOB"
-	MediumText       = "MEDIUMTEXT"
-	LongText         = "LONGTEXT"
-	Uuid             = "UUID"
-	UniqueIdentifier = "UNIQUEIDENTIFIER"
-	SysName          = "SYSNAME"
-
-	Date       = "DATE"
-	DateTime   = "DATETIME"
-	SmallDateTime   = "SMALLDATETIME"
-	Time       = "TIME"
-	TimeStamp  = "TIMESTAMP"
-	TimeStampz = "TIMESTAMPZ"
-
-	Decimal = "DECIMAL"
-	Numeric = "NUMERIC"
-	Money   = "MONEY"
-	SmallMoney = "SMALLMONEY"
-
-	Real   = "REAL"
-	Float  = "FLOAT"
-	Double = "DOUBLE"
-
-	Binary     = "BINARY"
-	VarBinary  = "VARBINARY"
-	TinyBlob   = "TINYBLOB"
-	Blob       = "BLOB"
-	MediumBlob = "MEDIUMBLOB"
-	LongBlob   = "LONGBLOB"
-	Bytea      = "BYTEA"
-
-	Bool    = "BOOL"
-	Boolean = "BOOLEAN"
-
-	Serial    = "SERIAL"
-	BigSerial = "BIGSERIAL"
-
-	Json  = "JSON"
-	Jsonb = "JSONB"
-
-	SqlTypes = map[string]int{
-		Bit:       NUMERIC_TYPE,
-		TinyInt:   NUMERIC_TYPE,
-		SmallInt:  NUMERIC_TYPE,
-		MediumInt: NUMERIC_TYPE,
-		Int:       NUMERIC_TYPE,
-		Integer:   NUMERIC_TYPE,
-		BigInt:    NUMERIC_TYPE,
-
-		Enum:  TEXT_TYPE,
-		Set:   TEXT_TYPE,
-		Json:  TEXT_TYPE,
-		Jsonb: TEXT_TYPE,
-
-		Char:       TEXT_TYPE,
-		NChar:      TEXT_TYPE,
-		Varchar:    TEXT_TYPE,
-		NVarchar:   TEXT_TYPE,
-		TinyText:   TEXT_TYPE,
-		Text:       TEXT_TYPE,
-		NText:      TEXT_TYPE,
-		MediumText: TEXT_TYPE,
-		LongText:   TEXT_TYPE,
-		Uuid:       TEXT_TYPE,
-		Clob:       TEXT_TYPE,
-		SysName:    TEXT_TYPE,
-
-		Date:       TIME_TYPE,
-		DateTime:   TIME_TYPE,
-		Time:       TIME_TYPE,
-		TimeStamp:  TIME_TYPE,
-		TimeStampz: TIME_TYPE,
-		SmallDateTime:   TIME_TYPE,
-
-		Decimal: NUMERIC_TYPE,
-		Numeric: NUMERIC_TYPE,
-		Real:    NUMERIC_TYPE,
-		Float:   NUMERIC_TYPE,
-		Double:  NUMERIC_TYPE,
-		Money:   NUMERIC_TYPE,
-		SmallMoney: NUMERIC_TYPE,
-
-		Binary:    BLOB_TYPE,
-		VarBinary: BLOB_TYPE,
-
-		TinyBlob:         BLOB_TYPE,
-		Blob:             BLOB_TYPE,
-		MediumBlob:       BLOB_TYPE,
-		LongBlob:         BLOB_TYPE,
-		Bytea:            BLOB_TYPE,
-		UniqueIdentifier: BLOB_TYPE,
-
-		Bool: NUMERIC_TYPE,
-
-		Serial:    NUMERIC_TYPE,
-		BigSerial: NUMERIC_TYPE,
-	}
-
-	intTypes  = sort.StringSlice{"*int", "*int16", "*int32", "*int8"}
-	uintTypes = sort.StringSlice{"*uint", "*uint16", "*uint32", "*uint8"}
-)
-
-// !nashtsai! treat the following vars as internal const values; they are used for reflect.TypeOf comparison
-var (
-	c_EMPTY_STRING       string
-	c_BOOL_DEFAULT       bool
-	c_BYTE_DEFAULT       byte
-	c_COMPLEX64_DEFAULT  complex64
-	c_COMPLEX128_DEFAULT complex128
-	c_FLOAT32_DEFAULT    float32
-	c_FLOAT64_DEFAULT    float64
-	c_INT64_DEFAULT      int64
-	c_UINT64_DEFAULT     uint64
-	c_INT32_DEFAULT      int32
-	c_UINT32_DEFAULT     uint32
-	c_INT16_DEFAULT      int16
-	c_UINT16_DEFAULT     uint16
-	c_INT8_DEFAULT       int8
-	c_UINT8_DEFAULT      uint8
-	c_INT_DEFAULT        int
-	c_UINT_DEFAULT       uint
-	c_TIME_DEFAULT       time.Time
-)
-
-var (
-	IntType   = reflect.TypeOf(c_INT_DEFAULT)
-	Int8Type  = reflect.TypeOf(c_INT8_DEFAULT)
-	Int16Type = reflect.TypeOf(c_INT16_DEFAULT)
-	Int32Type = reflect.TypeOf(c_INT32_DEFAULT)
-	Int64Type = reflect.TypeOf(c_INT64_DEFAULT)
-
-	UintType   = reflect.TypeOf(c_UINT_DEFAULT)
-	Uint8Type  = reflect.TypeOf(c_UINT8_DEFAULT)
-	Uint16Type = reflect.TypeOf(c_UINT16_DEFAULT)
-	Uint32Type = reflect.TypeOf(c_UINT32_DEFAULT)
-	Uint64Type = reflect.TypeOf(c_UINT64_DEFAULT)
-
-	Float32Type = reflect.TypeOf(c_FLOAT32_DEFAULT)
-	Float64Type = reflect.TypeOf(c_FLOAT64_DEFAULT)
-
-	Complex64Type  = reflect.TypeOf(c_COMPLEX64_DEFAULT)
-	Complex128Type = reflect.TypeOf(c_COMPLEX128_DEFAULT)
-
-	StringType = reflect.TypeOf(c_EMPTY_STRING)
-	BoolType   = reflect.TypeOf(c_BOOL_DEFAULT)
-	ByteType   = reflect.TypeOf(c_BYTE_DEFAULT)
-	BytesType  = reflect.SliceOf(ByteType)
-
-	TimeType = reflect.TypeOf(c_TIME_DEFAULT)
-)
-
-var (
-	PtrIntType   = reflect.PtrTo(IntType)
-	PtrInt8Type  = reflect.PtrTo(Int8Type)
-	PtrInt16Type = reflect.PtrTo(Int16Type)
-	PtrInt32Type = reflect.PtrTo(Int32Type)
-	PtrInt64Type = reflect.PtrTo(Int64Type)
-
-	PtrUintType   = reflect.PtrTo(UintType)
-	PtrUint8Type  = reflect.PtrTo(Uint8Type)
-	PtrUint16Type = reflect.PtrTo(Uint16Type)
-	PtrUint32Type = reflect.PtrTo(Uint32Type)
-	PtrUint64Type = reflect.PtrTo(Uint64Type)
-
-	PtrFloat32Type = reflect.PtrTo(Float32Type)
-	PtrFloat64Type = reflect.PtrTo(Float64Type)
-
-	PtrComplex64Type  = reflect.PtrTo(Complex64Type)
-	PtrComplex128Type = reflect.PtrTo(Complex128Type)
-
-	PtrStringType = reflect.PtrTo(StringType)
-	PtrBoolType   = reflect.PtrTo(BoolType)
-	PtrByteType   = reflect.PtrTo(ByteType)
-
-	PtrTimeType = reflect.PtrTo(TimeType)
-)
-
-// Type2SQLType generates a SQLType according to the given Go type
-func Type2SQLType(t reflect.Type) (st SQLType) {
-	switch k := t.Kind(); k {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
-		st = SQLType{Int, 0, 0}
-	case reflect.Int64, reflect.Uint64:
-		st = SQLType{BigInt, 0, 0}
-	case reflect.Float32:
-		st = SQLType{Float, 0, 0}
-	case reflect.Float64:
-		st = SQLType{Double, 0, 0}
-	case reflect.Complex64, reflect.Complex128:
-		st = SQLType{Varchar, 64, 0}
-	case reflect.Array, reflect.Slice, reflect.Map:
-		if t.Elem() == reflect.TypeOf(c_BYTE_DEFAULT) {
-			st = SQLType{Blob, 0, 0}
-		} else {
-			st = SQLType{Text, 0, 0}
-		}
-	case reflect.Bool:
-		st = SQLType{Bool, 0, 0}
-	case reflect.String:
-		st = SQLType{Varchar, 255, 0}
-	case reflect.Struct:
-		if t.ConvertibleTo(TimeType) {
-			st = SQLType{DateTime, 0, 0}
-		} else {
-			// TODO need to handle association struct
-			st = SQLType{Text, 0, 0}
-		}
-	case reflect.Ptr:
-		st = Type2SQLType(t.Elem())
-	default:
-		st = SQLType{Text, 0, 0}
-	}
-	return
-}
-
-// SQLType2Type maps a default SQL type to its corresponding Go type
-func SQLType2Type(st SQLType) reflect.Type {
-	name := strings.ToUpper(st.Name)
-	switch name {
-	case Bit, TinyInt, SmallInt, MediumInt, Int, Integer, Serial:
-		return reflect.TypeOf(1)
-	case BigInt, BigSerial:
-		return reflect.TypeOf(int64(1))
-	case Float, Real:
-		return reflect.TypeOf(float32(1))
-	case Double:
-		return reflect.TypeOf(float64(1))
-	case Char, NChar, Varchar, NVarchar, TinyText, Text, NText, MediumText, LongText, Enum, Set, Uuid, Clob, SysName:
-		return reflect.TypeOf("")
-	case TinyBlob, Blob, LongBlob, Bytea, Binary, MediumBlob, VarBinary, UniqueIdentifier:
-		return reflect.TypeOf([]byte{})
-	case Bool:
-		return reflect.TypeOf(true)
-	case DateTime, Date, Time, TimeStamp, TimeStampz, SmallDateTime:
-		return reflect.TypeOf(c_TIME_DEFAULT)
-	case Decimal, Numeric, Money, SmallMoney:
-		return reflect.TypeOf("")
-	default:
-		return reflect.TypeOf("")
-	}
-}
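
For reference, a minimal sketch of the two mappings defined in the deleted type.go; the expected outputs follow directly from the switch statements above:

package main

import (
	"fmt"
	"reflect"
	"time"

	"gitea.com/xorm/core" // vendored package removed by this commit
)

func main() {
	// Go type -> SQL type
	fmt.Println(core.Type2SQLType(reflect.TypeOf("")).Name)          // VARCHAR
	fmt.Println(core.Type2SQLType(reflect.TypeOf(int64(0))).Name)    // BIGINT
	fmt.Println(core.Type2SQLType(reflect.TypeOf(time.Time{})).Name) // DATETIME

	// SQL type -> Go type
	fmt.Println(core.SQLType2Type(core.SQLType{Name: core.Bool})) // bool
}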

+ 0 - 1253
vendor/gitea.com/xorm/xorm/dialect_postgres.go

@@ -1,1253 +0,0 @@
-// Copyright 2015 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xorm
-
-import (
-	"errors"
-	"fmt"
-	"net/url"
-	"strconv"
-	"strings"
-
-	"xorm.io/core"
-)
-
-// from http://www.postgresql.org/docs/current/static/sql-keywords-appendix.html
-var (
-	postgresReservedWords = map[string]bool{
-		"A":                     true,
-		"ABORT":                 true,
-		"ABS":                   true,
-		"ABSENT":                true,
-		"ABSOLUTE":              true,
-		"ACCESS":                true,
-		"ACCORDING":             true,
-		"ACTION":                true,
-		"ADA":                   true,
-		"ADD":                   true,
-		"ADMIN":                 true,
-		"AFTER":                 true,
-		"AGGREGATE":             true,
-		"ALL":                   true,
-		"ALLOCATE":              true,
-		"ALSO":                  true,
-		"ALTER":                 true,
-		"ALWAYS":                true,
-		"ANALYSE":               true,
-		"ANALYZE":               true,
-		"AND":                   true,
-		"ANY":                   true,
-		"ARE":                   true,
-		"ARRAY":                 true,
-		"ARRAY_AGG":             true,
-		"ARRAY_MAX_CARDINALITY": true,
-		"AS":                               true,
-		"ASC":                              true,
-		"ASENSITIVE":                       true,
-		"ASSERTION":                        true,
-		"ASSIGNMENT":                       true,
-		"ASYMMETRIC":                       true,
-		"AT":                               true,
-		"ATOMIC":                           true,
-		"ATTRIBUTE":                        true,
-		"ATTRIBUTES":                       true,
-		"AUTHORIZATION":                    true,
-		"AVG":                              true,
-		"BACKWARD":                         true,
-		"BASE64":                           true,
-		"BEFORE":                           true,
-		"BEGIN":                            true,
-		"BEGIN_FRAME":                      true,
-		"BEGIN_PARTITION":                  true,
-		"BERNOULLI":                        true,
-		"BETWEEN":                          true,
-		"BIGINT":                           true,
-		"BINARY":                           true,
-		"BIT":                              true,
-		"BIT_LENGTH":                       true,
-		"BLOB":                             true,
-		"BLOCKED":                          true,
-		"BOM":                              true,
-		"BOOLEAN":                          true,
-		"BOTH":                             true,
-		"BREADTH":                          true,
-		"BY":                               true,
-		"C":                                true,
-		"CACHE":                            true,
-		"CALL":                             true,
-		"CALLED":                           true,
-		"CARDINALITY":                      true,
-		"CASCADE":                          true,
-		"CASCADED":                         true,
-		"CASE":                             true,
-		"CAST":                             true,
-		"CATALOG":                          true,
-		"CATALOG_NAME":                     true,
-		"CEIL":                             true,
-		"CEILING":                          true,
-		"CHAIN":                            true,
-		"CHAR":                             true,
-		"CHARACTER":                        true,
-		"CHARACTERISTICS":                  true,
-		"CHARACTERS":                       true,
-		"CHARACTER_LENGTH":                 true,
-		"CHARACTER_SET_CATALOG":            true,
-		"CHARACTER_SET_NAME":               true,
-		"CHARACTER_SET_SCHEMA":             true,
-		"CHAR_LENGTH":                      true,
-		"CHECK":                            true,
-		"CHECKPOINT":                       true,
-		"CLASS":                            true,
-		"CLASS_ORIGIN":                     true,
-		"CLOB":                             true,
-		"CLOSE":                            true,
-		"CLUSTER":                          true,
-		"COALESCE":                         true,
-		"COBOL":                            true,
-		"COLLATE":                          true,
-		"COLLATION":                        true,
-		"COLLATION_CATALOG":                true,
-		"COLLATION_NAME":                   true,
-		"COLLATION_SCHEMA":                 true,
-		"COLLECT":                          true,
-		"COLUMN":                           true,
-		"COLUMNS":                          true,
-		"COLUMN_NAME":                      true,
-		"COMMAND_FUNCTION":                 true,
-		"COMMAND_FUNCTION_CODE":            true,
-		"COMMENT":                          true,
-		"COMMENTS":                         true,
-		"COMMIT":                           true,
-		"COMMITTED":                        true,
-		"CONCURRENTLY":                     true,
-		"CONDITION":                        true,
-		"CONDITION_NUMBER":                 true,
-		"CONFIGURATION":                    true,
-		"CONNECT":                          true,
-		"CONNECTION":                       true,
-		"CONNECTION_NAME":                  true,
-		"CONSTRAINT":                       true,
-		"CONSTRAINTS":                      true,
-		"CONSTRAINT_CATALOG":               true,
-		"CONSTRAINT_NAME":                  true,
-		"CONSTRAINT_SCHEMA":                true,
-		"CONSTRUCTOR":                      true,
-		"CONTAINS":                         true,
-		"CONTENT":                          true,
-		"CONTINUE":                         true,
-		"CONTROL":                          true,
-		"CONVERSION":                       true,
-		"CONVERT":                          true,
-		"COPY":                             true,
-		"CORR":                             true,
-		"CORRESPONDING":                    true,
-		"COST":                             true,
-		"COUNT":                            true,
-		"COVAR_POP":                        true,
-		"COVAR_SAMP":                       true,
-		"CREATE":                           true,
-		"CROSS":                            true,
-		"CSV":                              true,
-		"CUBE":                             true,
-		"CUME_DIST":                        true,
-		"CURRENT":                          true,
-		"CURRENT_CATALOG":                  true,
-		"CURRENT_DATE":                     true,
-		"CURRENT_DEFAULT_TRANSFORM_GROUP":  true,
-		"CURRENT_PATH":                     true,
-		"CURRENT_ROLE":                     true,
-		"CURRENT_ROW":                      true,
-		"CURRENT_SCHEMA":                   true,
-		"CURRENT_TIME":                     true,
-		"CURRENT_TIMESTAMP":                true,
-		"CURRENT_TRANSFORM_GROUP_FOR_TYPE": true,
-		"CURRENT_USER":                     true,
-		"CURSOR":                           true,
-		"CURSOR_NAME":                      true,
-		"CYCLE":                            true,
-		"DATA":                             true,
-		"DATABASE":                         true,
-		"DATALINK":                         true,
-		"DATE":                             true,
-		"DATETIME_INTERVAL_CODE":      true,
-		"DATETIME_INTERVAL_PRECISION": true,
-		"DAY":                        true,
-		"DB":                         true,
-		"DEALLOCATE":                 true,
-		"DEC":                        true,
-		"DECIMAL":                    true,
-		"DECLARE":                    true,
-		"DEFAULT":                    true,
-		"DEFAULTS":                   true,
-		"DEFERRABLE":                 true,
-		"DEFERRED":                   true,
-		"DEFINED":                    true,
-		"DEFINER":                    true,
-		"DEGREE":                     true,
-		"DELETE":                     true,
-		"DELIMITER":                  true,
-		"DELIMITERS":                 true,
-		"DENSE_RANK":                 true,
-		"DEPTH":                      true,
-		"DEREF":                      true,
-		"DERIVED":                    true,
-		"DESC":                       true,
-		"DESCRIBE":                   true,
-		"DESCRIPTOR":                 true,
-		"DETERMINISTIC":              true,
-		"DIAGNOSTICS":                true,
-		"DICTIONARY":                 true,
-		"DISABLE":                    true,
-		"DISCARD":                    true,
-		"DISCONNECT":                 true,
-		"DISPATCH":                   true,
-		"DISTINCT":                   true,
-		"DLNEWCOPY":                  true,
-		"DLPREVIOUSCOPY":             true,
-		"DLURLCOMPLETE":              true,
-		"DLURLCOMPLETEONLY":          true,
-		"DLURLCOMPLETEWRITE":         true,
-		"DLURLPATH":                  true,
-		"DLURLPATHONLY":              true,
-		"DLURLPATHWRITE":             true,
-		"DLURLSCHEME":                true,
-		"DLURLSERVER":                true,
-		"DLVALUE":                    true,
-		"DO":                         true,
-		"DOCUMENT":                   true,
-		"DOMAIN":                     true,
-		"DOUBLE":                     true,
-		"DROP":                       true,
-		"DYNAMIC":                    true,
-		"DYNAMIC_FUNCTION":           true,
-		"DYNAMIC_FUNCTION_CODE":      true,
-		"EACH":                       true,
-		"ELEMENT":                    true,
-		"ELSE":                       true,
-		"EMPTY":                      true,
-		"ENABLE":                     true,
-		"ENCODING":                   true,
-		"ENCRYPTED":                  true,
-		"END":                        true,
-		"END-EXEC":                   true,
-		"END_FRAME":                  true,
-		"END_PARTITION":              true,
-		"ENFORCED":                   true,
-		"ENUM":                       true,
-		"EQUALS":                     true,
-		"ESCAPE":                     true,
-		"EVENT":                      true,
-		"EVERY":                      true,
-		"EXCEPT":                     true,
-		"EXCEPTION":                  true,
-		"EXCLUDE":                    true,
-		"EXCLUDING":                  true,
-		"EXCLUSIVE":                  true,
-		"EXEC":                       true,
-		"EXECUTE":                    true,
-		"EXISTS":                     true,
-		"EXP":                        true,
-		"EXPLAIN":                    true,
-		"EXPRESSION":                 true,
-		"EXTENSION":                  true,
-		"EXTERNAL":                   true,
-		"EXTRACT":                    true,
-		"FALSE":                      true,
-		"FAMILY":                     true,
-		"FETCH":                      true,
-		"FILE":                       true,
-		"FILTER":                     true,
-		"FINAL":                      true,
-		"FIRST":                      true,
-		"FIRST_VALUE":                true,
-		"FLAG":                       true,
-		"FLOAT":                      true,
-		"FLOOR":                      true,
-		"FOLLOWING":                  true,
-		"FOR":                        true,
-		"FORCE":                      true,
-		"FOREIGN":                    true,
-		"FORTRAN":                    true,
-		"FORWARD":                    true,
-		"FOUND":                      true,
-		"FRAME_ROW":                  true,
-		"FREE":                       true,
-		"FREEZE":                     true,
-		"FROM":                       true,
-		"FS":                         true,
-		"FULL":                       true,
-		"FUNCTION":                   true,
-		"FUNCTIONS":                  true,
-		"FUSION":                     true,
-		"G":                          true,
-		"GENERAL":                    true,
-		"GENERATED":                  true,
-		"GET":                        true,
-		"GLOBAL":                     true,
-		"GO":                         true,
-		"GOTO":                       true,
-		"GRANT":                      true,
-		"GRANTED":                    true,
-		"GREATEST":                   true,
-		"GROUP":                      true,
-		"GROUPING":                   true,
-		"GROUPS":                     true,
-		"HANDLER":                    true,
-		"HAVING":                     true,
-		"HEADER":                     true,
-		"HEX":                        true,
-		"HIERARCHY":                  true,
-		"HOLD":                       true,
-		"HOUR":                       true,
-		"ID":                         true,
-		"IDENTITY":                   true,
-		"IF":                         true,
-		"IGNORE":                     true,
-		"ILIKE":                      true,
-		"IMMEDIATE":                  true,
-		"IMMEDIATELY":                true,
-		"IMMUTABLE":                  true,
-		"IMPLEMENTATION":             true,
-		"IMPLICIT":                   true,
-		"IMPORT":                     true,
-		"IN":                         true,
-		"INCLUDING":                  true,
-		"INCREMENT":                  true,
-		"INDENT":                     true,
-		"INDEX":                      true,
-		"INDEXES":                    true,
-		"INDICATOR":                  true,
-		"INHERIT":                    true,
-		"INHERITS":                   true,
-		"INITIALLY":                  true,
-		"INLINE":                     true,
-		"INNER":                      true,
-		"INOUT":                      true,
-		"INPUT":                      true,
-		"INSENSITIVE":                true,
-		"INSERT":                     true,
-		"INSTANCE":                   true,
-		"INSTANTIABLE":               true,
-		"INSTEAD":                    true,
-		"INT":                        true,
-		"INTEGER":                    true,
-		"INTEGRITY":                  true,
-		"INTERSECT":                  true,
-		"INTERSECTION":               true,
-		"INTERVAL":                   true,
-		"INTO":                       true,
-		"INVOKER":                    true,
-		"IS":                         true,
-		"ISNULL":                     true,
-		"ISOLATION":                  true,
-		"JOIN":                       true,
-		"K":                          true,
-		"KEY":                        true,
-		"KEY_MEMBER":                 true,
-		"KEY_TYPE":                   true,
-		"LABEL":                      true,
-		"LAG":                        true,
-		"LANGUAGE":                   true,
-		"LARGE":                      true,
-		"LAST":                       true,
-		"LAST_VALUE":                 true,
-		"LATERAL":                    true,
-		"LC_COLLATE":                 true,
-		"LC_CTYPE":                   true,
-		"LEAD":                       true,
-		"LEADING":                    true,
-		"LEAKPROOF":                  true,
-		"LEAST":                      true,
-		"LEFT":                       true,
-		"LENGTH":                     true,
-		"LEVEL":                      true,
-		"LIBRARY":                    true,
-		"LIKE":                       true,
-		"LIKE_REGEX":                 true,
-		"LIMIT":                      true,
-		"LINK":                       true,
-		"LISTEN":                     true,
-		"LN":                         true,
-		"LOAD":                       true,
-		"LOCAL":                      true,
-		"LOCALTIME":                  true,
-		"LOCALTIMESTAMP":             true,
-		"LOCATION":                   true,
-		"LOCATOR":                    true,
-		"LOCK":                       true,
-		"LOWER":                      true,
-		"M":                          true,
-		"MAP":                        true,
-		"MAPPING":                    true,
-		"MATCH":                      true,
-		"MATCHED":                    true,
-		"MATERIALIZED":               true,
-		"MAX":                        true,
-		"MAXVALUE":                   true,
-		"MAX_CARDINALITY":            true,
-		"MEMBER":                     true,
-		"MERGE":                      true,
-		"MESSAGE_LENGTH":             true,
-		"MESSAGE_OCTET_LENGTH":       true,
-		"MESSAGE_TEXT":               true,
-		"METHOD":                     true,
-		"MIN":                        true,
-		"MINUTE":                     true,
-		"MINVALUE":                   true,
-		"MOD":                        true,
-		"MODE":                       true,
-		"MODIFIES":                   true,
-		"MODULE":                     true,
-		"MONTH":                      true,
-		"MORE":                       true,
-		"MOVE":                       true,
-		"MULTISET":                   true,
-		"MUMPS":                      true,
-		"NAME":                       true,
-		"NAMES":                      true,
-		"NAMESPACE":                  true,
-		"NATIONAL":                   true,
-		"NATURAL":                    true,
-		"NCHAR":                      true,
-		"NCLOB":                      true,
-		"NESTING":                    true,
-		"NEW":                        true,
-		"NEXT":                       true,
-		"NFC":                        true,
-		"NFD":                        true,
-		"NFKC":                       true,
-		"NFKD":                       true,
-		"NIL":                        true,
-		"NO":                         true,
-		"NONE":                       true,
-		"NORMALIZE":                  true,
-		"NORMALIZED":                 true,
-		"NOT":                        true,
-		"NOTHING":                    true,
-		"NOTIFY":                     true,
-		"NOTNULL":                    true,
-		"NOWAIT":                     true,
-		"NTH_VALUE":                  true,
-		"NTILE":                      true,
-		"NULL":                       true,
-		"NULLABLE":                   true,
-		"NULLIF":                     true,
-		"NULLS":                      true,
-		"NUMBER":                     true,
-		"NUMERIC":                    true,
-		"OBJECT":                     true,
-		"OCCURRENCES_REGEX":          true,
-		"OCTETS":                     true,
-		"OCTET_LENGTH":               true,
-		"OF":                         true,
-		"OFF":                        true,
-		"OFFSET":                     true,
-		"OIDS":                       true,
-		"OLD":                        true,
-		"ON":                         true,
-		"ONLY":                       true,
-		"OPEN":                       true,
-		"OPERATOR":                   true,
-		"OPTION":                     true,
-		"OPTIONS":                    true,
-		"OR":                         true,
-		"ORDER":                      true,
-		"ORDERING":                   true,
-		"ORDINALITY":                 true,
-		"OTHERS":                     true,
-		"OUT":                        true,
-		"OUTER":                      true,
-		"OUTPUT":                     true,
-		"OVER":                       true,
-		"OVERLAPS":                   true,
-		"OVERLAY":                    true,
-		"OVERRIDING":                 true,
-		"OWNED":                      true,
-		"OWNER":                      true,
-		"P":                          true,
-		"PAD":                        true,
-		"PARAMETER":                  true,
-		"PARAMETER_MODE":             true,
-		"PARAMETER_NAME":             true,
-		"PARAMETER_ORDINAL_POSITION": true,
-		"PARAMETER_SPECIFIC_CATALOG": true,
-		"PARAMETER_SPECIFIC_NAME":    true,
-		"PARAMETER_SPECIFIC_SCHEMA":  true,
-		"PARSER":                     true,
-		"PARTIAL":                    true,
-		"PARTITION":                  true,
-		"PASCAL":                     true,
-		"PASSING":                    true,
-		"PASSTHROUGH":                true,
-		"PASSWORD":                   true,
-		"PATH":                       true,
-		"PERCENT":                    true,
-		"PERCENTILE_CONT":            true,
-		"PERCENTILE_DISC":            true,
-		"PERCENT_RANK":               true,
-		"PERIOD":                     true,
-		"PERMISSION":                 true,
-		"PLACING":                    true,
-		"PLANS":                      true,
-		"PLI":                        true,
-		"PORTION":                    true,
-		"POSITION":                   true,
-		"POSITION_REGEX":             true,
-		"POWER":                      true,
-		"PRECEDES":                   true,
-		"PRECEDING":                  true,
-		"PRECISION":                  true,
-		"PREPARE":                    true,
-		"PREPARED":                   true,
-		"PRESERVE":                   true,
-		"PRIMARY":                    true,
-		"PRIOR":                      true,
-		"PRIVILEGES":                 true,
-		"PROCEDURAL":                 true,
-		"PROCEDURE":                  true,
-		"PROGRAM":                    true,
-		"PUBLIC":                     true,
-		"QUOTE":                      true,
-		"RANGE":                      true,
-		"RANK":                       true,
-		"READ":                       true,
-		"READS":                      true,
-		"REAL":                       true,
-		"REASSIGN":                   true,
-		"RECHECK":                    true,
-		"RECOVERY":                   true,
-		"RECURSIVE":                  true,
-		"REF":                        true,
-		"REFERENCES":                 true,
-		"REFERENCING":                true,
-		"REFRESH":                    true,
-		"REGR_AVGX":                  true,
-		"REGR_AVGY":                  true,
-		"REGR_COUNT":                 true,
-		"REGR_INTERCEPT":             true,
-		"REGR_R2":                    true,
-		"REGR_SLOPE":                 true,
-		"REGR_SXX":                   true,
-		"REGR_SXY":                   true,
-		"REGR_SYY":                   true,
-		"REINDEX":                    true,
-		"RELATIVE":                   true,
-		"RELEASE":                    true,
-		"RENAME":                     true,
-		"REPEATABLE":                 true,
-		"REPLACE":                    true,
-		"REPLICA":                    true,
-		"REQUIRING":                  true,
-		"RESET":                      true,
-		"RESPECT":                    true,
-		"RESTART":                    true,
-		"RESTORE":                    true,
-		"RESTRICT":                   true,
-		"RESULT":                     true,
-		"RETURN":                     true,
-		"RETURNED_CARDINALITY":       true,
-		"RETURNED_LENGTH":            true,
-		"RETURNED_OCTET_LENGTH":      true,
-		"RETURNED_SQLSTATE":          true,
-		"RETURNING":                  true,
-		"RETURNS":                    true,
-		"REVOKE":                     true,
-		"RIGHT":                      true,
-		"ROLE":                       true,
-		"ROLLBACK":                   true,
-		"ROLLUP":                     true,
-		"ROUTINE":                    true,
-		"ROUTINE_CATALOG":            true,
-		"ROUTINE_NAME":               true,
-		"ROUTINE_SCHEMA":             true,
-		"ROW":                        true,
-		"ROWS":                       true,
-		"ROW_COUNT":                  true,
-		"ROW_NUMBER":                 true,
-		"RULE":                       true,
-		"SAVEPOINT":                  true,
-		"SCALE":                      true,
-		"SCHEMA":                     true,
-		"SCHEMA_NAME":                true,
-		"SCOPE":                      true,
-		"SCOPE_CATALOG":              true,
-		"SCOPE_NAME":                 true,
-		"SCOPE_SCHEMA":               true,
-		"SCROLL":                     true,
-		"SEARCH":                     true,
-		"SECOND":                     true,
-		"SECTION":                    true,
-		"SECURITY":                   true,
-		"SELECT":                     true,
-		"SELECTIVE":                  true,
-		"SELF":                       true,
-		"SENSITIVE":                  true,
-		"SEQUENCE":                   true,
-		"SEQUENCES":                  true,
-		"SERIALIZABLE":               true,
-		"SERVER":                     true,
-		"SERVER_NAME":                true,
-		"SESSION":                    true,
-		"SESSION_USER":               true,
-		"SET":                        true,
-		"SETOF":                      true,
-		"SETS":                       true,
-		"SHARE":                      true,
-		"SHOW":                       true,
-		"SIMILAR":                    true,
-		"SIMPLE":                     true,
-		"SIZE":                       true,
-		"SMALLINT":                   true,
-		"SNAPSHOT":                   true,
-		"SOME":                       true,
-		"SOURCE":                     true,
-		"SPACE":                      true,
-		"SPECIFIC":                   true,
-		"SPECIFICTYPE":               true,
-		"SPECIFIC_NAME":              true,
-		"SQL":                        true,
-		"SQLCODE":                    true,
-		"SQLERROR":                   true,
-		"SQLEXCEPTION":               true,
-		"SQLSTATE":                   true,
-		"SQLWARNING":                 true,
-		"SQRT":                       true,
-		"STABLE":                     true,
-		"STANDALONE":                 true,
-		"START":                      true,
-		"STATE":                      true,
-		"STATEMENT":                  true,
-		"STATIC":                     true,
-		"STATISTICS":                 true,
-		"STDDEV_POP":                 true,
-		"STDDEV_SAMP":                true,
-		"STDIN":                      true,
-		"STDOUT":                     true,
-		"STORAGE":                    true,
-		"STRICT":                     true,
-		"STRIP":                      true,
-		"STRUCTURE":                  true,
-		"STYLE":                      true,
-		"SUBCLASS_ORIGIN":            true,
-		"SUBMULTISET":                true,
-		"SUBSTRING":                  true,
-		"SUBSTRING_REGEX":            true,
-		"SUCCEEDS":                   true,
-		"SUM":                        true,
-		"SYMMETRIC":                  true,
-		"SYSID":                      true,
-		"SYSTEM":                     true,
-		"SYSTEM_TIME":                true,
-		"SYSTEM_USER":                true,
-		"T":                          true,
-		"TABLE":                      true,
-		"TABLES":                     true,
-		"TABLESAMPLE":                true,
-		"TABLESPACE":                 true,
-		"TABLE_NAME":                 true,
-		"TEMP":                       true,
-		"TEMPLATE":                   true,
-		"TEMPORARY":                  true,
-		"TEXT":                       true,
-		"THEN":                       true,
-		"TIES":                       true,
-		"TIME":                       true,
-		"TIMESTAMP":                  true,
-		"TIMEZONE_HOUR":              true,
-		"TIMEZONE_MINUTE":            true,
-		"TO":                         true,
-		"TOKEN":                      true,
-		"TOP_LEVEL_COUNT":            true,
-		"TRAILING":                   true,
-		"TRANSACTION":                true,
-		"TRANSACTIONS_COMMITTED":     true,
-		"TRANSACTIONS_ROLLED_BACK":   true,
-		"TRANSACTION_ACTIVE":         true,
-		"TRANSFORM":                  true,
-		"TRANSFORMS":                 true,
-		"TRANSLATE":                  true,
-		"TRANSLATE_REGEX":            true,
-		"TRANSLATION":                true,
-		"TREAT":                      true,
-		"TRIGGER":                    true,
-		"TRIGGER_CATALOG":            true,
-		"TRIGGER_NAME":               true,
-		"TRIGGER_SCHEMA":             true,
-		"TRIM":                       true,
-		"TRIM_ARRAY":                 true,
-		"TRUE":                       true,
-		"TRUNCATE":                   true,
-		"TRUSTED":                    true,
-		"TYPE":                       true,
-		"TYPES":                      true,
-		"UESCAPE":                    true,
-		"UNBOUNDED":                  true,
-		"UNCOMMITTED":                true,
-		"UNDER":                      true,
-		"UNENCRYPTED":                true,
-		"UNION":                      true,
-		"UNIQUE":                     true,
-		"UNKNOWN":                    true,
-		"UNLINK":                     true,
-		"UNLISTEN":                   true,
-		"UNLOGGED":                   true,
-		"UNNAMED":                    true,
-		"UNNEST":                     true,
-		"UNTIL":                      true,
-		"UNTYPED":                    true,
-		"UPDATE":                     true,
-		"UPPER":                      true,
-		"URI":                        true,
-		"USAGE":                      true,
-		"USER":                       true,
-		"USER_DEFINED_TYPE_CATALOG": true,
-		"USER_DEFINED_TYPE_CODE":    true,
-		"USER_DEFINED_TYPE_NAME":    true,
-		"USER_DEFINED_TYPE_SCHEMA":  true,
-		"USING":                     true,
-		"VACUUM":                    true,
-		"VALID":                     true,
-		"VALIDATE":                  true,
-		"VALIDATOR":                 true,
-		"VALUE":                     true,
-		"VALUES":                    true,
-		"VALUE_OF":                  true,
-		"VARBINARY":                 true,
-		"VARCHAR":                   true,
-		"VARIADIC":                  true,
-		"VARYING":                   true,
-		"VAR_POP":                   true,
-		"VAR_SAMP":                  true,
-		"VERBOSE":                   true,
-		"VERSION":                   true,
-		"VERSIONING":                true,
-		"VIEW":                      true,
-		"VOLATILE":                  true,
-		"WHEN":                      true,
-		"WHENEVER":                  true,
-		"WHERE":                     true,
-		"WHITESPACE":                true,
-		"WIDTH_BUCKET":              true,
-		"WINDOW":                    true,
-		"WITH":                      true,
-		"WITHIN":                    true,
-		"WITHOUT":                   true,
-		"WORK":                      true,
-		"WRAPPER":                   true,
-		"WRITE":                     true,
-		"XML":                       true,
-		"XMLAGG":                    true,
-		"XMLATTRIBUTES":             true,
-		"XMLBINARY":                 true,
-		"XMLCAST":                   true,
-		"XMLCOMMENT":                true,
-		"XMLCONCAT":                 true,
-		"XMLDECLARATION":            true,
-		"XMLDOCUMENT":               true,
-		"XMLELEMENT":                true,
-		"XMLEXISTS":                 true,
-		"XMLFOREST":                 true,
-		"XMLITERATE":                true,
-		"XMLNAMESPACES":             true,
-		"XMLPARSE":                  true,
-		"XMLPI":                     true,
-		"XMLQUERY":                  true,
-		"XMLROOT":                   true,
-		"XMLSCHEMA":                 true,
-		"XMLSERIALIZE":              true,
-		"XMLTABLE":                  true,
-		"XMLTEXT":                   true,
-		"XMLVALIDATE":               true,
-		"YEAR":                      true,
-		"YES":                       true,
-		"ZONE":                      true,
-	}
-
-	// DefaultPostgresSchema default postgres schema
-	DefaultPostgresSchema = "public"
-)
-
-const postgresPublicSchema = "public"
-
-type postgres struct {
-	core.Base
-}
-
-func (db *postgres) Init(d *core.DB, uri *core.Uri, drivername, dataSourceName string) error {
-	err := db.Base.Init(d, db, uri, drivername, dataSourceName)
-	if err != nil {
-		return err
-	}
-	if db.Schema == "" {
-		db.Schema = DefaultPostgresSchema
-	}
-	return nil
-}
-
-func (db *postgres) SqlType(c *core.Column) string {
-	var res string
-	switch t := c.SQLType.Name; t {
-	case core.TinyInt:
-		res = core.SmallInt
-		return res
-	case core.Bit:
-		res = core.Boolean
-		return res
-	case core.MediumInt, core.Int, core.Integer:
-		if c.IsAutoIncrement {
-			return core.Serial
-		}
-		return core.Integer
-	case core.BigInt:
-		if c.IsAutoIncrement {
-			return core.BigSerial
-		}
-		return core.BigInt
-	case core.Serial, core.BigSerial:
-		c.IsAutoIncrement = true
-		c.Nullable = false
-		res = t
-	case core.Binary, core.VarBinary:
-		return core.Bytea
-	case core.DateTime:
-		res = core.TimeStamp
-	case core.TimeStampz:
-		return "timestamp with time zone"
-	case core.Float:
-		res = core.Real
-	case core.TinyText, core.MediumText, core.LongText:
-		res = core.Text
-	case core.NVarchar:
-		res = core.Varchar
-	case core.Uuid:
-		return core.Uuid
-	case core.Blob, core.TinyBlob, core.MediumBlob, core.LongBlob:
-		return core.Bytea
-	case core.Double:
-		return "DOUBLE PRECISION"
-	default:
-		if c.IsAutoIncrement {
-			return core.Serial
-		}
-		res = t
-	}
-
-	if strings.EqualFold(res, "bool") {
-		// for bool, we don't need length information
-		return res
-	}
-	hasLen1 := (c.Length > 0)
-	hasLen2 := (c.Length2 > 0)
-
-	if hasLen2 {
-		res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")"
-	} else if hasLen1 {
-		res += "(" + strconv.Itoa(c.Length) + ")"
-	}
-	return res
-}
-
-func (db *postgres) SupportInsertMany() bool {
-	return true
-}
-
-func (db *postgres) IsReserved(name string) bool {
-	_, ok := postgresReservedWords[name]
-	return ok
-}
-
-func (db *postgres) Quote(name string) string {
-	name = strings.Replace(name, ".", `"."`, -1)
-	return "\"" + name + "\""
-}
-
-func (db *postgres) QuoteStr() string {
-	return "\""
-}
-
-func (db *postgres) AutoIncrStr() string {
-	return ""
-}
-
-func (db *postgres) SupportEngine() bool {
-	return false
-}
-
-func (db *postgres) SupportCharset() bool {
-	return false
-}
-
-func (db *postgres) IndexOnTable() bool {
-	return false
-}
-
-func (db *postgres) IndexCheckSql(tableName, idxName string) (string, []interface{}) {
-	if len(db.Schema) == 0 {
-		args := []interface{}{tableName, idxName}
-		return `SELECT indexname FROM pg_indexes WHERE tablename = ? AND indexname = ?`, args
-	}
-
-	args := []interface{}{db.Schema, tableName, idxName}
-	return `SELECT indexname FROM pg_indexes ` +
-		`WHERE schemaname = ? AND tablename = ? AND indexname = ?`, args
-}
-
-func (db *postgres) TableCheckSql(tableName string) (string, []interface{}) {
-	if len(db.Schema) == 0 {
-		args := []interface{}{tableName}
-		return `SELECT tablename FROM pg_tables WHERE tablename = ?`, args
-	}
-
-	args := []interface{}{db.Schema, tableName}
-	return `SELECT tablename FROM pg_tables WHERE schemaname = ? AND tablename = ?`, args
-}
-
-func (db *postgres) ModifyColumnSql(tableName string, col *core.Column) string {
-	if len(db.Schema) == 0 {
-		return fmt.Sprintf("alter table %s ALTER COLUMN %s TYPE %s",
-			tableName, col.Name, db.SqlType(col))
-	}
-	return fmt.Sprintf("alter table %s.%s ALTER COLUMN %s TYPE %s",
-		db.Schema, tableName, col.Name, db.SqlType(col))
-}
-
-func (db *postgres) DropIndexSql(tableName string, index *core.Index) string {
-	quote := db.Quote
-	idxName := index.Name
-
-	tableName = strings.Replace(tableName, `"`, "", -1)
-	tableName = strings.Replace(tableName, `.`, "_", -1)
-
-	if !strings.HasPrefix(idxName, "UQE_") &&
-		!strings.HasPrefix(idxName, "IDX_") {
-		if index.Type == core.UniqueType {
-			idxName = fmt.Sprintf("UQE_%v_%v", tableName, index.Name)
-		} else {
-			idxName = fmt.Sprintf("IDX_%v_%v", tableName, index.Name)
-		}
-	}
-	if db.Uri.Schema != "" {
-		idxName = db.Uri.Schema + "." + idxName
-	}
-	return fmt.Sprintf("DROP INDEX %v", quote(idxName))
-}
-
-func (db *postgres) IsColumnExist(tableName, colName string) (bool, error) {
-	args := []interface{}{db.Schema, tableName, colName}
-	query := "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = $1 AND table_name = $2" +
-		" AND column_name = $3"
-	if len(db.Schema) == 0 {
-		args = []interface{}{tableName, colName}
-		query = "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = $1" +
-			" AND column_name = $2"
-	}
-	db.LogSQL(query, args)
-
-	rows, err := db.DB().Query(query, args...)
-	if err != nil {
-		return false, err
-	}
-	defer rows.Close()
-
-	return rows.Next(), nil
-}
-
-func (db *postgres) GetColumns(tableName string) ([]string, map[string]*core.Column, error) {
-	args := []interface{}{tableName}
-	s := `SELECT column_name, column_default, is_nullable, data_type, character_maximum_length, numeric_precision, numeric_precision_radix ,
-    CASE WHEN p.contype = 'p' THEN true ELSE false END AS primarykey,
-    CASE WHEN p.contype = 'u' THEN true ELSE false END AS uniquekey
-FROM pg_attribute f
-    JOIN pg_class c ON c.oid = f.attrelid JOIN pg_type t ON t.oid = f.atttypid
-    LEFT JOIN pg_attrdef d ON d.adrelid = c.oid AND d.adnum = f.attnum
-    LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
-    LEFT JOIN pg_constraint p ON p.conrelid = c.oid AND f.attnum = ANY (p.conkey)
-    LEFT JOIN pg_class AS g ON p.confrelid = g.oid
-    LEFT JOIN INFORMATION_SCHEMA.COLUMNS s ON s.column_name=f.attname AND c.relname=s.table_name
-WHERE c.relkind = 'r'::char AND c.relname = $1%s AND f.attnum > 0 ORDER BY f.attnum;`
-
-	var f string
-	if len(db.Schema) != 0 {
-		args = append(args, db.Schema)
-		f = " AND s.table_schema = $2"
-	}
-	s = fmt.Sprintf(s, f)
-
-	db.LogSQL(s, args)
-
-	rows, err := db.DB().Query(s, args...)
-	if err != nil {
-		return nil, nil, err
-	}
-	defer rows.Close()
-
-	cols := make(map[string]*core.Column)
-	colSeq := make([]string, 0)
-
-	for rows.Next() {
-		col := new(core.Column)
-		col.Indexes = make(map[string]int)
-
-		var colName, isNullable, dataType string
-		var maxLenStr, colDefault, numPrecision, numRadix *string
-		var isPK, isUnique bool
-		err = rows.Scan(&colName, &colDefault, &isNullable, &dataType, &maxLenStr, &numPrecision, &numRadix, &isPK, &isUnique)
-		if err != nil {
-			return nil, nil, err
-		}
-
-		//fmt.Println(args, colName, isNullable, dataType, maxLenStr, colDefault, numPrecision, numRadix, isPK, isUnique)
-		var maxLen int
-		if maxLenStr != nil {
-			maxLen, err = strconv.Atoi(*maxLenStr)
-			if err != nil {
-				return nil, nil, err
-			}
-		}
-
-		col.Name = strings.Trim(colName, `" `)
-
-		if colDefault != nil || isPK {
-			if isPK {
-				col.IsPrimaryKey = true
-			} else {
-				col.Default = *colDefault
-			}
-		}
-
-		if colDefault != nil && strings.HasPrefix(*colDefault, "nextval(") {
-			col.IsAutoIncrement = true
-		}
-
-		col.Nullable = (isNullable == "YES")
-
-		switch dataType {
-		case "character varying", "character":
-			col.SQLType = core.SQLType{Name: core.Varchar, DefaultLength: 0, DefaultLength2: 0}
-		case "timestamp without time zone":
-			col.SQLType = core.SQLType{Name: core.DateTime, DefaultLength: 0, DefaultLength2: 0}
-		case "timestamp with time zone":
-			col.SQLType = core.SQLType{Name: core.TimeStampz, DefaultLength: 0, DefaultLength2: 0}
-		case "double precision":
-			col.SQLType = core.SQLType{Name: core.Double, DefaultLength: 0, DefaultLength2: 0}
-		case "boolean":
-			col.SQLType = core.SQLType{Name: core.Bool, DefaultLength: 0, DefaultLength2: 0}
-		case "time without time zone":
-			col.SQLType = core.SQLType{Name: core.Time, DefaultLength: 0, DefaultLength2: 0}
-		case "oid":
-			col.SQLType = core.SQLType{Name: core.BigInt, DefaultLength: 0, DefaultLength2: 0}
-		default:
-			col.SQLType = core.SQLType{Name: strings.ToUpper(dataType), DefaultLength: 0, DefaultLength2: 0}
-		}
-		if _, ok := core.SqlTypes[col.SQLType.Name]; !ok {
-			return nil, nil, fmt.Errorf("Unknown colType: %v", dataType)
-		}
-
-		col.Length = maxLen
-
-		if col.SQLType.IsText() || col.SQLType.IsTime() {
-			if col.Default != "" {
-				col.Default = "'" + col.Default + "'"
-			} else {
-				if col.DefaultIsEmpty {
-					col.Default = "''"
-				}
-			}
-		}
-		cols[col.Name] = col
-		colSeq = append(colSeq, col.Name)
-	}
-
-	return colSeq, cols, nil
-}
-
-func (db *postgres) GetTables() ([]*core.Table, error) {
-	args := []interface{}{}
-	s := "SELECT tablename FROM pg_tables"
-	if len(db.Schema) != 0 {
-		args = append(args, db.Schema)
-		s = s + " WHERE schemaname = $1"
-	}
-
-	db.LogSQL(s, args)
-
-	rows, err := db.DB().Query(s, args...)
-	if err != nil {
-		return nil, err
-	}
-	defer rows.Close()
-
-	tables := make([]*core.Table, 0)
-	for rows.Next() {
-		table := core.NewEmptyTable()
-		var name string
-		err = rows.Scan(&name)
-		if err != nil {
-			return nil, err
-		}
-		table.Name = name
-		tables = append(tables, table)
-	}
-	return tables, nil
-}
-
-
-func getIndexColName(indexdef string) []string {
-	var colNames []string
-
-	cs := strings.Split(indexdef, "(")
-	for _, v := range strings.Split(strings.Split(cs[1], ")")[0], ",") {
-		colNames = append(colNames, strings.Split(strings.TrimLeft(v, " "), " ")[0])
-	}
-
-	return colNames
-}
-
-
-func (db *postgres) GetIndexes(tableName string) (map[string]*core.Index, error) {
-	args := []interface{}{tableName}
-	s := fmt.Sprintf("SELECT indexname, indexdef FROM pg_indexes WHERE tablename=$1")
-	if len(db.Schema) != 0 {
-		args = append(args, db.Schema)
-		s = s + " AND schemaname=$2"
-	}
-	db.LogSQL(s, args)
-
-	rows, err := db.DB().Query(s, args...)
-	if err != nil {
-		return nil, err
-	}
-	defer rows.Close()
-
-	indexes := make(map[string]*core.Index, 0)
-	for rows.Next() {
-		var indexType int
-		var indexName, indexdef string
-		var colNames []string
-		err = rows.Scan(&indexName, &indexdef)
-		if err != nil {
-			return nil, err
-		}
-		indexName = strings.Trim(indexName, `" `)
-		if strings.HasSuffix(indexName, "_pkey") {
-			continue
-		}
-		if strings.HasPrefix(indexdef, "CREATE UNIQUE INDEX") {
-			indexType = core.UniqueType
-		} else {
-			indexType = core.IndexType
-		}
-		colNames = getIndexColName(indexdef)
-		var isRegular bool
-		if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) {
-			newIdxName := indexName[5+len(tableName):]
-			isRegular = true
-			if newIdxName != "" {
-				indexName = newIdxName
-			}
-		}
-
-		index := &core.Index{Name: indexName, Type: indexType, Cols: make([]string, 0)}
-		for _, colName := range colNames {
-			index.Cols = append(index.Cols, strings.Trim(colName, `" `))
-		}
-		index.IsRegular = isRegular
-		indexes[index.Name] = index
-	}
-	return indexes, nil
-}
-
-func (db *postgres) Filters() []core.Filter {
-	return []core.Filter{&core.IdFilter{}, &core.QuoteFilter{}, &core.SeqFilter{Prefix: "$", Start: 1}}
-}
-
-type pqDriver struct {
-}
-
-type values map[string]string
-
-func (vs values) Set(k, v string) {
-	vs[k] = v
-}
-
-func (vs values) Get(k string) (v string) {
-	return vs[k]
-}
-
-func parseURL(connstr string) (string, error) {
-	u, err := url.Parse(connstr)
-	if err != nil {
-		return "", err
-	}
-
-	if u.Scheme != "postgresql" && u.Scheme != "postgres" {
-		return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme)
-	}
-
-	escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`)
-
-	if u.Path != "" {
-		return escaper.Replace(u.Path[1:]), nil
-	}
-
-	return "", nil
-}
-
-func parseOpts(name string, o values) error {
-	if len(name) == 0 {
-		return fmt.Errorf("invalid options: %s", name)
-	}
-
-	name = strings.TrimSpace(name)
-
-	ps := strings.Split(name, " ")
-	for _, p := range ps {
-		kv := strings.Split(p, "=")
-		if len(kv) < 2 {
-			return fmt.Errorf("invalid option: %q", p)
-		}
-		o.Set(kv[0], kv[1])
-	}
-
-	return nil
-}
-
-func (p *pqDriver) Parse(driverName, dataSourceName string) (*core.Uri, error) {
-	db := &core.Uri{DbType: core.POSTGRES}
-	var err error
-
-	if strings.HasPrefix(dataSourceName, "postgresql://") || strings.HasPrefix(dataSourceName, "postgres://") {
-		db.DbName, err = parseURL(dataSourceName)
-		if err != nil {
-			return nil, err
-		}
-	} else {
-		o := make(values)
-		err = parseOpts(dataSourceName, o)
-		if err != nil {
-			return nil, err
-		}
-
-		db.DbName = o.Get("dbname")
-	}
-
-	if db.DbName == "" {
-		return nil, errors.New("dbname is empty")
-	}
-
-	return db, nil
-}
-
-type pqDriverPgx struct {
-	pqDriver
-}
-
-func (pgx *pqDriverPgx) Parse(driverName, dataSourceName string) (*core.Uri, error) {
-	// Remove the leading characters for driver to work
-	if len(dataSourceName) >= 9 && dataSourceName[0] == 0 {
-		dataSourceName = dataSourceName[9:]
-	}
-	return pgx.pqDriver.Parse(driverName, dataSourceName)
-}

+ 0 - 19
vendor/gitea.com/xorm/xorm/go.mod

@@ -1,19 +0,0 @@
-module github.com/go-xorm/xorm
-
-require (
-	github.com/cockroachdb/apd v1.1.0 // indirect
-	github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4
-	github.com/go-sql-driver/mysql v1.4.1
-	github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
-	github.com/jackc/pgx v3.3.0+incompatible
-	github.com/kr/pretty v0.1.0 // indirect
-	github.com/lib/pq v1.0.0
-	github.com/mattn/go-sqlite3 v1.10.0
-	github.com/pkg/errors v0.8.1 // indirect
-	github.com/satori/go.uuid v1.2.0 // indirect
-	github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect
-	github.com/stretchr/testify v1.3.0
-	github.com/ziutek/mymysql v1.5.4
-	xorm.io/builder v0.3.5
-	xorm.io/core v0.6.3
-)

+ 0 - 2
vendor/github.com/G-Node/gig/README.md

@@ -1,2 +0,0 @@
-# gig
-gig  is (some) Git in Go

+ 3 - 0
vendor/github.com/G-Node/git-module/.gitignore

@@ -0,0 +1,3 @@
+.idea
+*.sublime-project
+*.sublime-workspace

+ 18 - 0
vendor/github.com/G-Node/git-module/.travis.yml

@@ -0,0 +1,18 @@
+sudo: false
+language: go
+go:
+  - 1.4.x
+  - 1.5.x
+  - 1.6.x
+  - 1.7.x
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
+  - 1.11.x
+  - 1.12.x
+  - 1.13.x
+
+script: 
+  - go get golang.org/x/tools/cmd/cover
+  - go get github.com/smartystreets/goconvey
+  - go test -v -cover -race

+ 4 - 2
vendor/github.com/G-Node/git-module/README.md

@@ -1,11 +1,13 @@
-# Git Module [![Build Status](https://travis-ci.org/gogs/git-module.svg?branch=master)](https://travis-ci.org/gogs/git-module)
+# Git Module 
+
+[![Build Status](https://img.shields.io/travis/gogs/git-module/master.svg?style=for-the-badge&logo=travis)](https://travis-ci.org/gogs/git-module) [![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/gogs/git-module)
 
 Package git-module is a Go module for Git access through shell commands.
 
 ## Limitations
 
 - Go version must be at least **1.4**.
-- Git version must be no less than **1.7.1**, and greater than or equal to **1.8.3** is recommended.
+- Git version must be no less than **1.8.3**.
 - For Windows users, try to use as new a version as possible.
 
 ## License
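
The README's description is literal: the package shells out to the git binary rather than reimplementing it. A minimal usage sketch of the module as vendored here (not part of the diff; the repository path and branch name are placeholders):

    package main

    import (
        "fmt"
        "log"

        git "github.com/G-Node/git-module"
    )

    func main() {
        // Placeholder bare repository path, for illustration only.
        repoPath := "/data/repos/example.git"

        // High-level helper: check whether a branch exists.
        fmt.Println(git.IsBranchExist(repoPath, "master"))

        // Low-level access: run an arbitrary git command inside the repository.
        out, err := git.NewCommand("rev-parse", "--short", "HEAD").RunInDir(repoPath)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(out)
    }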

+ 1 - 13
vendor/github.com/G-Node/git-module/commit.go

@@ -13,8 +13,6 @@ import (
 	"net/http"
 	"strconv"
 	"strings"
-
-	"github.com/mcuadros/go-version"
 )
 
 // Commit represents a git commit.
@@ -140,14 +138,7 @@ func CommitChanges(repoPath string, opts CommitChangesOptions) error {
 
 func commitsCount(repoPath, revision, relpath string) (int64, error) {
 	var cmd *Command
-	isFallback := false
-	if version.Compare(gitVersion, "1.8.0", "<") {
-		isFallback = true
-		cmd = NewCommand("log", "--pretty=format:''")
-	} else {
-		cmd = NewCommand("rev-list", "--count")
-	}
-	cmd.AddArguments(revision)
+	cmd = NewCommand("rev-list", "--count").AddArguments(revision)
 	if len(relpath) > 0 {
 		cmd.AddArguments("--", relpath)
 	}
@@ -157,9 +148,6 @@ func commitsCount(repoPath, revision, relpath string) (int64, error) {
 		return 0, err
 	}
 
-	if isFallback {
-		return int64(strings.Count(stdout, "\n")) + 1, nil
-	}
 	return strconv.ParseInt(strings.TrimSpace(stdout), 10, 64)
 }
 

+ 9 - 9
vendor/github.com/G-Node/git-module/commit_archive.go

@@ -10,28 +10,28 @@ import (
 	"path/filepath"
 	"strings"
 
-	"github.com/G-Node/gogs/pkg/setting"
 	"github.com/G-Node/libgin/libgin"
 )
 
 type ArchiveType int
 
 const (
-	ZIP ArchiveType = iota + 1
-	TARGZ
-	GIN
+	ArchiveZip ArchiveType = iota + 1
+	ArchiveTarGz
+	ArchiveGIN
 )
 
 func (c *Commit) CreateArchive(target string, archiveType ArchiveType, cloneL string) error {
 	var format string
 	switch archiveType {
-	case ZIP:
+	case ArchiveZip:
 		format = "zip"
-	case TARGZ:
+	case ArchiveTarGz:
 		format = "tar.gz"
-	case GIN:
-		// TODO: Fix me!
-		to := filepath.Join(setting.Repository.Upload.TempPath, "archives", filepath.Base(strings.TrimSuffix(c.repo.Path, ".git")))
+	case ArchiveGIN:
+		// tmppath := setting.Repository.Upload.TempPath // Circular module dependency
+		tmppath := "/data/tmp/uploads" // live config location
+		to := filepath.Join(tmppath, "archives", filepath.Base(strings.TrimSuffix(c.repo.Path, ".git")))
 		defer os.RemoveAll(to)
 		_, err := NewCommand("clone", c.repo.Path, to).RunTimeout(-1)
 		if err != nil {

+ 77 - 0
vendor/github.com/G-Node/git-module/deprecated.go

@@ -0,0 +1,77 @@
+// Copyright 2019 The Gogs Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package git
+
+const (
+	// DEPRECATED: use ArchiveZip instead
+	ZIP = ArchiveZip
+	// DEPRECATED: use ArchiveTarGz instead
+	TARGZ = ArchiveTarGz
+)
+
+// DEPRECATED: use BranchPrefix instead
+const BRANCH_PREFIX = BranchPrefix
+
+// DEPRECATED: use RemotePrefix instead
+const REMOTE_PREFIX = RemotePrefix
+
+const (
+	// DEPRECATED: use DiffLinePlain instead
+	DIFF_LINE_PLAIN = DiffLinePlain
+	// DEPRECATED: use DiffLineAdd instead
+	DIFF_LINE_ADD = DiffLineAdd
+	// DEPRECATED: use DiffLineDel instead
+	DIFF_LINE_DEL = DiffLineDel
+	// DEPRECATED: use DiffLineSection instead
+	DIFF_LINE_SECTION = DiffLineSection
+)
+
+const (
+	// DEPRECATED: use DiffFileAdd instead
+	DIFF_FILE_ADD = DiffFileAdd
+	// DEPRECATED: use DiffFileChange instead
+	DIFF_FILE_CHANGE = DiffFileChange
+	// DEPRECATED: use DiffFileDel instead
+	DIFF_FILE_DEL = DiffFileDel
+	// DEPRECATED: use DiffFileRename instead
+	DIFF_FILE_RENAME = DiffFileRename
+)
+
+const (
+	// DEPRECATED: use RawDiffNormal instead
+	RAW_DIFF_NORMAL = RawDiffNormal
+	// DEPRECATED: use RawDiffPatch instead
+	RAW_DIFF_PATCH = RawDiffPatch
+)
+
+const (
+	// DEPRECATED: use ObjectCommit instead
+	OBJECT_COMMIT = ObjectCommit
+	// DEPRECATED: use ObjectTree instead
+	OBJECT_TREE = ObjectTree
+	// DEPRECATED: use ObjectBlob instead
+	OBJECT_BLOB = ObjectBlob
+	// DEPRECATED: use ObjectTag instead
+	OBJECT_TAG = ObjectTag
+)
+
+// DEPRECATED: use TagPrefix instead
+const TAG_PREFIX = TagPrefix
+
+// DEPRECATED: use EmptySHA instead
+const EMPTY_SHA = EmptySHA
+
+const (
+	// DEPRECATED: use EntryBlob instead
+	ENTRY_MODE_BLOB = EntryBlob
+	// DEPRECATED: use EntryExec instead
+	ENTRY_MODE_EXEC = EntryExec
+	// DEPRECATED: use EntrySymlink instead
+	ENTRY_MODE_SYMLINK = EntrySymlink
+	// DEPRECATED: use EntryCommit instead
+	ENTRY_MODE_COMMIT = EntryCommit
+	// DEPRECATED: use EntryTree instead
+	ENTRY_MODE_TREE = EntryTree
+)
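
The deprecated names above are plain aliases for the renamed constants, so callers written against the old identifiers keep compiling while they migrate. A small sketch of the equivalence (assuming the import path declared in the module's new go.mod):

    package main

    import (
        "fmt"

        git "github.com/G-Node/git-module"
    )

    func main() {
        // Old and new names refer to the same values; migration can be gradual.
        fmt.Println(git.ZIP == git.ArchiveZip)       // true
        fmt.Println(git.TAG_PREFIX == git.TagPrefix) // true
        fmt.Println(git.EMPTY_SHA == git.EmptySHA)   // true
    }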

+ 0 - 6
vendor/github.com/G-Node/git-module/git.go

@@ -10,12 +10,6 @@ import (
 	"time"
 )
 
-const _VERSION = "0.7.1"
-
-func Version() string {
-	return _VERSION
-}
-
 var (
 	// Debug enables verbose logging on everything.
 	// This should be false in case Gogs starts in SSH mode.

+ 9 - 0
vendor/github.com/G-Node/git-module/go.mod

@@ -0,0 +1,9 @@
+module github.com/G-Node/git-module
+
+go 1.12
+
+require (
+	github.com/G-Node/libgin v0.3.0
+	github.com/mcuadros/go-version v0.0.0-20190830083331-035f6764e8d2
+	github.com/smartystreets/goconvey v1.6.4
+)

+ 16 - 0
vendor/github.com/G-Node/git-module/go.sum

@@ -0,0 +1,16 @@
+github.com/G-Node/gig v0.0.0-20171025133355-6d784b40b534/go.mod h1:H+82VbQUp9AzlbEiZl3bri3mlPlN2q6CxlIh0CyJWnc=
+github.com/G-Node/git-module v0.8.4-0.20191218161021-3fd4d7aaf932/go.mod h1:VYqGwMiaOacy+pch//bmTW0HnfVxZA6VnFSvzOOwpX0=
+github.com/G-Node/libgin v0.0.0-20191216094436-47f8aadc0067/go.mod h1:2yLXQnNbwjH8mslxnzU8Kb+d7c2Zqo8DIgR6Pgp7lCg=
+github.com/G-Node/libgin v0.3.0/go.mod h1:VjulCBq7k/kgf4Eabk2f4w9SDNowWhLnK+yZvy5Nppk=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/mcuadros/go-version v0.0.0-20190830083331-035f6764e8d2/go.mod h1:76rfSfYPWj01Z85hUf/ituArm797mNKcvINh1OlsZKo=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=

+ 31 - 0
vendor/github.com/G-Node/git-module/remote.go

@@ -0,0 +1,31 @@
+// Copyright 2019 The Gogs Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package git
+
+import "strings"
+
+// RemoveRemote removes a remote from the given repository path if it exists.
+func RemoveRemote(repoPath, remote string) error {
+	_, err := NewCommand("remote", "rm", remote).RunInDir(repoPath)
+	if err != nil && !strings.Contains(err.Error(), "fatal: No such remote") {
+		return err
+	}
+	return nil
+}
+
+// AddRemoteOptions contains options to add a remote address.
+type AddRemoteOptions struct {
+	Mirror bool
+}
+
+// AddRemote adds a new remote
+func AddRemote(repoPath, remote, addr string, opts AddRemoteOptions) error {
+	cmd := NewCommand("remote", "add", remote)
+	if opts.Mirror {
+		cmd.AddArguments("--mirror")
+	}
+	_, err := cmd.AddArguments(addr).RunInDir(repoPath)
+	return err
+}
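
The two helpers above wrap "git remote add" and "git remote rm". A minimal usage sketch (the repository path and remote URL are placeholders):

    package main

    import (
        "log"

        git "github.com/G-Node/git-module"
    )

    func main() {
        // Placeholder values, for illustration only.
        repoPath := "/data/repos/example.git"
        remoteURL := "https://example.org/mirror.git"

        // Add a mirror remote, then remove it again.
        if err := git.AddRemote(repoPath, "backup", remoteURL, git.AddRemoteOptions{Mirror: true}); err != nil {
            log.Fatal(err)
        }
        if err := git.RemoveRemote(repoPath, "backup"); err != nil {
            log.Fatal(err)
        }
    }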

+ 31 - 27
vendor/github.com/G-Node/git-module/repo.go

@@ -11,10 +11,9 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"strconv"
 	"strings"
 	"time"
-
-	"github.com/unknwon/com"
 )
 
 // Repository represents a Git repository.
@@ -25,7 +24,7 @@ type Repository struct {
 	tagCache    *objectCache
 }
 
-const _PRETTY_LOG_FORMAT = `--pretty=format:%H`
+const prettyLogFormat = `--pretty=format:%H`
 
 func (repo *Repository) parsePrettyFormatLogToList(logs []byte) (*list.List, error) {
 	l := list.New()
@@ -241,16 +240,21 @@ type CountObject struct {
 }
 
 const (
-	_STAT_COUNT          = "count: "
-	_STAT_SIZE           = "size: "
-	_STAT_IN_PACK        = "in-pack: "
-	_STAT_PACKS          = "packs: "
-	_STAT_SIZE_PACK      = "size-pack: "
-	_STAT_PRUNE_PACKABLE = "prune-packable: "
-	_STAT_GARBAGE        = "garbage: "
-	_STAT_SIZE_GARBAGE   = "size-garbage: "
+	statCount         = "count: "
+	statSize          = "size: "
+	statInPack        = "in-pack: "
+	statPacks         = "packs: "
+	statSizePack      = "size-pack: "
+	statPrunePackable = "prune-packable: "
+	statGarbage       = "garbage: "
+	statSizeGarbage   = "size-garbage: "
 )
 
+func strToInt64(s string) int64 {
+	i, _ := strconv.ParseInt(s, 10, 64)
+	return i
+}
+
 // GetRepoSize returns disk usage report of repository in given path.
 func GetRepoSize(repoPath string) (*CountObject, error) {
 	cmd := NewCommand("count-objects", "-v")
@@ -262,22 +266,22 @@ func GetRepoSize(repoPath string) (*CountObject, error) {
 	countObject := new(CountObject)
 	for _, line := range strings.Split(stdout, "\n") {
 		switch {
-		case strings.HasPrefix(line, _STAT_COUNT):
-			countObject.Count = com.StrTo(line[7:]).MustInt64()
-		case strings.HasPrefix(line, _STAT_SIZE):
-			countObject.Size = com.StrTo(line[6:]).MustInt64() * 1024
-		case strings.HasPrefix(line, _STAT_IN_PACK):
-			countObject.InPack = com.StrTo(line[9:]).MustInt64()
-		case strings.HasPrefix(line, _STAT_PACKS):
-			countObject.Packs = com.StrTo(line[7:]).MustInt64()
-		case strings.HasPrefix(line, _STAT_SIZE_PACK):
-			countObject.SizePack = com.StrTo(line[11:]).MustInt64() * 1024
-		case strings.HasPrefix(line, _STAT_PRUNE_PACKABLE):
-			countObject.PrunePackable = com.StrTo(line[16:]).MustInt64()
-		case strings.HasPrefix(line, _STAT_GARBAGE):
-			countObject.Garbage = com.StrTo(line[9:]).MustInt64()
-		case strings.HasPrefix(line, _STAT_SIZE_GARBAGE):
-			countObject.SizeGarbage = com.StrTo(line[14:]).MustInt64() * 1024
+		case strings.HasPrefix(line, statCount):
+			countObject.Count = strToInt64(line[7:])
+		case strings.HasPrefix(line, statSize):
+			countObject.Size = strToInt64(line[6:]) * 1024
+		case strings.HasPrefix(line, statInPack):
+			countObject.InPack = strToInt64(line[9:])
+		case strings.HasPrefix(line, statPacks):
+			countObject.Packs = strToInt64(line[7:])
+		case strings.HasPrefix(line, statSizePack):
+			countObject.SizePack = strToInt64(line[11:]) * 1024
+		case strings.HasPrefix(line, statPrunePackable):
+			countObject.PrunePackable = strToInt64(line[16:])
+		case strings.HasPrefix(line, statGarbage):
+			countObject.Garbage = strToInt64(line[9:])
+		case strings.HasPrefix(line, statSizeGarbage):
+			countObject.SizeGarbage = strToInt64(line[14:]) * 1024
 		}
 	}
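
The new strToInt64 helper replaces com.StrTo(...).MustInt64() from the dropped github.com/unknwon/com dependency: parse errors are silently discarded and yield 0, so GetRepoSize keeps its previous behaviour. A standalone sketch of that behaviour (the input strings are made up):

    package main

    import (
        "fmt"
        "strconv"
    )

    // strToInt64 mirrors the helper added to repo.go: the parse error is
    // deliberately ignored, so malformed input yields 0.
    func strToInt64(s string) int64 {
        i, _ := strconv.ParseInt(s, 10, 64)
        return i
    }

    func main() {
        fmt.Println(strToInt64("1024"))         // 1024
        fmt.Println(strToInt64("not-a-number")) // 0
    }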
 

+ 6 - 12
vendor/github.com/G-Node/git-module/repo_branch.go

@@ -7,11 +7,9 @@ package git
 import (
 	"fmt"
 	"strings"
-
-	"github.com/mcuadros/go-version"
 )
 
-const BRANCH_PREFIX = "refs/heads/"
+const BranchPrefix = "refs/heads/"
 
 // IsReferenceExist returns true if given reference exists in the repository.
 func IsReferenceExist(repoPath, name string) bool {
@@ -21,7 +19,7 @@ func IsReferenceExist(repoPath, name string) bool {
 
 // IsBranchExist returns true if given branch exists in the repository.
 func IsBranchExist(repoPath, name string) bool {
-	return IsReferenceExist(repoPath, BRANCH_PREFIX+name)
+	return IsReferenceExist(repoPath, BranchPrefix+name)
 }
 
 func (repo *Repository) IsBranchExist(name string) bool {
@@ -42,23 +40,19 @@ func (repo *Repository) GetHEADBranch() (*Branch, error) {
 	}
 	stdout = strings.TrimSpace(stdout)
 
-	if !strings.HasPrefix(stdout, BRANCH_PREFIX) {
+	if !strings.HasPrefix(stdout, BranchPrefix) {
 		return nil, fmt.Errorf("invalid HEAD branch: %v", stdout)
 	}
 
 	return &Branch{
-		Name: stdout[len(BRANCH_PREFIX):],
+		Name: stdout[len(BranchPrefix):],
 		Path: stdout,
 	}, nil
 }
 
 // SetDefaultBranch sets default branch of repository.
 func (repo *Repository) SetDefaultBranch(name string) error {
-	if version.Compare(gitVersion, "1.7.10", "<") {
-		return ErrUnsupportedVersion{"1.7.10"}
-	}
-
-	_, err := NewCommand("symbolic-ref", "HEAD", BRANCH_PREFIX+name).RunInDir(repo.Path)
+	_, err := NewCommand("symbolic-ref", "HEAD", BranchPrefix+name).RunInDir(repo.Path)
 	return err
 }
 
@@ -76,7 +70,7 @@ func (repo *Repository) GetBranches() ([]string, error) {
 		if len(fields) != 2 {
 			continue // NOTE: I should believe git will not give me wrong string.
 		}
-		branches[i] = strings.TrimPrefix(fields[1], BRANCH_PREFIX)
+		branches[i] = strings.TrimPrefix(fields[1], BranchPrefix)
 	}
 	return branches, nil
 }

+ 14 - 42
vendor/github.com/G-Node/git-module/repo_commit.go

@@ -11,11 +11,9 @@ import (
 	"strconv"
 	"strings"
 	"time"
-
-	"github.com/mcuadros/go-version"
 )
 
-const REMOTE_PREFIX = "refs/remotes/"
+const RemotePrefix = "refs/remotes/"
 
 // getRefCommitID returns the last commit ID string of given reference (branch or tag).
 func (repo *Repository) getRefCommitID(name string) (string, error) {
@@ -31,17 +29,17 @@ func (repo *Repository) getRefCommitID(name string) (string, error) {
 
 // GetBranchCommitID returns last commit ID string of given branch.
 func (repo *Repository) GetBranchCommitID(name string) (string, error) {
-	return repo.getRefCommitID(BRANCH_PREFIX + name)
+	return repo.getRefCommitID(BranchPrefix + name)
 }
 
 // GetTagCommitID returns last commit ID string of given tag.
 func (repo *Repository) GetTagCommitID(name string) (string, error) {
-	return repo.getRefCommitID(TAG_PREFIX + name)
+	return repo.getRefCommitID(TagPrefix + name)
 }
 
 // GetRemoteBranchCommitID returns last commit ID string of given remote branch.
 func (repo *Repository) GetRemoteBranchCommitID(name string) (string, error) {
-	return repo.getRefCommitID(REMOTE_PREFIX + name)
+	return repo.getRefCommitID(RemotePrefix + name)
 }
 
 // parseCommitData parses commit information from the (uncompressed) raw
@@ -172,7 +170,7 @@ func (repo *Repository) getCommitByPathWithID(id sha1, relpath string) (*Commit,
 		relpath = `\` + relpath
 	}
 
-	stdout, err := NewCommand("log", "-1", _PRETTY_LOG_FORMAT, id.String(), "--", relpath).RunInDir(repo.Path)
+	stdout, err := NewCommand("log", "-1", prettyLogFormat, id.String(), "--", relpath).RunInDir(repo.Path)
 	if err != nil {
 		return nil, err
 	}
@@ -187,7 +185,7 @@ func (repo *Repository) getCommitByPathWithID(id sha1, relpath string) (*Commit,
 
 // GetCommitByPath returns the last commit of relative path.
 func (repo *Repository) GetCommitByPath(relpath string) (*Commit, error) {
-	stdout, err := NewCommand("log", "-1", _PRETTY_LOG_FORMAT, "--", relpath).RunInDirBytes(repo.Path)
+	stdout, err := NewCommand("log", "-1", prettyLogFormat, "--", relpath).RunInDirBytes(repo.Path)
 	if err != nil {
 		return nil, err
 	}
@@ -201,7 +199,7 @@ func (repo *Repository) GetCommitByPath(relpath string) (*Commit, error) {
 
 func (repo *Repository) CommitsByRangeSize(revision string, page, size int) (*list.List, error) {
 	stdout, err := NewCommand("log", revision, "--skip="+strconv.Itoa((page-1)*size),
-		"--max-count="+strconv.Itoa(size), _PRETTY_LOG_FORMAT).RunInDirBytes(repo.Path)
+		"--max-count="+strconv.Itoa(size), prettyLogFormat).RunInDirBytes(repo.Path)
 	if err != nil {
 		return nil, err
 	}
@@ -215,7 +213,7 @@ func (repo *Repository) CommitsByRange(revision string, page int) (*list.List, e
 }
 
 func (repo *Repository) searchCommits(id sha1, keyword string) (*list.List, error) {
-	stdout, err := NewCommand("log", id.String(), "-100", "-i", "--grep="+keyword, _PRETTY_LOG_FORMAT).RunInDirBytes(repo.Path)
+	stdout, err := NewCommand("log", id.String(), "-100", "-i", "--grep="+keyword, prettyLogFormat).RunInDirBytes(repo.Path)
 	if err != nil {
 		return nil, err
 	}
@@ -236,7 +234,7 @@ func (repo *Repository) FileCommitsCount(revision, file string) (int64, error) {
 
 func (repo *Repository) CommitsByFileAndRangeSize(revision, file string, page, size int) (*list.List, error) {
 	stdout, err := NewCommand("log", revision, "--skip="+strconv.Itoa((page-1)*size),
-		"--max-count="+strconv.Itoa(size), _PRETTY_LOG_FORMAT, "--", file).RunInDirBytes(repo.Path)
+		"--max-count="+strconv.Itoa(size), prettyLogFormat, "--", file).RunInDirBytes(repo.Path)
 	if err != nil {
 		return nil, err
 	}
@@ -257,37 +255,11 @@ func (repo *Repository) FilesCountBetween(startCommitID, endCommitID string) (in
 
 // CommitsBetween returns a list that contains commits between [last, before).
 func (repo *Repository) CommitsBetween(last *Commit, before *Commit) (*list.List, error) {
-	if version.Compare(gitVersion, "1.8.0", ">=") {
-		stdout, err := NewCommand("rev-list", before.ID.String()+"..."+last.ID.String()).RunInDirBytes(repo.Path)
-		if err != nil {
-			return nil, err
-		}
-		return repo.parsePrettyFormatLogToList(bytes.TrimSpace(stdout))
-	}
-
-	// Fallback to stupid solution, which iterates all commits of the repository
-	// if before is not an ancestor of last.
-	l := list.New()
-	if last == nil || last.ParentCount() == 0 {
-		return l, nil
-	}
-
-	var err error
-	cur := last
-	for {
-		if cur.ID.Equal(before.ID) {
-			break
-		}
-		l.PushBack(cur)
-		if cur.ParentCount() == 0 {
-			break
-		}
-		cur, err = cur.Parent(0)
-		if err != nil {
-			return nil, err
-		}
+	stdout, err := NewCommand("rev-list", before.ID.String()+"..."+last.ID.String()).RunInDirBytes(repo.Path)
+	if err != nil {
+		return nil, err
 	}
-	return l, nil
+	return repo.parsePrettyFormatLogToList(bytes.TrimSpace(stdout))
 }
 
 func (repo *Repository) CommitsBetweenIDs(last, before string) (*list.List, error) {
@@ -379,7 +351,7 @@ func (repo *Repository) getCommitsBeforeLimit(id sha1, num int) (*list.List, err
 // CommitsAfterDate returns a list of commits which committed after given date.
 // The format of date should be in RFC3339.
 func (repo *Repository) CommitsAfterDate(date string) (*list.List, error) {
-	stdout, err := NewCommand("log", _PRETTY_LOG_FORMAT, "--since="+date).RunInDirBytes(repo.Path)
+	stdout, err := NewCommand("log", prettyLogFormat, "--since="+date).RunInDirBytes(repo.Path)
 	if err != nil {
 		return nil, err
 	}

+ 24 - 24
vendor/github.com/G-Node/git-module/repo_diff.go

@@ -19,20 +19,20 @@ import (
 type DiffLineType uint8
 
 const (
-	DIFF_LINE_PLAIN DiffLineType = iota + 1
-	DIFF_LINE_ADD
-	DIFF_LINE_DEL
-	DIFF_LINE_SECTION
+	DiffLinePlain DiffLineType = iota + 1
+	DiffLineAdd
+	DiffLineDel
+	DiffLineSection
 )
 
 // DiffFileType represents the file status in diff.
 type DiffFileType uint8
 
 const (
-	DIFF_FILE_ADD DiffFileType = iota + 1
-	DIFF_FILE_CHANGE
-	DIFF_FILE_DEL
-	DIFF_FILE_RENAME
+	DiffFileAdd DiffFileType = iota + 1
+	DiffFileChange
+	DiffFileDel
+	DiffFileRename
 )
 
 // DiffLine represents a line in diff.
@@ -65,9 +65,9 @@ func (diffSection *DiffSection) Line(lineType DiffLineType, idx int) *DiffLine {
 LOOP:
 	for _, diffLine := range diffSection.Lines {
 		switch diffLine.Type {
-		case DIFF_LINE_ADD:
+		case DiffLineAdd:
 			addCount++
-		case DIFF_LINE_DEL:
+		case DiffLineDel:
 			delCount++
 		default:
 			if matchDiffLine != nil {
@@ -79,11 +79,11 @@ LOOP:
 		}
 
 		switch lineType {
-		case DIFF_LINE_DEL:
+		case DiffLineDel:
 			if diffLine.RightIdx == 0 && diffLine.LeftIdx == idx-difference {
 				matchDiffLine = diffLine
 			}
-		case DIFF_LINE_ADD:
+		case DiffLineAdd:
 			if diffLine.LeftIdx == 0 && diffLine.RightIdx == idx+difference {
 				matchDiffLine = diffLine
 			}
@@ -180,7 +180,7 @@ func ParsePatch(done chan<- error, maxLines, maxLineCharacteres, maxFiles int, r
 
 		switch {
 		case line[0] == ' ':
-			diffLine := &DiffLine{Type: DIFF_LINE_PLAIN, Content: line, LeftIdx: leftLine, RightIdx: rightLine}
+			diffLine := &DiffLine{Type: DiffLinePlain, Content: line, LeftIdx: leftLine, RightIdx: rightLine}
 			leftLine++
 			rightLine++
 			curSection.Lines = append(curSection.Lines, diffLine)
@@ -189,7 +189,7 @@ func ParsePatch(done chan<- error, maxLines, maxLineCharacteres, maxFiles int, r
 			curSection = &DiffSection{}
 			curFile.Sections = append(curFile.Sections, curSection)
 			ss := strings.Split(line, "@@")
-			diffLine := &DiffLine{Type: DIFF_LINE_SECTION, Content: line}
+			diffLine := &DiffLine{Type: DiffLineSection, Content: line}
 			curSection.Lines = append(curSection.Lines, diffLine)
 
 			// Parse line number.
@@ -204,14 +204,14 @@ func ParsePatch(done chan<- error, maxLines, maxLineCharacteres, maxFiles int, r
 		case line[0] == '+':
 			curFile.Addition++
 			diff.TotalAddition++
-			diffLine := &DiffLine{Type: DIFF_LINE_ADD, Content: line, RightIdx: rightLine}
+			diffLine := &DiffLine{Type: DiffLineAdd, Content: line, RightIdx: rightLine}
 			rightLine++
 			curSection.Lines = append(curSection.Lines, diffLine)
 			continue
 		case line[0] == '-':
 			curFile.Deletion++
 			diff.TotalDeletion++
-			diffLine := &DiffLine{Type: DIFF_LINE_DEL, Content: line, LeftIdx: leftLine}
+			diffLine := &DiffLine{Type: DiffLineDel, Content: line, LeftIdx: leftLine}
 			if leftLine > 0 {
 				leftLine++
 			}
@@ -244,7 +244,7 @@ func ParsePatch(done chan<- error, maxLines, maxLineCharacteres, maxFiles int, r
 
 			curFile = &DiffFile{
 				Name:     a,
-				Type:     DIFF_FILE_CHANGE,
+				Type:     DiffFileChange,
 				Sections: make([]*DiffSection, 0, 10),
 			}
 			diff.Files = append(diff.Files, curFile)
@@ -270,11 +270,11 @@ func ParsePatch(done chan<- error, maxLines, maxLineCharacteres, maxFiles int, r
 
 				switch {
 				case strings.HasPrefix(line, "new file"):
-					curFile.Type = DIFF_FILE_ADD
+					curFile.Type = DiffFileAdd
 					curFile.IsCreated = true
 					curFile.IsSubmodule = strings.HasSuffix(line, " 160000\n")
 				case strings.HasPrefix(line, "deleted"):
-					curFile.Type = DIFF_FILE_DEL
+					curFile.Type = DiffFileDel
 					curFile.IsDeleted = true
 					curFile.IsSubmodule = strings.HasSuffix(line, " 160000\n")
 				case strings.HasPrefix(line, "index"):
@@ -287,7 +287,7 @@ func ParsePatch(done chan<- error, maxLines, maxLineCharacteres, maxFiles int, r
 					}
 					break CHECK_TYPE
 				case strings.HasPrefix(line, "similarity index 100%"):
-					curFile.Type = DIFF_FILE_RENAME
+					curFile.Type = DiffFileRename
 					curFile.IsRenamed = true
 					curFile.OldName = curFile.Name
 					curFile.Name = b
@@ -350,8 +350,8 @@ func GetDiffRange(repoPath, beforeCommitID, afterCommitID string, maxLines, maxL
 type RawDiffType string
 
 const (
-	RAW_DIFF_NORMAL RawDiffType = "diff"
-	RAW_DIFF_PATCH  RawDiffType = "patch"
+	RawDiffNormal RawDiffType = "diff"
+	RawDiffPatch  RawDiffType = "patch"
 )
 
 // GetRawDiff dumps diff results of repository in given commit ID to io.Writer.
@@ -368,14 +368,14 @@ func GetRawDiff(repoPath, commitID string, diffType RawDiffType, writer io.Write
 
 	cmd := NewCommand()
 	switch diffType {
-	case RAW_DIFF_NORMAL:
+	case RawDiffNormal:
 		if commit.ParentCount() == 0 {
 			cmd.AddArguments("show", commitID)
 		} else {
 			c, _ := commit.Parent(0)
 			cmd.AddArguments("diff", "-M", c.ID.String(), commitID)
 		}
-	case RAW_DIFF_PATCH:
+	case RawDiffPatch:
 		if commit.ParentCount() == 0 {
 			cmd.AddArguments("format-patch", "--no-signature", "--stdout", "--root", commitID)
 		} else {

+ 4 - 4
vendor/github.com/G-Node/git-module/repo_object.go

@@ -7,8 +7,8 @@ package git
 type ObjectType string
 
 const (
-	OBJECT_COMMIT ObjectType = "commit"
-	OBJECT_TREE   ObjectType = "tree"
-	OBJECT_BLOB   ObjectType = "blob"
-	OBJECT_TAG    ObjectType = "tag"
+	ObjectCommit ObjectType = "commit"
+	ObjectTree   ObjectType = "tree"
+	ObjectBlob   ObjectType = "blob"
+	ObjectTag    ObjectType = "tag"
 )

+ 1 - 1
vendor/github.com/G-Node/git-module/repo_pull.go

@@ -56,7 +56,7 @@ func (repo *Repository) GetPullRequestInfo(basePath, baseBranch, headBranch stri
 		return nil, err
 	}
 
-	logs, err := NewCommand("log", prInfo.MergeBase+"..."+headBranch, _PRETTY_LOG_FORMAT).RunInDirBytes(repo.Path)
+	logs, err := NewCommand("log", prInfo.MergeBase+"..."+headBranch, prettyLogFormat).RunInDirBytes(repo.Path)
 	if err != nil {
 		return nil, err
 	}

+ 8 - 8
vendor/github.com/G-Node/git-module/repo_tag.go

@@ -8,14 +8,14 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/mcuadros/go-version"
+	goversion "github.com/mcuadros/go-version"
 )
 
-const TAG_PREFIX = "refs/tags/"
+const TagPrefix = "refs/tags/"
 
 // IsTagExist returns true if given tag exists in the repository.
 func IsTagExist(repoPath, name string) bool {
-	return IsReferenceExist(repoPath, TAG_PREFIX+name)
+	return IsReferenceExist(repoPath, TagPrefix+name)
 }
 
 func (repo *Repository) IsTagExist(name string) bool {
@@ -42,11 +42,11 @@ func (repo *Repository) getTag(id sha1) (*Tag, error) {
 	tp = strings.TrimSpace(tp)
 
 	// Tag is a commit.
-	if ObjectType(tp) == OBJECT_COMMIT {
+	if ObjectType(tp) == ObjectCommit {
 		tag := &Tag{
 			ID:     id,
 			Object: id,
-			Type:   string(OBJECT_COMMIT),
+			Type:   string(ObjectCommit),
 			repo:   repo,
 		}
 
@@ -95,7 +95,7 @@ func (repo *Repository) GetTag(name string) (*Tag, error) {
 // GetTags returns all tags of the repository.
 func (repo *Repository) GetTags() ([]string, error) {
 	cmd := NewCommand("tag", "-l")
-	if version.Compare(gitVersion, "2.4.9", ">=") {
+	if goversion.Compare(gitVersion, "2.4.9", ">=") {
 		cmd.AddArguments("--sort=-creatordate")
 	}
 
@@ -107,8 +107,8 @@ func (repo *Repository) GetTags() ([]string, error) {
 	tags := strings.Split(stdout, "\n")
 	tags = tags[:len(tags)-1]
 
-	if version.Compare(gitVersion, "2.4.9", "<") {
-		version.Sort(tags)
+	if goversion.Compare(gitVersion, "2.4.9", "<") {
+		goversion.Sort(tags)
 
 		// Reverse order
 		for i := 0; i < len(tags)/2; i++ {

+ 1 - 1
vendor/github.com/G-Node/git-module/sha1.go

@@ -10,7 +10,7 @@ import (
 	"strings"
 )
 
-const EMPTY_SHA = "0000000000000000000000000000000000000000"
+const EmptySHA = "0000000000000000000000000000000000000000"
 
 type sha1 [20]byte
 

+ 10 - 10
vendor/github.com/G-Node/git-module/tree.go

@@ -61,22 +61,22 @@ func parseTreeData(tree *Tree, data []byte) ([]*TreeEntry, error) {
 		step := 6
 		switch string(data[pos : pos+step]) {
 		case "100644", "100664":
-			entry.mode = ENTRY_MODE_BLOB
-			entry.Type = OBJECT_BLOB
+			entry.mode = EntryBlob
+			entry.Type = ObjectBlob
 		case "100755":
-			entry.mode = ENTRY_MODE_EXEC
-			entry.Type = OBJECT_BLOB
+			entry.mode = EntryExec
+			entry.Type = ObjectBlob
 		case "120000":
-			entry.mode = ENTRY_MODE_SYMLINK
-			entry.Type = OBJECT_BLOB
+			entry.mode = EntrySymlink
+			entry.Type = ObjectBlob
 		case "160000":
-			entry.mode = ENTRY_MODE_COMMIT
-			entry.Type = OBJECT_COMMIT
+			entry.mode = EntryCommit
+			entry.Type = ObjectCommit
 
 			step = 8
 		case "040000":
-			entry.mode = ENTRY_MODE_TREE
-			entry.Type = OBJECT_TREE
+			entry.mode = EntryTree
+			entry.Type = ObjectTree
 		default:
 			return nil, fmt.Errorf("unknown type: %v", string(data[pos:pos+step]))
 		}

+ 2 - 2
vendor/github.com/G-Node/git-module/tree_blob.go

@@ -13,8 +13,8 @@ func (t *Tree) GetTreeEntryByPath(relpath string) (*TreeEntry, error) {
 	if len(relpath) == 0 {
 		return &TreeEntry{
 			ID:   t.ID,
-			Type: OBJECT_TREE,
-			mode: ENTRY_MODE_TREE,
+			Type: ObjectTree,
+			mode: EntryTree,
 		}, nil
 	}
 

+ 9 - 9
vendor/github.com/G-Node/git-module/tree_entry.go

@@ -19,11 +19,11 @@ type EntryMode int
 // There are only a few file modes in Git. They look like unix file modes, but they can only be
 // one of these.
 const (
-	ENTRY_MODE_BLOB    EntryMode = 0100644
-	ENTRY_MODE_EXEC    EntryMode = 0100755
-	ENTRY_MODE_SYMLINK EntryMode = 0120000
-	ENTRY_MODE_COMMIT  EntryMode = 0160000
-	ENTRY_MODE_TREE    EntryMode = 0040000
+	EntryBlob    EntryMode = 0100644
+	EntryExec    EntryMode = 0100755
+	EntrySymlink EntryMode = 0120000
+	EntryCommit  EntryMode = 0160000
+	EntryTree    EntryMode = 0040000
 )
 
 type TreeEntry struct {
@@ -68,15 +68,15 @@ func (te *TreeEntry) Size() int64 {
 }
 
 func (te *TreeEntry) IsSubModule() bool {
-	return te.mode == ENTRY_MODE_COMMIT
+	return te.mode == EntryCommit
 }
 
 func (te *TreeEntry) IsDir() bool {
-	return te.mode == ENTRY_MODE_TREE
+	return te.mode == EntryTree
 }
 
 func (te *TreeEntry) IsLink() bool {
-	return te.mode == ENTRY_MODE_SYMLINK
+	return te.mode == EntrySymlink
 }
 
 func (te *TreeEntry) Blob() *Blob {
@@ -178,7 +178,7 @@ func (tes Entries) GetCommitsInfoWithCustomConcurrency(commit *Commit, treePath
 		// However when taskChan is full, code will block and wait any running goroutines to finish.
 		taskChan <- true
 
-		if tes[i].Type != OBJECT_COMMIT {
+		if tes[i].Type != ObjectCommit {
 			go func(i int) {
 				cinfo := commitInfo{entryName: tes[i].Name()}
 				c, err := commit.GetCommitByPath(filepath.Join(treePath, tes[i].Name()))

+ 4 - 4
vendor/github.com/G-Node/git-module/utils.go

@@ -81,12 +81,12 @@ func filepathFromSHA1(rootdir, sha1 string) string {
 }
 
 func RefEndName(refStr string) string {
-	if strings.HasPrefix(refStr, BRANCH_PREFIX) {
-		return refStr[len(BRANCH_PREFIX):]
+	if strings.HasPrefix(refStr, BranchPrefix) {
+		return refStr[len(BranchPrefix):]
 	}
 
-	if strings.HasPrefix(refStr, TAG_PREFIX) {
-		return refStr[len(TAG_PREFIX):]
+	if strings.HasPrefix(refStr, TagPrefix) {
+		return refStr[len(TagPrefix):]
 	}
 
 	return refStr

+ 0 - 29
vendor/github.com/G-Node/go-annex/LICENSE

@@ -1,29 +0,0 @@
-BSD 3-Clause License
-
-Copyright (c) 2017, German Neuroinformatics Node
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
-  this list of conditions and the following disclaimer in the documentation
-  and/or other materials provided with the distribution.
-
-* Neither the name of the copyright holder nor the names of its
-  contributors may be used to endorse or promote products derived from
-  this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 0 - 5
vendor/github.com/G-Node/go-annex/util.go

@@ -1,5 +0,0 @@
-package gannex
-
-func isAnnexed(dir string) (bool, error) {
-	return false, nil
-}

+ 0 - 65
vendor/github.com/G-Node/godML/odml/odml.go

@@ -1,65 +0,0 @@
-package odml
-
-import (
-	"fmt"
-	"encoding/json"
-)
-
-type Section struct {
-	Name       string        `json:"-" xml:"name"`
-	Type       string        `json:"-" xml:"type"`
-	Properties [] Property   `json:"-" xml:"property"`
-	Text       string        `json:"text"`
-	Sections   [] Section    `json:"-" xml:"section"`
-	Children   [] OdMLObject `json:"children,omitempty"`
-}
-
-type Property struct {
-	Name       string   `json:"-" xml:"name"`
-	Value      []string `json:"-" xml:"value"`
-	Text       string   `json:"text"`
-	Icon       string
-	Definition string   `xml:"definition"`
-}
-
-type OdMLObject struct {
-	Prop    Property
-	Section Section
-	Type    string
-}
-
-type Odml struct {
-	OdmnlSections []Section `json:"children" xml:"section"`
-}
-
-func (u *Property) MarshalJSON() ([]byte, error) {
-	type Alias Property
-	if u.Text == "" {
-		u.Text = fmt.Sprintf("%s: %s (%s)", u.Name, u.Value, u.Definition)
-	}
-	return json.Marshal(Alias(*u))
-}
-
-func (u *Section) MarshalJSON() ([]byte, error) {
-	type Alias Section
-	if u.Text == "" {
-		u.Text = fmt.Sprintf("%s", u.Name)
-	}
-	for _, x := range u.Properties {
-		u.Children = append(u.Children, OdMLObject{Prop: x, Type: "property"})
-	}
-	for _, x := range u.Sections {
-		u.Children = append(u.Children, OdMLObject{Section: x, Type: "section"})
-	}
-	return json.Marshal(Alias(*u))
-}
-
-func (u *OdMLObject) MarshalJSON() ([]byte, error) {
-	if u.Type == "property" {
-		return u.Prop.MarshalJSON()
-	}
-	if u.Type == "section" {
-		return u.Section.MarshalJSON()
-	}
-	return nil, fmt.Errorf("Could not unmarshal odml object")
-}

+ 7 - 1
vendor/github.com/G-Node/libgin/LICENSE

@@ -1,6 +1,12 @@
 BSD 3-Clause License
 
-Copyright (c) 2018, German Neuroinformatics Node
+Copyright (c) 2019, German Neuroinformatics Node
+                    Achilleas Koutsou <koutsou@bio.lmu.de>
+                    Michael Sonntag <sonntag@bio.lmu.de>
+                    Christian Garbers
+                    Christian Kellner <kellner@bio.lmu.de>
+                    Adrian Stoewer
+
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without

+ 2 - 7
vendor/github.com/G-Node/go-annex/add.go → vendor/github.com/G-Node/libgin/libgin/annex/add.go

@@ -1,9 +1,9 @@
-package gannex
+package annex
 
 import (
 	"fmt"
 
-	"github.com/G-Node/git-module"
+	git "github.com/G-Node/git-module"
 )
 
 const (
@@ -19,11 +19,6 @@ func Init(dir string, args ...string) (string, error) {
 	return cmd.AddArguments(args...).RunInDir(dir)
 }
 
-func Upgrade(dir string) (string, error) {
-	cmd := git.NewACommand("upgrade")
-	return cmd.RunInDir(dir)
-}
-
 func Uninit(dir string, args ...string) (string, error) {
 	cmd := git.NewACommand("uninit")
 	return cmd.AddArguments(args...).RunInDir(dir)

+ 4 - 5
vendor/github.com/G-Node/go-annex/file.go → vendor/github.com/G-Node/libgin/libgin/annex/file.go

@@ -1,12 +1,12 @@
-package gannex
+package annex
 
 import (
+	"fmt"
+	"io"
 	"os"
 	"path/filepath"
-	"strings"
-	"io"
-	"fmt"
 	"regexp"
+	"strings"
 )
 
 var (
@@ -14,7 +14,6 @@ var (
 	aFPattern = regexp.MustCompile(`[\\\/]annex[\\\/](.+)`)
 )
 
-
 type AFile struct {
 	Filepath  string
 	OFilename string

+ 12 - 0
vendor/github.com/G-Node/libgin/libgin/annex/util.go

@@ -0,0 +1,12 @@
+package annex
+
+import "github.com/G-Node/git-module"
+
+func isAnnexed(dir string) (bool, error) {
+	return false, nil
+}
+
+func Upgrade(dir string) (string, error) {
+	cmd := git.NewACommand("upgrade")
+	return cmd.RunInDir(dir)
+}
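
Upgrade, formerly part of G-Node/go-annex, now sits next to Init in the libgin annex package. A minimal usage sketch under that assumption (the repository path is a placeholder):

    package main

    import (
        "log"

        "github.com/G-Node/libgin/libgin/annex"
    )

    func main() {
        // Placeholder repository path, for illustration only.
        dir := "/data/repos/example.git"

        // Initialise git-annex in the repository, then upgrade its layout.
        if out, err := annex.Init(dir); err != nil {
            log.Fatalf("annex init failed: %v (%s)", err, out)
        }
        if out, err := annex.Upgrade(dir); err != nil {
            log.Fatalf("annex upgrade failed: %v (%s)", err, out)
        }
    }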

+ 1 - 1
vendor/github.com/G-Node/libgin/libgin/dex.go

@@ -3,7 +3,7 @@ package libgin
 import (
 	"time"
 
-	"github.com/G-Node/gig"
+	"github.com/G-Node/libgin/libgin/gig"
 )
 
 // NOTE: TEMPORARY COPY FROM gin-dex

+ 0 - 0
vendor/github.com/G-Node/gig/delta.go → vendor/github.com/G-Node/libgin/libgin/gig/delta.go


+ 0 - 0
vendor/github.com/G-Node/gig/objects.go → vendor/github.com/G-Node/libgin/libgin/gig/objects.go


+ 0 - 0
vendor/github.com/G-Node/gig/pack.go → vendor/github.com/G-Node/libgin/libgin/gig/pack.go


+ 0 - 0
vendor/github.com/G-Node/gig/parse.go → vendor/github.com/G-Node/libgin/libgin/gig/parse.go


+ 0 - 0
vendor/github.com/G-Node/gig/refs.go → vendor/github.com/G-Node/libgin/libgin/gig/refs.go


+ 0 - 0
vendor/github.com/G-Node/gig/repo.go → vendor/github.com/G-Node/libgin/libgin/gig/repo.go


+ 0 - 0
vendor/github.com/G-Node/gig/util.go → vendor/github.com/G-Node/libgin/libgin/gig/util.go


+ 0 - 0
vendor/github.com/G-Node/gig/walk.go → vendor/github.com/G-Node/libgin/libgin/gig/walk.go


+ 0 - 0
vendor/github.com/G-Node/gig/write.go → vendor/github.com/G-Node/libgin/libgin/gig/write.go


+ 20 - 0
vendor/github.com/beorn7/perks/LICENSE

@@ -0,0 +1,20 @@
+Copyright (C) 2013 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 2388 - 0
vendor/github.com/beorn7/perks/quantile/exampledata.txt

@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4

+ 316 - 0
vendor/github.com/beorn7/perks/quantile/stream.go

@@ -0,0 +1,316 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
+package quantile
+
+import (
+	"math"
+	"sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+	Value float64 `json:",string"`
+	Width float64 `json:",string"`
+	Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int           { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+	ƒ := func(s *stream, r float64) float64 {
+		return 2 * epsilon * r
+	}
+	return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.5, 0.9, 0.99) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+	ƒ := func(s *stream, r float64) float64 {
+		return 2 * epsilon * (s.n - r)
+	}
+	return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targetMap map[float64]float64) *Stream {
+	// Convert map to slice to avoid slow iterations on a map.
+	// ƒ is called on the hot path, so converting the map to a slice
+	// beforehand results in significant CPU savings.
+	targets := targetMapToSlice(targetMap)
+
+	ƒ := func(s *stream, r float64) float64 {
+		var m = math.MaxFloat64
+		var f float64
+		for _, t := range targets {
+			if t.quantile*s.n <= r {
+				f = (2 * t.epsilon * r) / t.quantile
+			} else {
+				f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
+			}
+			if f < m {
+				m = f
+			}
+		}
+		return m
+	}
+	return newStream(ƒ)
+}
+
+type target struct {
+	quantile float64
+	epsilon  float64
+}
+
+func targetMapToSlice(targetMap map[float64]float64) []target {
+	targets := make([]target, 0, len(targetMap))
+
+	for quantile, epsilon := range targetMap {
+		t := target{
+			quantile: quantile,
+			epsilon:  epsilon,
+		}
+		targets = append(targets, t)
+	}
+
+	return targets
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+	*stream
+	b      Samples
+	sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+	x := &stream{ƒ: ƒ}
+	return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+	s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+	s.b = append(s.b, sample)
+	s.sorted = false
+	if len(s.b) == cap(s.b) {
+		s.flush()
+	}
+}
+
+// Query returns the computed qth percentile value. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+	if !s.flushed() {
+		// Fast path when there hasn't been enough data for a flush;
+		// this also yields better accuracy for small sets of data.
+		l := len(s.b)
+		if l == 0 {
+			return 0
+		}
+		i := int(math.Ceil(float64(l) * q))
+		if i > 0 {
+			i -= 1
+		}
+		s.maybeSort()
+		return s.b[i].Value
+	}
+	s.flush()
+	return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+	sort.Sort(samples)
+	s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list, reusing the samples buffer memory.
+func (s *Stream) Reset() {
+	s.stream.reset()
+	s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+	if !s.flushed() {
+		return s.b
+	}
+	s.flush()
+	return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+	return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+	s.maybeSort()
+	s.stream.merge(s.b)
+	s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+	if !s.sorted {
+		s.sorted = true
+		sort.Sort(s.b)
+	}
+}
+
+func (s *Stream) flushed() bool {
+	return len(s.stream.l) > 0
+}
+
+type stream struct {
+	n float64
+	l []Sample
+	ƒ invariant
+}
+
+func (s *stream) reset() {
+	s.l = s.l[:0]
+	s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+	s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+	// TODO(beorn7): This tries to merge not only individual samples, but
+	// whole summaries. The paper doesn't mention merging summaries at
+	// all. Unittests show that the merging is inaccurate. Find out how to
+	// do merges properly.
+	var r float64
+	i := 0
+	for _, sample := range samples {
+		for ; i < len(s.l); i++ {
+			c := s.l[i]
+			if c.Value > sample.Value {
+				// Insert at position i.
+				s.l = append(s.l, Sample{})
+				copy(s.l[i+1:], s.l[i:])
+				s.l[i] = Sample{
+					sample.Value,
+					sample.Width,
+					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+					// TODO(beorn7): How to calculate delta correctly?
+				}
+				i++
+				goto inserted
+			}
+			r += c.Width
+		}
+		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+		i++
+	inserted:
+		s.n += sample.Width
+		r += sample.Width
+	}
+	s.compress()
+}
+
+func (s *stream) count() int {
+	return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+	t := math.Ceil(q * s.n)
+	t += math.Ceil(s.ƒ(s, t) / 2)
+	p := s.l[0]
+	var r float64
+	for _, c := range s.l[1:] {
+		r += p.Width
+		if r+c.Width+c.Delta > t {
+			return p.Value
+		}
+		p = c
+	}
+	return p.Value
+}
+
+func (s *stream) compress() {
+	if len(s.l) < 2 {
+		return
+	}
+	x := s.l[len(s.l)-1]
+	xi := len(s.l) - 1
+	r := s.n - 1 - x.Width
+
+	for i := len(s.l) - 2; i >= 0; i-- {
+		c := s.l[i]
+		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+			x.Width += c.Width
+			s.l[xi] = x
+			// Remove element at i.
+			copy(s.l[i:], s.l[i+1:])
+			s.l = s.l[:len(s.l)-1]
+			xi -= 1
+		} else {
+			x = c
+			xi = i
+		}
+		r -= c.Width
+	}
+}
+
+func (s *stream) samples() Samples {
+	samples := make(Samples, len(s.l))
+	copy(samples, s.l)
+	return samples
+}

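The exported Stream API added above (NewTargeted, Insert, Query, Count) is easiest to see end to end in a small consumer. The following is a minimal sketch, assuming the package is imported under its upstream path github.com/beorn7/perks/quantile as vendored here:

```go
package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Target the median and the 99th percentile with absolute errors,
	// as described in the NewTargeted documentation above.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.99: 0.001,
	})

	// Insert observations one at a time; the stream compresses itself
	// once its internal sample buffer fills up.
	for i := 1; i <= 1000; i++ {
		q.Insert(float64(i))
	}

	fmt.Println("count:", q.Count()) // total observations seen
	fmt.Println("p50:", q.Query(0.50))
	fmt.Println("p99:", q.Query(0.99))
}
```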
+ 1 - 0
vendor/github.com/boombuler/barcode/.gitignore

@@ -0,0 +1 @@
+.vscode/

+ 21 - 0
vendor/github.com/boombuler/barcode/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Florian Sundermann
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 53 - 0
vendor/github.com/boombuler/barcode/README.md

@@ -0,0 +1,53 @@
+[![Join the chat at https://gitter.im/golang-barcode/Lobby](https://badges.gitter.im/golang-barcode/Lobby.svg)](https://gitter.im/golang-barcode/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+## Introduction ##
+
+This is a package for Go that can be used to create different types of barcodes.
+
+## Supported Barcode Types ##
+* 2 of 5
+* Aztec Code
+* Codabar
+* Code 128
+* Code 39
+* Code 93
+* Datamatrix
+* EAN 13
+* EAN 8
+* PDF 417
+* QR Code
+
+## Example ##
+
+This is a simple example of how to create a QR code and write it to a PNG file:
+```go
+package main
+
+import (
+	"image/png"
+	"os"
+
+	"github.com/boombuler/barcode"
+	"github.com/boombuler/barcode/qr"
+)
+
+func main() {
+	// Create the barcode
+	qrCode, _ := qr.Encode("Hello World", qr.M, qr.Auto)
+
+	// Scale the barcode to 200x200 pixels
+	qrCode, _ = barcode.Scale(qrCode, 200, 200)
+
+	// create the output file
+	file, _ := os.Create("qrcode.png")
+	defer file.Close()
+
+	// encode the barcode as png
+	png.Encode(file, qrCode)
+}
+```
+
+## Documentation ##
+See [GoDoc](https://godoc.org/github.com/boombuler/barcode)
+
+To create a barcode, use the Encode function from one of the subpackages.

+ 42 - 0
vendor/github.com/boombuler/barcode/barcode.go

@@ -0,0 +1,42 @@
+package barcode
+
+import "image"
+
+const (
+	TypeAztec           = "Aztec"
+	TypeCodabar         = "Codabar"
+	TypeCode128         = "Code 128"
+	TypeCode39          = "Code 39"
+	TypeCode93          = "Code 93"
+	TypeDataMatrix      = "DataMatrix"
+	TypeEAN8            = "EAN 8"
+	TypeEAN13           = "EAN 13"
+	TypePDF             = "PDF417"
+	TypeQR              = "QR Code"
+	Type2of5            = "2 of 5"
+	Type2of5Interleaved = "2 of 5 (interleaved)"
+)
+
+// Metadata contains some meta information about a barcode.
+type Metadata struct {
+	// the name of the barcode kind
+	CodeKind string
+	// contains 1 for 1D barcodes or 2 for 2D barcodes
+	Dimensions byte
+}
+
+// Barcode is a rendered and encoded barcode.
+type Barcode interface {
+	image.Image
+	// returns some meta information about the barcode
+	Metadata() Metadata
+	// the data that was encoded in this barcode
+	Content() string
+}
+
+// BarcodeIntCS is an additional interface that some barcodes might implement
+// to provide the value of their checksum.
+type BarcodeIntCS interface {
+	Barcode
+	CheckSum() int
+}

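The Barcode and BarcodeIntCS interfaces added above keep the integer checksum optional. Below is a minimal sketch of how a caller might probe for it; the qr subpackage used here is added later in this changeset, and its codes satisfy only the plain Barcode interface:

```go
package main

import (
	"fmt"

	"github.com/boombuler/barcode"
	"github.com/boombuler/barcode/qr"
)

// reportCheckSum prints the checksum when the concrete barcode type
// implements the optional BarcodeIntCS interface, and falls back to the
// metadata otherwise.
func reportCheckSum(bc barcode.Barcode) {
	if cs, ok := bc.(barcode.BarcodeIntCS); ok {
		fmt.Println("checksum:", cs.CheckSum())
		return
	}
	fmt.Printf("%s (%dD) has no integer checksum\n",
		bc.Metadata().CodeKind, bc.Metadata().Dimensions)
}

func main() {
	code, err := qr.Encode("Hello World", qr.M, qr.Auto)
	if err != nil {
		panic(err)
	}
	reportCheckSum(code) // QR codes only satisfy the plain Barcode interface
}
```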
+ 1 - 0
vendor/github.com/boombuler/barcode/go.mod

@@ -0,0 +1 @@
+module github.com/boombuler/barcode

+ 66 - 0
vendor/github.com/boombuler/barcode/qr/alphanumeric.go

@@ -0,0 +1,66 @@
+package qr
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/boombuler/barcode/utils"
+)
+
+const charSet string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:"
+
+func stringToAlphaIdx(content string) <-chan int {
+	result := make(chan int)
+	go func() {
+		for _, r := range content {
+			idx := strings.IndexRune(charSet, r)
+			result <- idx
+			if idx < 0 {
+				break
+			}
+		}
+		close(result)
+	}()
+
+	return result
+}
+
+func encodeAlphaNumeric(content string, ecl ErrorCorrectionLevel) (*utils.BitList, *versionInfo, error) {
+
+	contentLenIsOdd := len(content)%2 == 1
+	contentBitCount := (len(content) / 2) * 11
+	if contentLenIsOdd {
+		contentBitCount += 6
+	}
+	vi := findSmallestVersionInfo(ecl, alphaNumericMode, contentBitCount)
+	if vi == nil {
+		return nil, nil, errors.New("Too much data to encode")
+	}
+
+	res := new(utils.BitList)
+	res.AddBits(int(alphaNumericMode), 4)
+	res.AddBits(len(content), vi.charCountBits(alphaNumericMode))
+
+	encoder := stringToAlphaIdx(content)
+
+	for idx := 0; idx < len(content)/2; idx++ {
+		c1 := <-encoder
+		c2 := <-encoder
+		if c1 < 0 || c2 < 0 {
+			return nil, nil, fmt.Errorf("\"%s\" can not be encoded as %s", content, AlphaNumeric)
+		}
+		res.AddBits(c1*45+c2, 11)
+	}
+	if contentLenIsOdd {
+		c := <-encoder
+		if c < 0 {
+			return nil, nil, fmt.Errorf("\"%s\" can not be encoded as %s", content, AlphaNumeric)
+		}
+		res.AddBits(c, 6)
+	}
+
+	addPaddingAndTerminator(res, vi)
+
+	return res, vi, nil
+}

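For reference, the pair packing in encodeAlphaNumeric above maps each character to its index in charSet and stores firstIndex*45 + secondIndex in 11 bits. A tiny standalone check of that arithmetic (the charSet constant is copied here because the original is unexported):

```go
package main

import (
	"fmt"
	"strings"
)

const charSet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:"

func main() {
	// Each character pair is packed as firstIndex*45 + secondIndex,
	// mirroring the c1*45+c2 step in encodeAlphaNumeric above.
	c1 := strings.IndexRune(charSet, 'A') // 10
	c2 := strings.IndexRune(charSet, 'B') // 11
	fmt.Println(c1*45 + c2)               // 461, stored as an 11-bit group
}
```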
+ 23 - 0
vendor/github.com/boombuler/barcode/qr/automatic.go

@@ -0,0 +1,23 @@
+package qr
+
+import (
+	"fmt"
+
+	"github.com/boombuler/barcode/utils"
+)
+
+func encodeAuto(content string, ecl ErrorCorrectionLevel) (*utils.BitList, *versionInfo, error) {
+	bits, vi, _ := Numeric.getEncoder()(content, ecl)
+	if bits != nil && vi != nil {
+		return bits, vi, nil
+	}
+	bits, vi, _ = AlphaNumeric.getEncoder()(content, ecl)
+	if bits != nil && vi != nil {
+		return bits, vi, nil
+	}
+	bits, vi, _ = Unicode.getEncoder()(content, ecl)
+	if bits != nil && vi != nil {
+		return bits, vi, nil
+	}
+	return nil, nil, fmt.Errorf("No encoding found to encode \"%s\"", content)
+}

+ 59 - 0
vendor/github.com/boombuler/barcode/qr/blocks.go

@@ -0,0 +1,59 @@
+package qr
+
+type block struct {
+	data []byte
+	ecc  []byte
+}
+type blockList []*block
+
+func splitToBlocks(data <-chan byte, vi *versionInfo) blockList {
+	result := make(blockList, vi.NumberOfBlocksInGroup1+vi.NumberOfBlocksInGroup2)
+
+	for b := 0; b < int(vi.NumberOfBlocksInGroup1); b++ {
+		blk := new(block)
+		blk.data = make([]byte, vi.DataCodeWordsPerBlockInGroup1)
+		for cw := 0; cw < int(vi.DataCodeWordsPerBlockInGroup1); cw++ {
+			blk.data[cw] = <-data
+		}
+		blk.ecc = ec.calcECC(blk.data, vi.ErrorCorrectionCodewordsPerBlock)
+		result[b] = blk
+	}
+
+	for b := 0; b < int(vi.NumberOfBlocksInGroup2); b++ {
+		blk := new(block)
+		blk.data = make([]byte, vi.DataCodeWordsPerBlockInGroup2)
+		for cw := 0; cw < int(vi.DataCodeWordsPerBlockInGroup2); cw++ {
+			blk.data[cw] = <-data
+		}
+		blk.ecc = ec.calcECC(blk.data, vi.ErrorCorrectionCodewordsPerBlock)
+		result[int(vi.NumberOfBlocksInGroup1)+b] = blk
+	}
+
+	return result
+}
+
+func (bl blockList) interleave(vi *versionInfo) []byte {
+	var maxCodewordCount int
+	if vi.DataCodeWordsPerBlockInGroup1 > vi.DataCodeWordsPerBlockInGroup2 {
+		maxCodewordCount = int(vi.DataCodeWordsPerBlockInGroup1)
+	} else {
+		maxCodewordCount = int(vi.DataCodeWordsPerBlockInGroup2)
+	}
+	resultLen := (vi.DataCodeWordsPerBlockInGroup1+vi.ErrorCorrectionCodewordsPerBlock)*vi.NumberOfBlocksInGroup1 +
+		(vi.DataCodeWordsPerBlockInGroup2+vi.ErrorCorrectionCodewordsPerBlock)*vi.NumberOfBlocksInGroup2
+
+	result := make([]byte, 0, resultLen)
+	for i := 0; i < maxCodewordCount; i++ {
+		for b := 0; b < len(bl); b++ {
+			if len(bl[b].data) > i {
+				result = append(result, bl[b].data[i])
+			}
+		}
+	}
+	for i := 0; i < int(vi.ErrorCorrectionCodewordsPerBlock); i++ {
+		for b := 0; b < len(bl); b++ {
+			result = append(result, bl[b].ecc[i])
+		}
+	}
+	return result
+}

+ 416 - 0
vendor/github.com/boombuler/barcode/qr/encoder.go

@@ -0,0 +1,416 @@
+// Package qr can be used to create QR barcodes.
+package qr
+
+import (
+	"image"
+
+	"github.com/boombuler/barcode"
+	"github.com/boombuler/barcode/utils"
+)
+
+type encodeFn func(content string, eccLevel ErrorCorrectionLevel) (*utils.BitList, *versionInfo, error)
+
+// Encoding mode for QR Codes.
+type Encoding byte
+
+const (
+	// Auto will choose the best matching encoding
+	Auto Encoding = iota
+	// Numeric encoding only encodes numbers [0-9]
+	Numeric
+	// AlphaNumeric encoding only encodes uppercase letters, numbers and [Space], $, %, *, +, -, ., /, :
+	AlphaNumeric
+	// Unicode encoding encodes the string as utf-8
+	Unicode
+	// only for testing purposes
+	unknownEncoding
+)
+
+func (e Encoding) getEncoder() encodeFn {
+	switch e {
+	case Auto:
+		return encodeAuto
+	case Numeric:
+		return encodeNumeric
+	case AlphaNumeric:
+		return encodeAlphaNumeric
+	case Unicode:
+		return encodeUnicode
+	}
+	return nil
+}
+
+func (e Encoding) String() string {
+	switch e {
+	case Auto:
+		return "Auto"
+	case Numeric:
+		return "Numeric"
+	case AlphaNumeric:
+		return "AlphaNumeric"
+	case Unicode:
+		return "Unicode"
+	}
+	return ""
+}
+
+// Encode returns a QR barcode with the given content and error correction level, using the given encoding
+func Encode(content string, level ErrorCorrectionLevel, mode Encoding) (barcode.Barcode, error) {
+	bits, vi, err := mode.getEncoder()(content, level)
+	if err != nil {
+		return nil, err
+	}
+
+	blocks := splitToBlocks(bits.IterateBytes(), vi)
+	data := blocks.interleave(vi)
+	result := render(data, vi)
+	result.content = content
+	return result, nil
+}
+
+func render(data []byte, vi *versionInfo) *qrcode {
+	dim := vi.modulWidth()
+	results := make([]*qrcode, 8)
+	for i := 0; i < 8; i++ {
+		results[i] = newBarcode(dim)
+	}
+
+	occupied := newBarcode(dim)
+
+	setAll := func(x int, y int, val bool) {
+		occupied.Set(x, y, true)
+		for i := 0; i < 8; i++ {
+			results[i].Set(x, y, val)
+		}
+	}
+
+	drawFinderPatterns(vi, setAll)
+	drawAlignmentPatterns(occupied, vi, setAll)
+
+	// Timing Pattern:
+	var i int
+	for i = 0; i < dim; i++ {
+		if !occupied.Get(i, 6) {
+			setAll(i, 6, i%2 == 0)
+		}
+		if !occupied.Get(6, i) {
+			setAll(6, i, i%2 == 0)
+		}
+	}
+	// Dark Module
+	setAll(8, dim-8, true)
+
+	drawVersionInfo(vi, setAll)
+	drawFormatInfo(vi, -1, occupied.Set)
+	for i := 0; i < 8; i++ {
+		drawFormatInfo(vi, i, results[i].Set)
+	}
+
+	// Write the data
+	var curBitNo int
+
+	for pos := range iterateModules(occupied) {
+		var curBit bool
+		if curBitNo < len(data)*8 {
+			curBit = ((data[curBitNo/8] >> uint(7-(curBitNo%8))) & 1) == 1
+		} else {
+			curBit = false
+		}
+
+		for i := 0; i < 8; i++ {
+			setMasked(pos.X, pos.Y, curBit, i, results[i].Set)
+		}
+		curBitNo++
+	}
+
+	lowestPenalty := ^uint(0)
+	lowestPenaltyIdx := -1
+	for i := 0; i < 8; i++ {
+		p := results[i].calcPenalty()
+		if p < lowestPenalty {
+			lowestPenalty = p
+			lowestPenaltyIdx = i
+		}
+	}
+	return results[lowestPenaltyIdx]
+}
+
+func setMasked(x, y int, val bool, mask int, set func(int, int, bool)) {
+	switch mask {
+	case 0:
+		val = val != (((y + x) % 2) == 0)
+		break
+	case 1:
+		val = val != ((y % 2) == 0)
+		break
+	case 2:
+		val = val != ((x % 3) == 0)
+		break
+	case 3:
+		val = val != (((y + x) % 3) == 0)
+		break
+	case 4:
+		val = val != (((y/2 + x/3) % 2) == 0)
+		break
+	case 5:
+		val = val != (((y*x)%2)+((y*x)%3) == 0)
+		break
+	case 6:
+		val = val != ((((y*x)%2)+((y*x)%3))%2 == 0)
+		break
+	case 7:
+		val = val != ((((y+x)%2)+((y*x)%3))%2 == 0)
+	}
+	set(x, y, val)
+}
+
+func iterateModules(occupied *qrcode) <-chan image.Point {
+	result := make(chan image.Point)
+	allPoints := make(chan image.Point)
+	go func() {
+		curX := occupied.dimension - 1
+		curY := occupied.dimension - 1
+		isUpward := true
+
+		for true {
+			if isUpward {
+				allPoints <- image.Pt(curX, curY)
+				allPoints <- image.Pt(curX-1, curY)
+				curY--
+				if curY < 0 {
+					curY = 0
+					curX -= 2
+					if curX == 6 {
+						curX--
+					}
+					if curX < 0 {
+						break
+					}
+					isUpward = false
+				}
+			} else {
+				allPoints <- image.Pt(curX, curY)
+				allPoints <- image.Pt(curX-1, curY)
+				curY++
+				if curY >= occupied.dimension {
+					curY = occupied.dimension - 1
+					curX -= 2
+					if curX == 6 {
+						curX--
+					}
+					isUpward = true
+					if curX < 0 {
+						break
+					}
+				}
+			}
+		}
+
+		close(allPoints)
+	}()
+	go func() {
+		for pt := range allPoints {
+			if !occupied.Get(pt.X, pt.Y) {
+				result <- pt
+			}
+		}
+		close(result)
+	}()
+	return result
+}
+
+func drawFinderPatterns(vi *versionInfo, set func(int, int, bool)) {
+	dim := vi.modulWidth()
+	drawPattern := func(xoff int, yoff int) {
+		for x := -1; x < 8; x++ {
+			for y := -1; y < 8; y++ {
+				val := (x == 0 || x == 6 || y == 0 || y == 6 || (x > 1 && x < 5 && y > 1 && y < 5)) && (x <= 6 && y <= 6 && x >= 0 && y >= 0)
+
+				if x+xoff >= 0 && x+xoff < dim && y+yoff >= 0 && y+yoff < dim {
+					set(x+xoff, y+yoff, val)
+				}
+			}
+		}
+	}
+	drawPattern(0, 0)
+	drawPattern(0, dim-7)
+	drawPattern(dim-7, 0)
+}
+
+func drawAlignmentPatterns(occupied *qrcode, vi *versionInfo, set func(int, int, bool)) {
+	drawPattern := func(xoff int, yoff int) {
+		for x := -2; x <= 2; x++ {
+			for y := -2; y <= 2; y++ {
+				val := x == -2 || x == 2 || y == -2 || y == 2 || (x == 0 && y == 0)
+				set(x+xoff, y+yoff, val)
+			}
+		}
+	}
+	positions := vi.alignmentPatternPlacements()
+
+	for _, x := range positions {
+		for _, y := range positions {
+			if occupied.Get(x, y) {
+				continue
+			}
+			drawPattern(x, y)
+		}
+	}
+}
+
+var formatInfos = map[ErrorCorrectionLevel]map[int][]bool{
+	L: {
+		0: []bool{true, true, true, false, true, true, true, true, true, false, false, false, true, false, false},
+		1: []bool{true, true, true, false, false, true, false, true, true, true, true, false, false, true, true},
+		2: []bool{true, true, true, true, true, false, true, true, false, true, false, true, false, true, false},
+		3: []bool{true, true, true, true, false, false, false, true, false, false, true, true, true, false, true},
+		4: []bool{true, true, false, false, true, true, false, false, false, true, false, true, true, true, true},
+		5: []bool{true, true, false, false, false, true, true, false, false, false, true, true, false, false, false},
+		6: []bool{true, true, false, true, true, false, false, false, true, false, false, false, false, false, true},
+		7: []bool{true, true, false, true, false, false, true, false, true, true, true, false, true, true, false},
+	},
+	M: {
+		0: []bool{true, false, true, false, true, false, false, false, false, false, true, false, false, true, false},
+		1: []bool{true, false, true, false, false, false, true, false, false, true, false, false, true, false, true},
+		2: []bool{true, false, true, true, true, true, false, false, true, true, true, true, true, false, false},
+		3: []bool{true, false, true, true, false, true, true, false, true, false, false, true, false, true, true},
+		4: []bool{true, false, false, false, true, false, true, true, true, true, true, true, false, false, true},
+		5: []bool{true, false, false, false, false, false, false, true, true, false, false, true, true, true, false},
+		6: []bool{true, false, false, true, true, true, true, true, false, false, true, false, true, true, true},
+		7: []bool{true, false, false, true, false, true, false, true, false, true, false, false, false, false, false},
+	},
+	Q: {
+		0: []bool{false, true, true, false, true, false, true, false, true, false, true, true, true, true, true},
+		1: []bool{false, true, true, false, false, false, false, false, true, true, false, true, false, false, false},
+		2: []bool{false, true, true, true, true, true, true, false, false, true, true, false, false, false, true},
+		3: []bool{false, true, true, true, false, true, false, false, false, false, false, false, true, true, false},
+		4: []bool{false, true, false, false, true, false, false, true, false, true, true, false, true, false, false},
+		5: []bool{false, true, false, false, false, false, true, true, false, false, false, false, false, true, true},
+		6: []bool{false, true, false, true, true, true, false, true, true, false, true, true, false, true, false},
+		7: []bool{false, true, false, true, false, true, true, true, true, true, false, true, true, false, true},
+	},
+	H: {
+		0: []bool{false, false, true, false, true, true, false, true, false, false, false, true, false, false, true},
+		1: []bool{false, false, true, false, false, true, true, true, false, true, true, true, true, true, false},
+		2: []bool{false, false, true, true, true, false, false, true, true, true, false, false, true, true, true},
+		3: []bool{false, false, true, true, false, false, true, true, true, false, true, false, false, false, false},
+		4: []bool{false, false, false, false, true, true, true, false, true, true, false, false, false, true, false},
+		5: []bool{false, false, false, false, false, true, false, false, true, false, true, false, true, false, true},
+		6: []bool{false, false, false, true, true, false, true, false, false, false, false, true, true, false, false},
+		7: []bool{false, false, false, true, false, false, false, false, false, true, true, true, false, true, true},
+	},
+}
+
+func drawFormatInfo(vi *versionInfo, usedMask int, set func(int, int, bool)) {
+	var formatInfo []bool
+
+	if usedMask == -1 {
+		formatInfo = []bool{true, true, true, true, true, true, true, true, true, true, true, true, true, true, true} // Set all to true cause -1 --> occupied mask.
+	} else {
+		formatInfo = formatInfos[vi.Level][usedMask]
+	}
+
+	if len(formatInfo) == 15 {
+		dim := vi.modulWidth()
+		set(0, 8, formatInfo[0])
+		set(1, 8, formatInfo[1])
+		set(2, 8, formatInfo[2])
+		set(3, 8, formatInfo[3])
+		set(4, 8, formatInfo[4])
+		set(5, 8, formatInfo[5])
+		set(7, 8, formatInfo[6])
+		set(8, 8, formatInfo[7])
+		set(8, 7, formatInfo[8])
+		set(8, 5, formatInfo[9])
+		set(8, 4, formatInfo[10])
+		set(8, 3, formatInfo[11])
+		set(8, 2, formatInfo[12])
+		set(8, 1, formatInfo[13])
+		set(8, 0, formatInfo[14])
+
+		set(8, dim-1, formatInfo[0])
+		set(8, dim-2, formatInfo[1])
+		set(8, dim-3, formatInfo[2])
+		set(8, dim-4, formatInfo[3])
+		set(8, dim-5, formatInfo[4])
+		set(8, dim-6, formatInfo[5])
+		set(8, dim-7, formatInfo[6])
+		set(dim-8, 8, formatInfo[7])
+		set(dim-7, 8, formatInfo[8])
+		set(dim-6, 8, formatInfo[9])
+		set(dim-5, 8, formatInfo[10])
+		set(dim-4, 8, formatInfo[11])
+		set(dim-3, 8, formatInfo[12])
+		set(dim-2, 8, formatInfo[13])
+		set(dim-1, 8, formatInfo[14])
+	}
+}
+
+var versionInfoBitsByVersion = map[byte][]bool{
+	7:  []bool{false, false, false, true, true, true, true, true, false, false, true, false, false, true, false, true, false, false},
+	8:  []bool{false, false, true, false, false, false, false, true, false, true, true, false, true, true, true, true, false, false},
+	9:  []bool{false, false, true, false, false, true, true, false, true, false, true, false, false, true, true, false, false, true},
+	10: []bool{false, false, true, false, true, false, false, true, false, false, true, true, false, true, false, false, true, true},
+	11: []bool{false, false, true, false, true, true, true, false, true, true, true, true, true, true, false, true, true, false},
+	12: []bool{false, false, true, true, false, false, false, true, true, true, false, true, true, false, false, false, true, false},
+	13: []bool{false, false, true, true, false, true, true, false, false, false, false, true, false, false, false, true, true, true},
+	14: []bool{false, false, true, true, true, false, false, true, true, false, false, false, false, false, true, true, false, true},
+	15: []bool{false, false, true, true, true, true, true, false, false, true, false, false, true, false, true, false, false, false},
+	16: []bool{false, true, false, false, false, false, true, false, true, true, false, true, true, true, true, false, false, false},
+	17: []bool{false, true, false, false, false, true, false, true, false, false, false, true, false, true, true, true, false, true},
+	18: []bool{false, true, false, false, true, false, true, false, true, false, false, false, false, true, false, true, true, true},
+	19: []bool{false, true, false, false, true, true, false, true, false, true, false, false, true, true, false, false, true, false},
+	20: []bool{false, true, false, true, false, false, true, false, false, true, true, false, true, false, false, true, true, false},
+	21: []bool{false, true, false, true, false, true, false, true, true, false, true, false, false, false, false, false, true, true},
+	22: []bool{false, true, false, true, true, false, true, false, false, false, true, true, false, false, true, false, false, true},
+	23: []bool{false, true, false, true, true, true, false, true, true, true, true, true, true, false, true, true, false, false},
+	24: []bool{false, true, true, false, false, false, true, true, true, false, true, true, false, false, false, true, false, false},
+	25: []bool{false, true, true, false, false, true, false, false, false, true, true, true, true, false, false, false, false, true},
+	26: []bool{false, true, true, false, true, false, true, true, true, true, true, false, true, false, true, false, true, true},
+	27: []bool{false, true, true, false, true, true, false, false, false, false, true, false, false, false, true, true, true, false},
+	28: []bool{false, true, true, true, false, false, true, true, false, false, false, false, false, true, true, false, true, false},
+	29: []bool{false, true, true, true, false, true, false, false, true, true, false, false, true, true, true, true, true, true},
+	30: []bool{false, true, true, true, true, false, true, true, false, true, false, true, true, true, false, true, false, true},
+	31: []bool{false, true, true, true, true, true, false, false, true, false, false, true, false, true, false, false, false, false},
+	32: []bool{true, false, false, false, false, false, true, false, false, true, true, true, false, true, false, true, false, true},
+	33: []bool{true, false, false, false, false, true, false, true, true, false, true, true, true, true, false, false, false, false},
+	34: []bool{true, false, false, false, true, false, true, false, false, false, true, false, true, true, true, false, true, false},
+	35: []bool{true, false, false, false, true, true, false, true, true, true, true, false, false, true, true, true, true, true},
+	36: []bool{true, false, false, true, false, false, true, false, true, true, false, false, false, false, true, false, true, true},
+	37: []bool{true, false, false, true, false, true, false, true, false, false, false, false, true, false, true, true, true, false},
+	38: []bool{true, false, false, true, true, false, true, false, true, false, false, true, true, false, false, true, false, false},
+	39: []bool{true, false, false, true, true, true, false, true, false, true, false, true, false, false, false, false, false, true},
+	40: []bool{true, false, true, false, false, false, true, true, false, false, false, true, true, false, true, false, false, true},
+}
+
+func drawVersionInfo(vi *versionInfo, set func(int, int, bool)) {
+	versionInfoBits, ok := versionInfoBitsByVersion[vi.Version]
+
+	if ok && len(versionInfoBits) > 0 {
+		for i := 0; i < len(versionInfoBits); i++ {
+			x := (vi.modulWidth() - 11) + i%3
+			y := i / 3
+			set(x, y, versionInfoBits[len(versionInfoBits)-i-1])
+			set(y, x, versionInfoBits[len(versionInfoBits)-i-1])
+		}
+	}
+
+}
+
+func addPaddingAndTerminator(bl *utils.BitList, vi *versionInfo) {
+	for i := 0; i < 4 && bl.Len() < vi.totalDataBytes()*8; i++ {
+		bl.AddBit(false)
+	}
+
+	for bl.Len()%8 != 0 {
+		bl.AddBit(false)
+	}
+
+	for i := 0; bl.Len() < vi.totalDataBytes()*8; i++ {
+		if i%2 == 0 {
+			bl.AddByte(236)
+		} else {
+			bl.AddByte(17)
+		}
+	}
+}

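The Encoding constants and Encode function added above also let the caller force a specific mode instead of Auto. A brief sketch, assuming the content stays within the alphanumeric charset defined in alphanumeric.go:

```go
package main

import (
	"fmt"

	"github.com/boombuler/barcode/qr"
)

func main() {
	// "HELLO WORLD" only uses digits, uppercase letters and space, so the
	// denser AlphaNumeric mode can be requested explicitly.
	code, err := qr.Encode("HELLO WORLD", qr.H, qr.AlphaNumeric)
	if err != nil {
		// Lowercase or other bytes outside the charset would end up here.
		panic(err)
	}
	fmt.Println(code.Bounds().Dx(), "modules per side")
}
```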
+ 29 - 0
vendor/github.com/boombuler/barcode/qr/errorcorrection.go

@@ -0,0 +1,29 @@
+package qr
+
+import (
+	"github.com/boombuler/barcode/utils"
+)
+
+type errorCorrection struct {
+	rs *utils.ReedSolomonEncoder
+}
+
+var ec = newErrorCorrection()
+
+func newErrorCorrection() *errorCorrection {
+	fld := utils.NewGaloisField(285, 256, 0)
+	return &errorCorrection{utils.NewReedSolomonEncoder(fld)}
+}
+
+func (ec *errorCorrection) calcECC(data []byte, eccCount byte) []byte {
+	dataInts := make([]int, len(data))
+	for i := 0; i < len(data); i++ {
+		dataInts[i] = int(data[i])
+	}
+	res := ec.rs.Encode(dataInts, int(eccCount))
+	result := make([]byte, len(res))
+	for i := 0; i < len(res); i++ {
+		result[i] = byte(res[i])
+	}
+	return result
+}

+ 56 - 0
vendor/github.com/boombuler/barcode/qr/numeric.go

@@ -0,0 +1,56 @@
+package qr
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+
+	"github.com/boombuler/barcode/utils"
+)
+
+func encodeNumeric(content string, ecl ErrorCorrectionLevel) (*utils.BitList, *versionInfo, error) {
+	contentBitCount := (len(content) / 3) * 10
+	switch len(content) % 3 {
+	case 1:
+		contentBitCount += 4
+	case 2:
+		contentBitCount += 7
+	}
+	vi := findSmallestVersionInfo(ecl, numericMode, contentBitCount)
+	if vi == nil {
+		return nil, nil, errors.New("Too much data to encode")
+	}
+	res := new(utils.BitList)
+	res.AddBits(int(numericMode), 4)
+	res.AddBits(len(content), vi.charCountBits(numericMode))
+
+	for pos := 0; pos < len(content); pos += 3 {
+		var curStr string
+		if pos+3 <= len(content) {
+			curStr = content[pos : pos+3]
+		} else {
+			curStr = content[pos:]
+		}
+
+		i, err := strconv.Atoi(curStr)
+		if err != nil || i < 0 {
+			return nil, nil, fmt.Errorf("\"%s\" can not be encoded as %s", content, Numeric)
+		}
+		var bitCnt byte
+		switch len(curStr) % 3 {
+		case 0:
+			bitCnt = 10
+		case 1:
+			bitCnt = 4
+			break
+		case 2:
+			bitCnt = 7
+			break
+		}
+
+		res.AddBits(i, bitCnt)
+	}
+
+	addPaddingAndTerminator(res, vi)
+	return res, vi, nil
+}

+ 166 - 0
vendor/github.com/boombuler/barcode/qr/qrcode.go

@@ -0,0 +1,166 @@
+package qr
+
+import (
+	"image"
+	"image/color"
+	"math"
+
+	"github.com/boombuler/barcode"
+	"github.com/boombuler/barcode/utils"
+)
+
+type qrcode struct {
+	dimension int
+	data      *utils.BitList
+	content   string
+}
+
+func (qr *qrcode) Content() string {
+	return qr.content
+}
+
+func (qr *qrcode) Metadata() barcode.Metadata {
+	return barcode.Metadata{barcode.TypeQR, 2}
+}
+
+func (qr *qrcode) ColorModel() color.Model {
+	return color.Gray16Model
+}
+
+func (qr *qrcode) Bounds() image.Rectangle {
+	return image.Rect(0, 0, qr.dimension, qr.dimension)
+}
+
+func (qr *qrcode) At(x, y int) color.Color {
+	if qr.Get(x, y) {
+		return color.Black
+	}
+	return color.White
+}
+
+func (qr *qrcode) Get(x, y int) bool {
+	return qr.data.GetBit(x*qr.dimension + y)
+}
+
+func (qr *qrcode) Set(x, y int, val bool) {
+	qr.data.SetBit(x*qr.dimension+y, val)
+}
+
+func (qr *qrcode) calcPenalty() uint {
+	return qr.calcPenaltyRule1() + qr.calcPenaltyRule2() + qr.calcPenaltyRule3() + qr.calcPenaltyRule4()
+}
+
+func (qr *qrcode) calcPenaltyRule1() uint {
+	var result uint
+	for x := 0; x < qr.dimension; x++ {
+		checkForX := false
+		var cntX uint
+		checkForY := false
+		var cntY uint
+
+		for y := 0; y < qr.dimension; y++ {
+			if qr.Get(x, y) == checkForX {
+				cntX++
+			} else {
+				checkForX = !checkForX
+				if cntX >= 5 {
+					result += cntX - 2
+				}
+				cntX = 1
+			}
+
+			if qr.Get(y, x) == checkForY {
+				cntY++
+			} else {
+				checkForY = !checkForY
+				if cntY >= 5 {
+					result += cntY - 2
+				}
+				cntY = 1
+			}
+		}
+
+		if cntX >= 5 {
+			result += cntX - 2
+		}
+		if cntY >= 5 {
+			result += cntY - 2
+		}
+	}
+
+	return result
+}
+
+func (qr *qrcode) calcPenaltyRule2() uint {
+	var result uint
+	for x := 0; x < qr.dimension-1; x++ {
+		for y := 0; y < qr.dimension-1; y++ {
+			check := qr.Get(x, y)
+			if qr.Get(x, y+1) == check && qr.Get(x+1, y) == check && qr.Get(x+1, y+1) == check {
+				result += 3
+			}
+		}
+	}
+	return result
+}
+
+func (qr *qrcode) calcPenaltyRule3() uint {
+	pattern1 := []bool{true, false, true, true, true, false, true, false, false, false, false}
+	pattern2 := []bool{false, false, false, false, true, false, true, true, true, false, true}
+
+	var result uint
+	for x := 0; x <= qr.dimension-len(pattern1); x++ {
+		for y := 0; y < qr.dimension; y++ {
+			pattern1XFound := true
+			pattern2XFound := true
+			pattern1YFound := true
+			pattern2YFound := true
+
+			for i := 0; i < len(pattern1); i++ {
+				iv := qr.Get(x+i, y)
+				if iv != pattern1[i] {
+					pattern1XFound = false
+				}
+				if iv != pattern2[i] {
+					pattern2XFound = false
+				}
+				iv = qr.Get(y, x+i)
+				if iv != pattern1[i] {
+					pattern1YFound = false
+				}
+				if iv != pattern2[i] {
+					pattern2YFound = false
+				}
+			}
+			if pattern1XFound || pattern2XFound {
+				result += 40
+			}
+			if pattern1YFound || pattern2YFound {
+				result += 40
+			}
+		}
+	}
+
+	return result
+}
+
+func (qr *qrcode) calcPenaltyRule4() uint {
+	totalNum := qr.data.Len()
+	trueCnt := 0
+	for i := 0; i < totalNum; i++ {
+		if qr.data.GetBit(i) {
+			trueCnt++
+		}
+	}
+	percDark := float64(trueCnt) * 100 / float64(totalNum)
+	floor := math.Abs(math.Floor(percDark/5) - 10)
+	ceil := math.Abs(math.Ceil(percDark/5) - 10)
+	return uint(math.Min(floor, ceil) * 10)
+}
+
+func newBarcode(dim int) *qrcode {
+	res := new(qrcode)
+	res.dimension = dim
+	res.data = utils.NewBitList(dim * dim)
+	return res
+}

+ 27 - 0
vendor/github.com/boombuler/barcode/qr/unicode.go

@@ -0,0 +1,27 @@
+package qr
+
+import (
+	"errors"
+
+	"github.com/boombuler/barcode/utils"
+)
+
+func encodeUnicode(content string, ecl ErrorCorrectionLevel) (*utils.BitList, *versionInfo, error) {
+	data := []byte(content)
+
+	vi := findSmallestVersionInfo(ecl, byteMode, len(data)*8)
+	if vi == nil {
+		return nil, nil, errors.New("Too much data to encode")
+	}
+
+	// It's not correct to add the unicode bytes to the result directly but most readers can't handle the
+	// required ECI header...
+	res := new(utils.BitList)
+	res.AddBits(int(byteMode), 4)
+	res.AddBits(len(content), vi.charCountBits(byteMode))
+	for _, b := range data {
+		res.AddByte(b)
+	}
+	addPaddingAndTerminator(res, vi)
+	return res, vi, nil
+}

+ 310 - 0
vendor/github.com/boombuler/barcode/qr/versioninfo.go

@@ -0,0 +1,310 @@
+package qr
+
+import "math"
+
+// ErrorCorrectionLevel indicates the amount of "backup data" stored in the QR code
+type ErrorCorrectionLevel byte
+
+const (
+	// L recovers 7% of data
+	L ErrorCorrectionLevel = iota
+	// M recovers 15% of data
+	M
+	// Q recovers 25% of data
+	Q
+	// H recovers 30% of data
+	H
+)
+
+func (ecl ErrorCorrectionLevel) String() string {
+	switch ecl {
+	case L:
+		return "L"
+	case M:
+		return "M"
+	case Q:
+		return "Q"
+	case H:
+		return "H"
+	}
+	return "unknown"
+}
+
+type encodingMode byte
+
+const (
+	numericMode      encodingMode = 1
+	alphaNumericMode encodingMode = 2
+	byteMode         encodingMode = 4
+	kanjiMode        encodingMode = 8
+)
+
+type versionInfo struct {
+	Version                          byte
+	Level                            ErrorCorrectionLevel
+	ErrorCorrectionCodewordsPerBlock byte
+	NumberOfBlocksInGroup1           byte
+	DataCodeWordsPerBlockInGroup1    byte
+	NumberOfBlocksInGroup2           byte
+	DataCodeWordsPerBlockInGroup2    byte
+}
+
+var versionInfos = []*versionInfo{
+	&versionInfo{1, L, 7, 1, 19, 0, 0},
+	&versionInfo{1, M, 10, 1, 16, 0, 0},
+	&versionInfo{1, Q, 13, 1, 13, 0, 0},
+	&versionInfo{1, H, 17, 1, 9, 0, 0},
+	&versionInfo{2, L, 10, 1, 34, 0, 0},
+	&versionInfo{2, M, 16, 1, 28, 0, 0},
+	&versionInfo{2, Q, 22, 1, 22, 0, 0},
+	&versionInfo{2, H, 28, 1, 16, 0, 0},
+	&versionInfo{3, L, 15, 1, 55, 0, 0},
+	&versionInfo{3, M, 26, 1, 44, 0, 0},
+	&versionInfo{3, Q, 18, 2, 17, 0, 0},
+	&versionInfo{3, H, 22, 2, 13, 0, 0},
+	&versionInfo{4, L, 20, 1, 80, 0, 0},
+	&versionInfo{4, M, 18, 2, 32, 0, 0},
+	&versionInfo{4, Q, 26, 2, 24, 0, 0},
+	&versionInfo{4, H, 16, 4, 9, 0, 0},
+	&versionInfo{5, L, 26, 1, 108, 0, 0},
+	&versionInfo{5, M, 24, 2, 43, 0, 0},
+	&versionInfo{5, Q, 18, 2, 15, 2, 16},
+	&versionInfo{5, H, 22, 2, 11, 2, 12},
+	&versionInfo{6, L, 18, 2, 68, 0, 0},
+	&versionInfo{6, M, 16, 4, 27, 0, 0},
+	&versionInfo{6, Q, 24, 4, 19, 0, 0},
+	&versionInfo{6, H, 28, 4, 15, 0, 0},
+	&versionInfo{7, L, 20, 2, 78, 0, 0},
+	&versionInfo{7, M, 18, 4, 31, 0, 0},
+	&versionInfo{7, Q, 18, 2, 14, 4, 15},
+	&versionInfo{7, H, 26, 4, 13, 1, 14},
+	&versionInfo{8, L, 24, 2, 97, 0, 0},
+	&versionInfo{8, M, 22, 2, 38, 2, 39},
+	&versionInfo{8, Q, 22, 4, 18, 2, 19},
+	&versionInfo{8, H, 26, 4, 14, 2, 15},
+	&versionInfo{9, L, 30, 2, 116, 0, 0},
+	&versionInfo{9, M, 22, 3, 36, 2, 37},
+	&versionInfo{9, Q, 20, 4, 16, 4, 17},
+	&versionInfo{9, H, 24, 4, 12, 4, 13},
+	&versionInfo{10, L, 18, 2, 68, 2, 69},
+	&versionInfo{10, M, 26, 4, 43, 1, 44},
+	&versionInfo{10, Q, 24, 6, 19, 2, 20},
+	&versionInfo{10, H, 28, 6, 15, 2, 16},
+	&versionInfo{11, L, 20, 4, 81, 0, 0},
+	&versionInfo{11, M, 30, 1, 50, 4, 51},
+	&versionInfo{11, Q, 28, 4, 22, 4, 23},
+	&versionInfo{11, H, 24, 3, 12, 8, 13},
+	&versionInfo{12, L, 24, 2, 92, 2, 93},
+	&versionInfo{12, M, 22, 6, 36, 2, 37},
+	&versionInfo{12, Q, 26, 4, 20, 6, 21},
+	&versionInfo{12, H, 28, 7, 14, 4, 15},
+	&versionInfo{13, L, 26, 4, 107, 0, 0},
+	&versionInfo{13, M, 22, 8, 37, 1, 38},
+	&versionInfo{13, Q, 24, 8, 20, 4, 21},
+	&versionInfo{13, H, 22, 12, 11, 4, 12},
+	&versionInfo{14, L, 30, 3, 115, 1, 116},
+	&versionInfo{14, M, 24, 4, 40, 5, 41},
+	&versionInfo{14, Q, 20, 11, 16, 5, 17},
+	&versionInfo{14, H, 24, 11, 12, 5, 13},
+	&versionInfo{15, L, 22, 5, 87, 1, 88},
+	&versionInfo{15, M, 24, 5, 41, 5, 42},
+	&versionInfo{15, Q, 30, 5, 24, 7, 25},
+	&versionInfo{15, H, 24, 11, 12, 7, 13},
+	&versionInfo{16, L, 24, 5, 98, 1, 99},
+	&versionInfo{16, M, 28, 7, 45, 3, 46},
+	&versionInfo{16, Q, 24, 15, 19, 2, 20},
+	&versionInfo{16, H, 30, 3, 15, 13, 16},
+	&versionInfo{17, L, 28, 1, 107, 5, 108},
+	&versionInfo{17, M, 28, 10, 46, 1, 47},
+	&versionInfo{17, Q, 28, 1, 22, 15, 23},
+	&versionInfo{17, H, 28, 2, 14, 17, 15},
+	&versionInfo{18, L, 30, 5, 120, 1, 121},
+	&versionInfo{18, M, 26, 9, 43, 4, 44},
+	&versionInfo{18, Q, 28, 17, 22, 1, 23},
+	&versionInfo{18, H, 28, 2, 14, 19, 15},
+	&versionInfo{19, L, 28, 3, 113, 4, 114},
+	&versionInfo{19, M, 26, 3, 44, 11, 45},
+	&versionInfo{19, Q, 26, 17, 21, 4, 22},
+	&versionInfo{19, H, 26, 9, 13, 16, 14},
+	&versionInfo{20, L, 28, 3, 107, 5, 108},
+	&versionInfo{20, M, 26, 3, 41, 13, 42},
+	&versionInfo{20, Q, 30, 15, 24, 5, 25},
+	&versionInfo{20, H, 28, 15, 15, 10, 16},
+	&versionInfo{21, L, 28, 4, 116, 4, 117},
+	&versionInfo{21, M, 26, 17, 42, 0, 0},
+	&versionInfo{21, Q, 28, 17, 22, 6, 23},
+	&versionInfo{21, H, 30, 19, 16, 6, 17},
+	&versionInfo{22, L, 28, 2, 111, 7, 112},
+	&versionInfo{22, M, 28, 17, 46, 0, 0},
+	&versionInfo{22, Q, 30, 7, 24, 16, 25},
+	&versionInfo{22, H, 24, 34, 13, 0, 0},
+	&versionInfo{23, L, 30, 4, 121, 5, 122},
+	&versionInfo{23, M, 28, 4, 47, 14, 48},
+	&versionInfo{23, Q, 30, 11, 24, 14, 25},
+	&versionInfo{23, H, 30, 16, 15, 14, 16},
+	&versionInfo{24, L, 30, 6, 117, 4, 118},
+	&versionInfo{24, M, 28, 6, 45, 14, 46},
+	&versionInfo{24, Q, 30, 11, 24, 16, 25},
+	&versionInfo{24, H, 30, 30, 16, 2, 17},
+	&versionInfo{25, L, 26, 8, 106, 4, 107},
+	&versionInfo{25, M, 28, 8, 47, 13, 48},
+	&versionInfo{25, Q, 30, 7, 24, 22, 25},
+	&versionInfo{25, H, 30, 22, 15, 13, 16},
+	&versionInfo{26, L, 28, 10, 114, 2, 115},
+	&versionInfo{26, M, 28, 19, 46, 4, 47},
+	&versionInfo{26, Q, 28, 28, 22, 6, 23},
+	&versionInfo{26, H, 30, 33, 16, 4, 17},
+	&versionInfo{27, L, 30, 8, 122, 4, 123},
+	&versionInfo{27, M, 28, 22, 45, 3, 46},
+	&versionInfo{27, Q, 30, 8, 23, 26, 24},
+	&versionInfo{27, H, 30, 12, 15, 28, 16},
+	&versionInfo{28, L, 30, 3, 117, 10, 118},
+	&versionInfo{28, M, 28, 3, 45, 23, 46},
+	&versionInfo{28, Q, 30, 4, 24, 31, 25},
+	&versionInfo{28, H, 30, 11, 15, 31, 16},
+	&versionInfo{29, L, 30, 7, 116, 7, 117},
+	&versionInfo{29, M, 28, 21, 45, 7, 46},
+	&versionInfo{29, Q, 30, 1, 23, 37, 24},
+	&versionInfo{29, H, 30, 19, 15, 26, 16},
+	&versionInfo{30, L, 30, 5, 115, 10, 116},
+	&versionInfo{30, M, 28, 19, 47, 10, 48},
+	&versionInfo{30, Q, 30, 15, 24, 25, 25},
+	&versionInfo{30, H, 30, 23, 15, 25, 16},
+	&versionInfo{31, L, 30, 13, 115, 3, 116},
+	&versionInfo{31, M, 28, 2, 46, 29, 47},
+	&versionInfo{31, Q, 30, 42, 24, 1, 25},
+	&versionInfo{31, H, 30, 23, 15, 28, 16},
+	&versionInfo{32, L, 30, 17, 115, 0, 0},
+	&versionInfo{32, M, 28, 10, 46, 23, 47},
+	&versionInfo{32, Q, 30, 10, 24, 35, 25},
+	&versionInfo{32, H, 30, 19, 15, 35, 16},
+	&versionInfo{33, L, 30, 17, 115, 1, 116},
+	&versionInfo{33, M, 28, 14, 46, 21, 47},
+	&versionInfo{33, Q, 30, 29, 24, 19, 25},
+	&versionInfo{33, H, 30, 11, 15, 46, 16},
+	&versionInfo{34, L, 30, 13, 115, 6, 116},
+	&versionInfo{34, M, 28, 14, 46, 23, 47},
+	&versionInfo{34, Q, 30, 44, 24, 7, 25},
+	&versionInfo{34, H, 30, 59, 16, 1, 17},
+	&versionInfo{35, L, 30, 12, 121, 7, 122},
+	&versionInfo{35, M, 28, 12, 47, 26, 48},
+	&versionInfo{35, Q, 30, 39, 24, 14, 25},
+	&versionInfo{35, H, 30, 22, 15, 41, 16},
+	&versionInfo{36, L, 30, 6, 121, 14, 122},
+	&versionInfo{36, M, 28, 6, 47, 34, 48},
+	&versionInfo{36, Q, 30, 46, 24, 10, 25},
+	&versionInfo{36, H, 30, 2, 15, 64, 16},
+	&versionInfo{37, L, 30, 17, 122, 4, 123},
+	&versionInfo{37, M, 28, 29, 46, 14, 47},
+	&versionInfo{37, Q, 30, 49, 24, 10, 25},
+	&versionInfo{37, H, 30, 24, 15, 46, 16},
+	&versionInfo{38, L, 30, 4, 122, 18, 123},
+	&versionInfo{38, M, 28, 13, 46, 32, 47},
+	&versionInfo{38, Q, 30, 48, 24, 14, 25},
+	&versionInfo{38, H, 30, 42, 15, 32, 16},
+	&versionInfo{39, L, 30, 20, 117, 4, 118},
+	&versionInfo{39, M, 28, 40, 47, 7, 48},
+	&versionInfo{39, Q, 30, 43, 24, 22, 25},
+	&versionInfo{39, H, 30, 10, 15, 67, 16},
+	&versionInfo{40, L, 30, 19, 118, 6, 119},
+	&versionInfo{40, M, 28, 18, 47, 31, 48},
+	&versionInfo{40, Q, 30, 34, 24, 34, 25},
+	&versionInfo{40, H, 30, 20, 15, 61, 16},
+}
+
+func (vi *versionInfo) totalDataBytes() int {
+	g1Data := int(vi.NumberOfBlocksInGroup1) * int(vi.DataCodeWordsPerBlockInGroup1)
+	g2Data := int(vi.NumberOfBlocksInGroup2) * int(vi.DataCodeWordsPerBlockInGroup2)
+	return (g1Data + g2Data)
+}
+
+func (vi *versionInfo) charCountBits(m encodingMode) byte {
+	switch m {
+	case numericMode:
+		if vi.Version < 10 {
+			return 10
+		} else if vi.Version < 27 {
+			return 12
+		}
+		return 14
+
+	case alphaNumericMode:
+		if vi.Version < 10 {
+			return 9
+		} else if vi.Version < 27 {
+			return 11
+		}
+		return 13
+
+	case byteMode:
+		if vi.Version < 10 {
+			return 8
+		}
+		return 16
+
+	case kanjiMode:
+		if vi.Version < 10 {
+			return 8
+		} else if vi.Version < 27 {
+			return 10
+		}
+		return 12
+	default:
+		return 0
+	}
+}
+
+func (vi *versionInfo) modulWidth() int {
+	return ((int(vi.Version) - 1) * 4) + 21
+}
+
+func (vi *versionInfo) alignmentPatternPlacements() []int {
+	if vi.Version == 1 {
+		return make([]int, 0)
+	}
+
+	first := 6
+	last := vi.modulWidth() - 7
+	space := float64(last - first)
+	count := int(math.Ceil(space/28)) + 1
+
+	result := make([]int, count)
+	result[0] = first
+	result[len(result)-1] = last
+	if count > 2 {
+		step := int(math.Ceil(float64(last-first) / float64(count-1)))
+		if step%2 == 1 {
+			frac := float64(last-first) / float64(count-1)
+			_, x := math.Modf(frac)
+			if x >= 0.5 {
+				frac = math.Ceil(frac)
+			} else {
+				frac = math.Floor(frac)
+			}
+
+			if int(frac)%2 == 0 {
+				step--
+			} else {
+				step++
+			}
+		}
+
+		for i := 1; i <= count-2; i++ {
+			result[i] = last - (step * (count - 1 - i))
+		}
+	}
+
+	return result
+}
+
+func findSmallestVersionInfo(ecl ErrorCorrectionLevel, mode encodingMode, dataBits int) *versionInfo {
+	dataBits = dataBits + 4 // mode indicator
+	for _, vi := range versionInfos {
+		if vi.Level == ecl {
+			if (vi.totalDataBytes() * 8) >= (dataBits + int(vi.charCountBits(mode))) {
+				return vi
+			}
+		}
+	}
+	return nil
+}

+ 134 - 0
vendor/github.com/boombuler/barcode/scaledbarcode.go

@@ -0,0 +1,134 @@
+package barcode
+
+import (
+	"errors"
+	"fmt"
+	"image"
+	"image/color"
+	"math"
+)
+
+type wrapFunc func(x, y int) color.Color
+
+type scaledBarcode struct {
+	wrapped     Barcode
+	wrapperFunc wrapFunc
+	rect        image.Rectangle
+}
+
+type intCSscaledBC struct {
+	scaledBarcode
+}
+
+func (bc *scaledBarcode) Content() string {
+	return bc.wrapped.Content()
+}
+
+func (bc *scaledBarcode) Metadata() Metadata {
+	return bc.wrapped.Metadata()
+}
+
+func (bc *scaledBarcode) ColorModel() color.Model {
+	return bc.wrapped.ColorModel()
+}
+
+func (bc *scaledBarcode) Bounds() image.Rectangle {
+	return bc.rect
+}
+
+func (bc *scaledBarcode) At(x, y int) color.Color {
+	return bc.wrapperFunc(x, y)
+}
+
+func (bc *intCSscaledBC) CheckSum() int {
+	if cs, ok := bc.wrapped.(BarcodeIntCS); ok {
+		return cs.CheckSum()
+	}
+	return 0
+}
+
+// Scale returns a resized barcode with the given width and height.
+func Scale(bc Barcode, width, height int) (Barcode, error) {
+	switch bc.Metadata().Dimensions {
+	case 1:
+		return scale1DCode(bc, width, height)
+	case 2:
+		return scale2DCode(bc, width, height)
+	}
+
+	return nil, errors.New("unsupported barcode format")
+}
+
+func newScaledBC(wrapped Barcode, wrapperFunc wrapFunc, rect image.Rectangle) Barcode {
+	result := &scaledBarcode{
+		wrapped:     wrapped,
+		wrapperFunc: wrapperFunc,
+		rect:        rect,
+	}
+
+	if _, ok := wrapped.(BarcodeIntCS); ok {
+		return &intCSscaledBC{*result}
+	}
+	return result
+}
+
+func scale2DCode(bc Barcode, width, height int) (Barcode, error) {
+	orgBounds := bc.Bounds()
+	orgWidth := orgBounds.Max.X - orgBounds.Min.X
+	orgHeight := orgBounds.Max.Y - orgBounds.Min.Y
+
+	factor := int(math.Min(float64(width)/float64(orgWidth), float64(height)/float64(orgHeight)))
+	if factor <= 0 {
+		return nil, fmt.Errorf("can not scale barcode to an image smaller than %dx%d", orgWidth, orgHeight)
+	}
+
+	offsetX := (width - (orgWidth * factor)) / 2
+	offsetY := (height - (orgHeight * factor)) / 2
+
+	wrap := func(x, y int) color.Color {
+		if x < offsetX || y < offsetY {
+			return color.White
+		}
+		x = (x - offsetX) / factor
+		y = (y - offsetY) / factor
+		if x >= orgWidth || y >= orgHeight {
+			return color.White
+		}
+		return bc.At(x, y)
+	}
+
+	return newScaledBC(
+		bc,
+		wrap,
+		image.Rect(0, 0, width, height),
+	), nil
+}
+
+func scale1DCode(bc Barcode, width, height int) (Barcode, error) {
+	orgBounds := bc.Bounds()
+	orgWidth := orgBounds.Max.X - orgBounds.Min.X
+	factor := int(float64(width) / float64(orgWidth))
+
+	if factor <= 0 {
+		return nil, fmt.Errorf("can not scale barcode to an image smaller than %dx1", orgWidth)
+	}
+	offsetX := (width - (orgWidth * factor)) / 2
+
+	wrap := func(x, y int) color.Color {
+		if x < offsetX {
+			return color.White
+		}
+		x = (x - offsetX) / factor
+
+		if x >= orgWidth {
+			return color.White
+		}
+		return bc.At(x, 0)
+	}
+
+	return newScaledBC(
+		bc,
+		wrap,
+		image.Rect(0, 0, width, height),
+	), nil
+}

+ 57 - 0
vendor/github.com/boombuler/barcode/utils/base1dcode.go

@@ -0,0 +1,57 @@
+// Package utils contains some utilities which are needed to create barcodes
+package utils
+
+import (
+	"image"
+	"image/color"
+
+	"github.com/boombuler/barcode"
+)
+
+type base1DCode struct {
+	*BitList
+	kind    string
+	content string
+}
+
+type base1DCodeIntCS struct {
+	base1DCode
+	checksum int
+}
+
+func (c *base1DCode) Content() string {
+	return c.content
+}
+
+func (c *base1DCode) Metadata() barcode.Metadata {
+	return barcode.Metadata{c.kind, 1}
+}
+
+func (c *base1DCode) ColorModel() color.Model {
+	return color.Gray16Model
+}
+
+func (c *base1DCode) Bounds() image.Rectangle {
+	return image.Rect(0, 0, c.Len(), 1)
+}
+
+func (c *base1DCode) At(x, y int) color.Color {
+	if c.GetBit(x) {
+		return color.Black
+	}
+	return color.White
+}
+
+func (c *base1DCodeIntCS) CheckSum() int {
+	return c.checksum
+}
+
+// New1DCodeIntCheckSum creates a new 1D barcode where the bars are represented by the bits in the bars BitList
+func New1DCodeIntCheckSum(codeKind, content string, bars *BitList, checksum int) barcode.BarcodeIntCS {
+	return &base1DCodeIntCS{base1DCode{bars, codeKind, content}, checksum}
+}
+
+// New1DCode creates a new 1D barcode where the bars are represented by the bits in the bars BitList
+func New1DCode(codeKind, content string, bars *BitList) barcode.Barcode {
+	return &base1DCode{bars, codeKind, content}
+}

+ 119 - 0
vendor/github.com/boombuler/barcode/utils/bitlist.go

@@ -0,0 +1,119 @@
+package utils
+
+// BitList is a list that contains bits
+type BitList struct {
+	count int
+	data  []int32
+}
+
+// NewBitList returns a new BitList with the given length
+// all bits are initialized to false
+func NewBitList(capacity int) *BitList {
+	bl := new(BitList)
+	bl.count = capacity
+	x := 0
+	if capacity%32 != 0 {
+		x = 1
+	}
+	bl.data = make([]int32, capacity/32+x)
+	return bl
+}
+
+// Len returns the number of contained bits
+func (bl *BitList) Len() int {
+	return bl.count
+}
+
+func (bl *BitList) grow() {
+	growBy := len(bl.data)
+	if growBy < 128 {
+		growBy = 128
+	} else if growBy >= 1024 {
+		growBy = 1024
+	}
+
+	nd := make([]int32, len(bl.data)+growBy)
+	copy(nd, bl.data)
+	bl.data = nd
+}
+
+// AddBit appends the given bits to the end of the list
+func (bl *BitList) AddBit(bits ...bool) {
+	for _, bit := range bits {
+		itmIndex := bl.count / 32
+		for itmIndex >= len(bl.data) {
+			bl.grow()
+		}
+		bl.SetBit(bl.count, bit)
+		bl.count++
+	}
+}
+
+// SetBit sets the bit at the given index to the given value
+func (bl *BitList) SetBit(index int, value bool) {
+	itmIndex := index / 32
+	itmBitShift := 31 - (index % 32)
+	if value {
+		bl.data[itmIndex] = bl.data[itmIndex] | 1<<uint(itmBitShift)
+	} else {
+		bl.data[itmIndex] = bl.data[itmIndex] & ^(1 << uint(itmBitShift))
+	}
+}
+
+// GetBit returns the bit at the given index
+func (bl *BitList) GetBit(index int) bool {
+	itmIndex := index / 32
+	itmBitShift := 31 - (index % 32)
+	return ((bl.data[itmIndex] >> uint(itmBitShift)) & 1) == 1
+}
+
+// AddByte appends all 8 bits of the given byte to the end of the list
+func (bl *BitList) AddByte(b byte) {
+	for i := 7; i >= 0; i-- {
+		bl.AddBit(((b >> uint(i)) & 1) == 1)
+	}
+}
+
+// AddBits appends the last (LSB) 'count' bits of 'b' to the end of the list
+func (bl *BitList) AddBits(b int, count byte) {
+	for i := int(count) - 1; i >= 0; i-- {
+		bl.AddBit(((b >> uint(i)) & 1) == 1)
+	}
+}
+
+// GetBytes returns all bits of the BitList as a []byte
+func (bl *BitList) GetBytes() []byte {
+	len := bl.count >> 3
+	if (bl.count % 8) != 0 {
+		len++
+	}
+	result := make([]byte, len)
+	for i := 0; i < len; i++ {
+		shift := (3 - (i % 4)) * 8
+		result[i] = (byte)((bl.data[i/4] >> uint(shift)) & 0xFF)
+	}
+	return result
+}
+
+// IterateBytes iterates through all bytes contained in the BitList
+func (bl *BitList) IterateBytes() <-chan byte {
+	res := make(chan byte)
+
+	go func() {
+		c := bl.count
+		shift := 24
+		i := 0
+		for c > 0 {
+			res <- byte((bl.data[i] >> uint(shift)) & 0xFF)
+			shift -= 8
+			if shift < 0 {
+				shift = 24
+				i++
+			}
+			c -= 8
+		}
+		close(res)
+	}()
+
+	return res
+}
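
A small sketch of BitList together with New1DCode from base1dcode.go above. The bit pattern and the "example" kind name are arbitrary rather than a valid symbology; the sketch only shows how the encoders in this library fill a BitList and wrap it as a Barcode.

```go
package main

import (
	"fmt"

	"github.com/boombuler/barcode/utils"
)

func main() {
	bl := new(utils.BitList) // the zero value is usable; the backing slice grows on demand
	bl.AddBit(true, false, true, true)
	bl.AddBits(0x0F, 4) // append the low 4 bits of 0x0F
	bl.AddByte(0xA5)    // append a full byte, MSB first

	fmt.Println(bl.Len())               // 16
	fmt.Printf("%08b\n", bl.GetBytes()) // [10111111 10100101]

	// Wrap the bits as a 1D barcode ("example" is an arbitrary kind name).
	bc := utils.New1DCode("example", "demo content", bl)
	fmt.Println(bc.Bounds()) // (0,0)-(16,1)
}
```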

+ 65 - 0
vendor/github.com/boombuler/barcode/utils/galoisfield.go

@@ -0,0 +1,65 @@
+package utils
+
+// GaloisField encapsulates Galois field arithmetic
+type GaloisField struct {
+	Size    int
+	Base    int
+	ALogTbl []int
+	LogTbl  []int
+}
+
+// NewGaloisField creates a new Galois field
+func NewGaloisField(pp, fieldSize, b int) *GaloisField {
+	result := new(GaloisField)
+
+	result.Size = fieldSize
+	result.Base = b
+	result.ALogTbl = make([]int, fieldSize)
+	result.LogTbl = make([]int, fieldSize)
+
+	x := 1
+	for i := 0; i < fieldSize; i++ {
+		result.ALogTbl[i] = x
+		x = x * 2
+		if x >= fieldSize {
+			x = (x ^ pp) & (fieldSize - 1)
+		}
+	}
+
+	for i := 0; i < fieldSize; i++ {
+		result.LogTbl[result.ALogTbl[i]] = int(i)
+	}
+
+	return result
+}
+
+func (gf *GaloisField) Zero() *GFPoly {
+	return NewGFPoly(gf, []int{0})
+}
+
+// AddOrSub adds or subtracts two numbers
+func (gf *GaloisField) AddOrSub(a, b int) int {
+	return a ^ b
+}
+
+// Multiply multiplies two numbers
+func (gf *GaloisField) Multiply(a, b int) int {
+	if a == 0 || b == 0 {
+		return 0
+	}
+	return gf.ALogTbl[(gf.LogTbl[a]+gf.LogTbl[b])%(gf.Size-1)]
+}
+
+// Divide divides two numbers
+func (gf *GaloisField) Divide(a, b int) int {
+	if b == 0 {
+		panic("divide by zero")
+	} else if a == 0 {
+		return 0
+	}
+	return gf.ALogTbl[(gf.LogTbl[a]-gf.LogTbl[b])%(gf.Size-1)]
+}
+
+func (gf *GaloisField) Invers(num int) int {
+	return gf.ALogTbl[(gf.Size-1)-gf.LogTbl[num]]
+}
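
NewGaloisField builds the log/antilog tables for GF(2^n) from a primitive polynomial, the field size, and a generator base. A short sketch below; the parameters 285/256/0 (GF(2^8) with polynomial 0x11D, base 0) are the ones typically used for QR-style Reed-Solomon and are assumed here for illustration.

```go
package main

import (
	"fmt"

	"github.com/boombuler/barcode/utils"
)

func main() {
	// Assumed parameters: GF(2^8) with x^8+x^4+x^3+x^2+1 (285) and base 0.
	gf := utils.NewGaloisField(285, 256, 0)

	a, b := 7, 13
	sum := gf.AddOrSub(a, b)     // addition and subtraction are both XOR: 7 ^ 13 = 10
	prod := gf.Multiply(a, b)    // multiplication via the log/antilog tables
	one := gf.Divide(prod, prod) // any nonzero element divided by itself is 1

	fmt.Println(sum, prod, one)
}
```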

+ 103 - 0
vendor/github.com/boombuler/barcode/utils/gfpoly.go

@@ -0,0 +1,103 @@
+package utils
+
+type GFPoly struct {
+	gf           *GaloisField
+	Coefficients []int
+}
+
+func (gp *GFPoly) Degree() int {
+	return len(gp.Coefficients) - 1
+}
+
+func (gp *GFPoly) Zero() bool {
+	return gp.Coefficients[0] == 0
+}
+
+// GetCoefficient returns the coefficient of x ^ degree
+func (gp *GFPoly) GetCoefficient(degree int) int {
+	return gp.Coefficients[gp.Degree()-degree]
+}
+
+func (gp *GFPoly) AddOrSubstract(other *GFPoly) *GFPoly {
+	if gp.Zero() {
+		return other
+	} else if other.Zero() {
+		return gp
+	}
+	smallCoeff := gp.Coefficients
+	largeCoeff := other.Coefficients
+	if len(smallCoeff) > len(largeCoeff) {
+		largeCoeff, smallCoeff = smallCoeff, largeCoeff
+	}
+	sumDiff := make([]int, len(largeCoeff))
+	lenDiff := len(largeCoeff) - len(smallCoeff)
+	copy(sumDiff, largeCoeff[:lenDiff])
+	for i := lenDiff; i < len(largeCoeff); i++ {
+		sumDiff[i] = int(gp.gf.AddOrSub(int(smallCoeff[i-lenDiff]), int(largeCoeff[i])))
+	}
+	return NewGFPoly(gp.gf, sumDiff)
+}
+
+func (gp *GFPoly) MultByMonominal(degree int, coeff int) *GFPoly {
+	if coeff == 0 {
+		return gp.gf.Zero()
+	}
+	size := len(gp.Coefficients)
+	result := make([]int, size+degree)
+	for i := 0; i < size; i++ {
+		result[i] = int(gp.gf.Multiply(int(gp.Coefficients[i]), int(coeff)))
+	}
+	return NewGFPoly(gp.gf, result)
+}
+
+func (gp *GFPoly) Multiply(other *GFPoly) *GFPoly {
+	if gp.Zero() || other.Zero() {
+		return gp.gf.Zero()
+	}
+	aCoeff := gp.Coefficients
+	aLen := len(aCoeff)
+	bCoeff := other.Coefficients
+	bLen := len(bCoeff)
+	product := make([]int, aLen+bLen-1)
+	for i := 0; i < aLen; i++ {
+		ac := int(aCoeff[i])
+		for j := 0; j < bLen; j++ {
+			bc := int(bCoeff[j])
+			product[i+j] = int(gp.gf.AddOrSub(int(product[i+j]), gp.gf.Multiply(ac, bc)))
+		}
+	}
+	return NewGFPoly(gp.gf, product)
+}
+
+func (gp *GFPoly) Divide(other *GFPoly) (quotient *GFPoly, remainder *GFPoly) {
+	quotient = gp.gf.Zero()
+	remainder = gp
+	fld := gp.gf
+	denomLeadTerm := other.GetCoefficient(other.Degree())
+	inversDenomLeadTerm := fld.Invers(int(denomLeadTerm))
+	for remainder.Degree() >= other.Degree() && !remainder.Zero() {
+		degreeDiff := remainder.Degree() - other.Degree()
+		scale := int(fld.Multiply(int(remainder.GetCoefficient(remainder.Degree())), inversDenomLeadTerm))
+		term := other.MultByMonominal(degreeDiff, scale)
+		itQuot := NewMonominalPoly(fld, degreeDiff, scale)
+		quotient = quotient.AddOrSubstract(itQuot)
+		remainder = remainder.AddOrSubstract(term)
+	}
+	return
+}
+
+func NewMonominalPoly(field *GaloisField, degree int, coeff int) *GFPoly {
+	if coeff == 0 {
+		return field.Zero()
+	}
+	result := make([]int, degree+1)
+	result[0] = coeff
+	return NewGFPoly(field, result)
+}
+
+func NewGFPoly(field *GaloisField, coefficients []int) *GFPoly {
+	for len(coefficients) > 1 && coefficients[0] == 0 {
+		coefficients = coefficients[1:]
+	}
+	return &GFPoly{field, coefficients}
+}
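
GFPoly represents a polynomial over the field above, with coefficients stored highest degree first. A brief sketch of the arithmetic, again assuming the 285/256/0 field; the coefficients are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/boombuler/barcode/utils"
)

func main() {
	gf := utils.NewGaloisField(285, 256, 0) // assumed field parameters, as above

	// p(x) = x^2 + 3x + 2 and q(x) = x + 1, coefficients highest degree first.
	p := utils.NewGFPoly(gf, []int{1, 3, 2})
	q := utils.NewGFPoly(gf, []int{1, 1})

	prod := p.Multiply(q)
	quot, rem := prod.Divide(q) // dividing the product back out by q

	fmt.Println(prod.Degree(), prod.Coefficients) // degree 3
	fmt.Println(quot.Coefficients, rem.Coefficients)
}
```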

+ 44 - 0
vendor/github.com/boombuler/barcode/utils/reedsolomon.go

@@ -0,0 +1,44 @@
+package utils
+
+import (
+	"sync"
+)
+
+type ReedSolomonEncoder struct {
+	gf        *GaloisField
+	polynomes []*GFPoly
+	m         *sync.Mutex
+}
+
+func NewReedSolomonEncoder(gf *GaloisField) *ReedSolomonEncoder {
+	return &ReedSolomonEncoder{
+		gf, []*GFPoly{NewGFPoly(gf, []int{1})}, new(sync.Mutex),
+	}
+}
+
+func (rs *ReedSolomonEncoder) getPolynomial(degree int) *GFPoly {
+	rs.m.Lock()
+	defer rs.m.Unlock()
+
+	if degree >= len(rs.polynomes) {
+		last := rs.polynomes[len(rs.polynomes)-1]
+		for d := len(rs.polynomes); d <= degree; d++ {
+			next := last.Multiply(NewGFPoly(rs.gf, []int{1, rs.gf.ALogTbl[d-1+rs.gf.Base]}))
+			rs.polynomes = append(rs.polynomes, next)
+			last = next
+		}
+	}
+	return rs.polynomes[degree]
+}
+
+func (rs *ReedSolomonEncoder) Encode(data []int, eccCount int) []int {
+	generator := rs.getPolynomial(eccCount)
+	info := NewGFPoly(rs.gf, data)
+	info = info.MultByMonominal(eccCount, 1)
+	_, remainder := info.Divide(generator)
+
+	result := make([]int, eccCount)
+	numZero := int(eccCount) - len(remainder.Coefficients)
+	copy(result[numZero:], remainder.Coefficients)
+	return result
+}
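
The Reed-Solomon encoder ties the two files above together: it derives a generator polynomial of the requested degree and returns the error-correction codewords for a block of data codewords. A sketch with the same assumed 285/256/0 field; the data values are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/boombuler/barcode/utils"
)

func main() {
	gf := utils.NewGaloisField(285, 256, 0) // assumed field parameters, as above
	rs := utils.NewReedSolomonEncoder(gf)

	data := []int{32, 91, 11, 120, 209, 114, 220, 77} // 8 data codewords
	ecc := rs.Encode(data, 10)                        // request 10 error-correction codewords

	fmt.Println(len(ecc), ecc) // the 10 codewords appended after the data block
}
```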

+ 19 - 0
vendor/github.com/boombuler/barcode/utils/runeint.go

@@ -0,0 +1,19 @@
+package utils
+
+// RuneToInt converts a rune between '0' and '9' to an integer between 0 and 9
+// If the rune is outside of this range -1 is returned.
+func RuneToInt(r rune) int {
+	if r >= '0' && r <= '9' {
+		return int(r - '0')
+	}
+	return -1
+}
+
+// IntToRune converts a digit 0 - 9 to the rune '0' - '9'. If the given int is outside
+// of this range 'F' is returned!
+func IntToRune(i int) rune {
+	if i >= 0 && i <= 9 {
+		return rune(i + '0')
+	}
+	return 'F'
+}

+ 202 - 0
vendor/github.com/bradfitz/gomemcache/LICENSE

@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 687 - 0
vendor/github.com/bradfitz/gomemcache/memcache/memcache.go

@@ -0,0 +1,687 @@
+/*
+Copyright 2011 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package memcache provides a client for the memcached cache server.
+package memcache
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+// Similar to:
+// https://godoc.org/google.golang.org/appengine/memcache
+
+var (
+	// ErrCacheMiss means that a Get failed because the item wasn't present.
+	ErrCacheMiss = errors.New("memcache: cache miss")
+
+	// ErrCASConflict means that a CompareAndSwap call failed due to the
+	// cached value being modified between the Get and the CompareAndSwap.
+	// If the cached value was simply evicted rather than replaced,
+	// ErrNotStored will be returned instead.
+	ErrCASConflict = errors.New("memcache: compare-and-swap conflict")
+
+	// ErrNotStored means that a conditional write operation (i.e. Add or
+	// CompareAndSwap) failed because the condition was not satisfied.
+	ErrNotStored = errors.New("memcache: item not stored")
+
+	// ErrServerError means that a server error occurred.
+	ErrServerError = errors.New("memcache: server error")
+
+	// ErrNoStats means that no statistics were available.
+	ErrNoStats = errors.New("memcache: no statistics available")
+
+	// ErrMalformedKey is returned when an invalid key is used.
+	// Keys must be at maximum 250 bytes long and not
+	// contain whitespace or control characters.
+	ErrMalformedKey = errors.New("malformed: key is too long or contains invalid characters")
+
+	// ErrNoServers is returned when no servers are configured or available.
+	ErrNoServers = errors.New("memcache: no servers configured or available")
+)
+
+const (
+	// DefaultTimeout is the default socket read/write timeout.
+	DefaultTimeout = 100 * time.Millisecond
+
+	// DefaultMaxIdleConns is the default maximum number of idle connections
+	// kept for any single address.
+	DefaultMaxIdleConns = 2
+)
+
+const buffered = 8 // arbitrary buffered channel size, for readability
+
+// resumableError returns true if err is only a protocol-level cache error.
+// This is used to determine whether or not a server connection should
+// be re-used. If an error occurs, by default we don't reuse the
+// connection, unless it was just a cache error.
+func resumableError(err error) bool {
+	switch err {
+	case ErrCacheMiss, ErrCASConflict, ErrNotStored, ErrMalformedKey:
+		return true
+	}
+	return false
+}
+
+func legalKey(key string) bool {
+	if len(key) > 250 {
+		return false
+	}
+	for i := 0; i < len(key); i++ {
+		if key[i] <= ' ' || key[i] == 0x7f {
+			return false
+		}
+	}
+	return true
+}
+
+var (
+	crlf            = []byte("\r\n")
+	space           = []byte(" ")
+	resultOK        = []byte("OK\r\n")
+	resultStored    = []byte("STORED\r\n")
+	resultNotStored = []byte("NOT_STORED\r\n")
+	resultExists    = []byte("EXISTS\r\n")
+	resultNotFound  = []byte("NOT_FOUND\r\n")
+	resultDeleted   = []byte("DELETED\r\n")
+	resultEnd       = []byte("END\r\n")
+	resultOk        = []byte("OK\r\n")
+	resultTouched   = []byte("TOUCHED\r\n")
+
+	resultClientErrorPrefix = []byte("CLIENT_ERROR ")
+)
+
+// New returns a memcache client using the provided server(s)
+// with equal weight. If a server is listed multiple times,
+// it gets a proportional amount of weight.
+func New(server ...string) *Client {
+	ss := new(ServerList)
+	ss.SetServers(server...)
+	return NewFromSelector(ss)
+}
+
+// NewFromSelector returns a new Client using the provided ServerSelector.
+func NewFromSelector(ss ServerSelector) *Client {
+	return &Client{selector: ss}
+}
+
+// Client is a memcache client.
+// It is safe for unlocked use by multiple concurrent goroutines.
+type Client struct {
+	// Timeout specifies the socket read/write timeout.
+	// If zero, DefaultTimeout is used.
+	Timeout time.Duration
+
+	// MaxIdleConns specifies the maximum number of idle connections that will
+	// be maintained per address. If less than one, DefaultMaxIdleConns will be
+	// used.
+	//
+	// Consider your expected traffic rates and latency carefully. This should
+	// be set to a number higher than your peak parallel requests.
+	MaxIdleConns int
+
+	selector ServerSelector
+
+	lk       sync.Mutex
+	freeconn map[string][]*conn
+}
+
+// Item is an item to be got or stored in a memcached server.
+type Item struct {
+	// Key is the Item's key (250 bytes maximum).
+	Key string
+
+	// Value is the Item's value.
+	Value []byte
+
+	// Flags are server-opaque flags whose semantics are entirely
+	// up to the app.
+	Flags uint32
+
+	// Expiration is the cache expiration time, in seconds: either a relative
+	// time from now (up to 1 month), or an absolute Unix epoch time.
+	// Zero means the Item has no expiration time.
+	Expiration int32
+
+	// Compare and swap ID.
+	casid uint64
+}
+
+// conn is a connection to a server.
+type conn struct {
+	nc   net.Conn
+	rw   *bufio.ReadWriter
+	addr net.Addr
+	c    *Client
+}
+
+// release returns this connection back to the client's free pool
+func (cn *conn) release() {
+	cn.c.putFreeConn(cn.addr, cn)
+}
+
+func (cn *conn) extendDeadline() {
+	cn.nc.SetDeadline(time.Now().Add(cn.c.netTimeout()))
+}
+
+// condRelease releases this connection if the error pointed to by err
+// is nil (not an error) or is only a protocol level error (e.g. a
+// cache miss).  The purpose is to not recycle TCP connections that
+// are bad.
+func (cn *conn) condRelease(err *error) {
+	if *err == nil || resumableError(*err) {
+		cn.release()
+	} else {
+		cn.nc.Close()
+	}
+}
+
+func (c *Client) putFreeConn(addr net.Addr, cn *conn) {
+	c.lk.Lock()
+	defer c.lk.Unlock()
+	if c.freeconn == nil {
+		c.freeconn = make(map[string][]*conn)
+	}
+	freelist := c.freeconn[addr.String()]
+	if len(freelist) >= c.maxIdleConns() {
+		cn.nc.Close()
+		return
+	}
+	c.freeconn[addr.String()] = append(freelist, cn)
+}
+
+func (c *Client) getFreeConn(addr net.Addr) (cn *conn, ok bool) {
+	c.lk.Lock()
+	defer c.lk.Unlock()
+	if c.freeconn == nil {
+		return nil, false
+	}
+	freelist, ok := c.freeconn[addr.String()]
+	if !ok || len(freelist) == 0 {
+		return nil, false
+	}
+	cn = freelist[len(freelist)-1]
+	c.freeconn[addr.String()] = freelist[:len(freelist)-1]
+	return cn, true
+}
+
+func (c *Client) netTimeout() time.Duration {
+	if c.Timeout != 0 {
+		return c.Timeout
+	}
+	return DefaultTimeout
+}
+
+func (c *Client) maxIdleConns() int {
+	if c.MaxIdleConns > 0 {
+		return c.MaxIdleConns
+	}
+	return DefaultMaxIdleConns
+}
+
+// ConnectTimeoutError is the error type used when it takes
+// too long to connect to the desired host. This level of
+// detail can generally be ignored.
+type ConnectTimeoutError struct {
+	Addr net.Addr
+}
+
+func (cte *ConnectTimeoutError) Error() string {
+	return "memcache: connect timeout to " + cte.Addr.String()
+}
+
+func (c *Client) dial(addr net.Addr) (net.Conn, error) {
+	type connError struct {
+		cn  net.Conn
+		err error
+	}
+
+	nc, err := net.DialTimeout(addr.Network(), addr.String(), c.netTimeout())
+	if err == nil {
+		return nc, nil
+	}
+
+	if ne, ok := err.(net.Error); ok && ne.Timeout() {
+		return nil, &ConnectTimeoutError{addr}
+	}
+
+	return nil, err
+}
+
+func (c *Client) getConn(addr net.Addr) (*conn, error) {
+	cn, ok := c.getFreeConn(addr)
+	if ok {
+		cn.extendDeadline()
+		return cn, nil
+	}
+	nc, err := c.dial(addr)
+	if err != nil {
+		return nil, err
+	}
+	cn = &conn{
+		nc:   nc,
+		addr: addr,
+		rw:   bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)),
+		c:    c,
+	}
+	cn.extendDeadline()
+	return cn, nil
+}
+
+func (c *Client) onItem(item *Item, fn func(*Client, *bufio.ReadWriter, *Item) error) error {
+	addr, err := c.selector.PickServer(item.Key)
+	if err != nil {
+		return err
+	}
+	cn, err := c.getConn(addr)
+	if err != nil {
+		return err
+	}
+	defer cn.condRelease(&err)
+	if err = fn(c, cn.rw, item); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *Client) FlushAll() error {
+	return c.selector.Each(c.flushAllFromAddr)
+}
+
+// Get gets the item for the given key. ErrCacheMiss is returned for a
+// memcache cache miss. The key must be at most 250 bytes in length.
+func (c *Client) Get(key string) (item *Item, err error) {
+	err = c.withKeyAddr(key, func(addr net.Addr) error {
+		return c.getFromAddr(addr, []string{key}, func(it *Item) { item = it })
+	})
+	if err == nil && item == nil {
+		err = ErrCacheMiss
+	}
+	return
+}
+
+// Touch updates the expiry for the given key. The seconds parameter is either
+// a Unix timestamp or, if seconds is less than 1 month, the number of seconds
+// into the future at which time the item will expire. Zero means the item has
+// no expiration time. ErrCacheMiss is returned if the key is not in the cache.
+// The key must be at most 250 bytes in length.
+func (c *Client) Touch(key string, seconds int32) (err error) {
+	return c.withKeyAddr(key, func(addr net.Addr) error {
+		return c.touchFromAddr(addr, []string{key}, seconds)
+	})
+}
+
+func (c *Client) withKeyAddr(key string, fn func(net.Addr) error) (err error) {
+	if !legalKey(key) {
+		return ErrMalformedKey
+	}
+	addr, err := c.selector.PickServer(key)
+	if err != nil {
+		return err
+	}
+	return fn(addr)
+}
+
+func (c *Client) withAddrRw(addr net.Addr, fn func(*bufio.ReadWriter) error) (err error) {
+	cn, err := c.getConn(addr)
+	if err != nil {
+		return err
+	}
+	defer cn.condRelease(&err)
+	return fn(cn.rw)
+}
+
+func (c *Client) withKeyRw(key string, fn func(*bufio.ReadWriter) error) error {
+	return c.withKeyAddr(key, func(addr net.Addr) error {
+		return c.withAddrRw(addr, fn)
+	})
+}
+
+func (c *Client) getFromAddr(addr net.Addr, keys []string, cb func(*Item)) error {
+	return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
+		if _, err := fmt.Fprintf(rw, "gets %s\r\n", strings.Join(keys, " ")); err != nil {
+			return err
+		}
+		if err := rw.Flush(); err != nil {
+			return err
+		}
+		if err := parseGetResponse(rw.Reader, cb); err != nil {
+			return err
+		}
+		return nil
+	})
+}
+
+// flushAllFromAddr sends the flush_all command to the given addr
+func (c *Client) flushAllFromAddr(addr net.Addr) error {
+	return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
+		if _, err := fmt.Fprintf(rw, "flush_all\r\n"); err != nil {
+			return err
+		}
+		if err := rw.Flush(); err != nil {
+			return err
+		}
+		line, err := rw.ReadSlice('\n')
+		if err != nil {
+			return err
+		}
+		switch {
+		case bytes.Equal(line, resultOk):
+			break
+		default:
+			return fmt.Errorf("memcache: unexpected response line from flush_all: %q", string(line))
+		}
+		return nil
+	})
+}
+
+func (c *Client) touchFromAddr(addr net.Addr, keys []string, expiration int32) error {
+	return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
+		for _, key := range keys {
+			if _, err := fmt.Fprintf(rw, "touch %s %d\r\n", key, expiration); err != nil {
+				return err
+			}
+			if err := rw.Flush(); err != nil {
+				return err
+			}
+			line, err := rw.ReadSlice('\n')
+			if err != nil {
+				return err
+			}
+			switch {
+			case bytes.Equal(line, resultTouched):
+				break
+			case bytes.Equal(line, resultNotFound):
+				return ErrCacheMiss
+			default:
+				return fmt.Errorf("memcache: unexpected response line from touch: %q", string(line))
+			}
+		}
+		return nil
+	})
+}
+
+// GetMulti is a batch version of Get. The returned map from keys to
+// items may have fewer elements than the input slice, due to memcache
+// cache misses. Each key must be at most 250 bytes in length.
+// If no error is returned, the returned map will also be non-nil.
+func (c *Client) GetMulti(keys []string) (map[string]*Item, error) {
+	var lk sync.Mutex
+	m := make(map[string]*Item)
+	addItemToMap := func(it *Item) {
+		lk.Lock()
+		defer lk.Unlock()
+		m[it.Key] = it
+	}
+
+	keyMap := make(map[net.Addr][]string)
+	for _, key := range keys {
+		if !legalKey(key) {
+			return nil, ErrMalformedKey
+		}
+		addr, err := c.selector.PickServer(key)
+		if err != nil {
+			return nil, err
+		}
+		keyMap[addr] = append(keyMap[addr], key)
+	}
+
+	ch := make(chan error, buffered)
+	for addr, keys := range keyMap {
+		go func(addr net.Addr, keys []string) {
+			ch <- c.getFromAddr(addr, keys, addItemToMap)
+		}(addr, keys)
+	}
+
+	var err error
+	for _ = range keyMap {
+		if ge := <-ch; ge != nil {
+			err = ge
+		}
+	}
+	return m, err
+}
+
+// parseGetResponse reads a GET response from r and calls cb for each
+// read and allocated Item
+func parseGetResponse(r *bufio.Reader, cb func(*Item)) error {
+	for {
+		line, err := r.ReadSlice('\n')
+		if err != nil {
+			return err
+		}
+		if bytes.Equal(line, resultEnd) {
+			return nil
+		}
+		it := new(Item)
+		size, err := scanGetResponseLine(line, it)
+		if err != nil {
+			return err
+		}
+		it.Value = make([]byte, size+2)
+		_, err = io.ReadFull(r, it.Value)
+		if err != nil {
+			it.Value = nil
+			return err
+		}
+		if !bytes.HasSuffix(it.Value, crlf) {
+			it.Value = nil
+			return fmt.Errorf("memcache: corrupt get result read")
+		}
+		it.Value = it.Value[:size]
+		cb(it)
+	}
+}
+
+// scanGetResponseLine populates it and returns the declared size of the item.
+// It does not read the bytes of the item.
+func scanGetResponseLine(line []byte, it *Item) (size int, err error) {
+	pattern := "VALUE %s %d %d %d\r\n"
+	dest := []interface{}{&it.Key, &it.Flags, &size, &it.casid}
+	if bytes.Count(line, space) == 3 {
+		pattern = "VALUE %s %d %d\r\n"
+		dest = dest[:3]
+	}
+	n, err := fmt.Sscanf(string(line), pattern, dest...)
+	if err != nil || n != len(dest) {
+		return -1, fmt.Errorf("memcache: unexpected line in get response: %q", line)
+	}
+	return size, nil
+}
+
+// Set writes the given item, unconditionally.
+func (c *Client) Set(item *Item) error {
+	return c.onItem(item, (*Client).set)
+}
+
+func (c *Client) set(rw *bufio.ReadWriter, item *Item) error {
+	return c.populateOne(rw, "set", item)
+}
+
+// Add writes the given item, if no value already exists for its
+// key. ErrNotStored is returned if that condition is not met.
+func (c *Client) Add(item *Item) error {
+	return c.onItem(item, (*Client).add)
+}
+
+func (c *Client) add(rw *bufio.ReadWriter, item *Item) error {
+	return c.populateOne(rw, "add", item)
+}
+
+// Replace writes the given item, but only if the server *does*
+// already hold data for this key
+func (c *Client) Replace(item *Item) error {
+	return c.onItem(item, (*Client).replace)
+}
+
+func (c *Client) replace(rw *bufio.ReadWriter, item *Item) error {
+	return c.populateOne(rw, "replace", item)
+}
+
+// CompareAndSwap writes the given item that was previously returned
+// by Get, if the value was neither modified or evicted between the
+// Get and the CompareAndSwap calls. The item's Key should not change
+// between calls but all other item fields may differ. ErrCASConflict
+// is returned if the value was modified in between the
+// calls. ErrNotStored is returned if the value was evicted in between
+// the calls.
+func (c *Client) CompareAndSwap(item *Item) error {
+	return c.onItem(item, (*Client).cas)
+}
+
+func (c *Client) cas(rw *bufio.ReadWriter, item *Item) error {
+	return c.populateOne(rw, "cas", item)
+}
+
+func (c *Client) populateOne(rw *bufio.ReadWriter, verb string, item *Item) error {
+	if !legalKey(item.Key) {
+		return ErrMalformedKey
+	}
+	var err error
+	if verb == "cas" {
+		_, err = fmt.Fprintf(rw, "%s %s %d %d %d %d\r\n",
+			verb, item.Key, item.Flags, item.Expiration, len(item.Value), item.casid)
+	} else {
+		_, err = fmt.Fprintf(rw, "%s %s %d %d %d\r\n",
+			verb, item.Key, item.Flags, item.Expiration, len(item.Value))
+	}
+	if err != nil {
+		return err
+	}
+	if _, err = rw.Write(item.Value); err != nil {
+		return err
+	}
+	if _, err := rw.Write(crlf); err != nil {
+		return err
+	}
+	if err := rw.Flush(); err != nil {
+		return err
+	}
+	line, err := rw.ReadSlice('\n')
+	if err != nil {
+		return err
+	}
+	switch {
+	case bytes.Equal(line, resultStored):
+		return nil
+	case bytes.Equal(line, resultNotStored):
+		return ErrNotStored
+	case bytes.Equal(line, resultExists):
+		return ErrCASConflict
+	case bytes.Equal(line, resultNotFound):
+		return ErrCacheMiss
+	}
+	return fmt.Errorf("memcache: unexpected response line from %q: %q", verb, string(line))
+}
+
+func writeReadLine(rw *bufio.ReadWriter, format string, args ...interface{}) ([]byte, error) {
+	_, err := fmt.Fprintf(rw, format, args...)
+	if err != nil {
+		return nil, err
+	}
+	if err := rw.Flush(); err != nil {
+		return nil, err
+	}
+	line, err := rw.ReadSlice('\n')
+	return line, err
+}
+
+func writeExpectf(rw *bufio.ReadWriter, expect []byte, format string, args ...interface{}) error {
+	line, err := writeReadLine(rw, format, args...)
+	if err != nil {
+		return err
+	}
+	switch {
+	case bytes.Equal(line, resultOK):
+		return nil
+	case bytes.Equal(line, expect):
+		return nil
+	case bytes.Equal(line, resultNotStored):
+		return ErrNotStored
+	case bytes.Equal(line, resultExists):
+		return ErrCASConflict
+	case bytes.Equal(line, resultNotFound):
+		return ErrCacheMiss
+	}
+	return fmt.Errorf("memcache: unexpected response line: %q", string(line))
+}
+
+// Delete deletes the item with the provided key. The error ErrCacheMiss is
+// returned if the item didn't already exist in the cache.
+func (c *Client) Delete(key string) error {
+	return c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
+		return writeExpectf(rw, resultDeleted, "delete %s\r\n", key)
+	})
+}
+
+// DeleteAll deletes all items in the cache.
+func (c *Client) DeleteAll() error {
+	return c.withKeyRw("", func(rw *bufio.ReadWriter) error {
+		return writeExpectf(rw, resultDeleted, "flush_all\r\n")
+	})
+}
+
+// Increment atomically increments key by delta. The return value is
+// the new value after being incremented or an error. If the value
+// didn't exist in memcached, the error is ErrCacheMiss. The value in
+// memcached must be a decimal number, or an error will be returned.
+// On 64-bit overflow, the new value wraps around.
+func (c *Client) Increment(key string, delta uint64) (newValue uint64, err error) {
+	return c.incrDecr("incr", key, delta)
+}
+
+// Decrement atomically decrements key by delta. The return value is
+// the new value after being decremented or an error. If the value
+// didn't exist in memcached, the error is ErrCacheMiss. The value in
+// memcached must be a decimal number, or an error will be returned.
+// On underflow, the new value is capped at zero and does not wrap
+// around.
+func (c *Client) Decrement(key string, delta uint64) (newValue uint64, err error) {
+	return c.incrDecr("decr", key, delta)
+}
+
+func (c *Client) incrDecr(verb, key string, delta uint64) (uint64, error) {
+	var val uint64
+	err := c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
+		line, err := writeReadLine(rw, "%s %s %d\r\n", verb, key, delta)
+		if err != nil {
+			return err
+		}
+		switch {
+		case bytes.Equal(line, resultNotFound):
+			return ErrCacheMiss
+		case bytes.HasPrefix(line, resultClientErrorPrefix):
+			errMsg := line[len(resultClientErrorPrefix) : len(line)-2]
+			return errors.New("memcache: client error: " + string(errMsg))
+		}
+		val, err = strconv.ParseUint(string(line[:len(line)-2]), 10, 64)
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+	return val, err
+}
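
The client above speaks the plain-text memcached protocol. A minimal usage sketch against a local memcached instance; the address, keys, and values are illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/bradfitz/gomemcache/memcache"
)

func main() {
	mc := memcache.New("127.0.0.1:11211")

	// Set stores the item unconditionally.
	err := mc.Set(&memcache.Item{Key: "greeting", Value: []byte("hello"), Expiration: 60})
	if err != nil {
		log.Fatal(err)
	}

	// Get returns ErrCacheMiss if the key is absent or expired.
	it, err := mc.Get("greeting")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s = %s\n", it.Key, it.Value)

	// Increment only works on values that are decimal numbers.
	if err := mc.Set(&memcache.Item{Key: "counter", Value: []byte("1")}); err == nil {
		n, _ := mc.Increment("counter", 4)
		fmt.Println(n) // 5
	}
}
```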

+ 129 - 0
vendor/github.com/bradfitz/gomemcache/memcache/selector.go

@@ -0,0 +1,129 @@
+/*
+Copyright 2011 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package memcache
+
+import (
+	"hash/crc32"
+	"net"
+	"strings"
+	"sync"
+)
+
+// ServerSelector is the interface that selects a memcache server
+// as a function of the item's key.
+//
+// All ServerSelector implementations must be safe for concurrent use
+// by multiple goroutines.
+type ServerSelector interface {
+	// PickServer returns the server address that a given item
+// should be sharded onto.
+	PickServer(key string) (net.Addr, error)
+	Each(func(net.Addr) error) error
+}
+
+// ServerList is a simple ServerSelector. Its zero value is usable.
+type ServerList struct {
+	mu    sync.RWMutex
+	addrs []net.Addr
+}
+
+// staticAddr caches the Network() and String() values from any net.Addr.
+type staticAddr struct {
+	ntw, str string
+}
+
+func newStaticAddr(a net.Addr) net.Addr {
+	return &staticAddr{
+		ntw: a.Network(),
+		str: a.String(),
+	}
+}
+
+func (s *staticAddr) Network() string { return s.ntw }
+func (s *staticAddr) String() string  { return s.str }
+
+// SetServers changes a ServerList's set of servers at runtime and is
+// safe for concurrent use by multiple goroutines.
+//
+// Each server is given equal weight. A server is given more weight
+// if it's listed multiple times.
+//
+// SetServers returns an error if any of the server names fail to
+// resolve. No attempt is made to connect to the server. If any error
+// is returned, no changes are made to the ServerList.
+func (ss *ServerList) SetServers(servers ...string) error {
+	naddr := make([]net.Addr, len(servers))
+	for i, server := range servers {
+		if strings.Contains(server, "/") {
+			addr, err := net.ResolveUnixAddr("unix", server)
+			if err != nil {
+				return err
+			}
+			naddr[i] = newStaticAddr(addr)
+		} else {
+			tcpaddr, err := net.ResolveTCPAddr("tcp", server)
+			if err != nil {
+				return err
+			}
+			naddr[i] = newStaticAddr(tcpaddr)
+		}
+	}
+
+	ss.mu.Lock()
+	defer ss.mu.Unlock()
+	ss.addrs = naddr
+	return nil
+}
+
+// Each iterates over each server calling the given function
+func (ss *ServerList) Each(f func(net.Addr) error) error {
+	ss.mu.RLock()
+	defer ss.mu.RUnlock()
+	for _, a := range ss.addrs {
+		if err := f(a); nil != err {
+			return err
+		}
+	}
+	return nil
+}
+
+// keyBufPool returns []byte buffers for use by PickServer's call to
+// crc32.ChecksumIEEE to avoid allocations. (but doesn't avoid the
+// copies, which at least are bounded in size and small)
+var keyBufPool = sync.Pool{
+	New: func() interface{} {
+		b := make([]byte, 256)
+		return &b
+	},
+}
+
+func (ss *ServerList) PickServer(key string) (net.Addr, error) {
+	ss.mu.RLock()
+	defer ss.mu.RUnlock()
+	if len(ss.addrs) == 0 {
+		return nil, ErrNoServers
+	}
+	if len(ss.addrs) == 1 {
+		return ss.addrs[0], nil
+	}
+	bufp := keyBufPool.Get().(*[]byte)
+	n := copy(*bufp, key)
+	cs := crc32.ChecksumIEEE((*bufp)[:n])
+	keyBufPool.Put(bufp)
+
+	return ss.addrs[cs%uint32(len(ss.addrs))], nil
+}
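
ServerList is the default ServerSelector: keys are distributed across servers by CRC-32 of the key. A short sketch of configuring a client through an explicit selector instead of memcache.New; the addresses are illustrative.

```go
package main

import (
	"log"

	"github.com/bradfitz/gomemcache/memcache"
)

func main() {
	// A server listed twice receives roughly twice as many keys.
	ss := new(memcache.ServerList)
	if err := ss.SetServers("10.0.0.1:11211", "10.0.0.2:11211", "10.0.0.2:11211"); err != nil {
		log.Fatal(err)
	}

	mc := memcache.NewFromSelector(ss)
	_ = mc.Set(&memcache.Item{Key: "k", Value: []byte("v")}) // error ignored in this sketch

	// The server set can be swapped at runtime; SetServers is safe for concurrent use.
	_ = ss.SetServers("10.0.0.3:11211")
}
```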

+ 8 - 0
vendor/github.com/cespare/xxhash/v2/.travis.yml

@@ -0,0 +1,8 @@
+language: go
+go:
+  - "1.x"
+  - master
+env:
+  - TAGS=""
+  - TAGS="-tags purego"
+script: go test $TAGS -v ./...

+ 22 - 0
vendor/github.com/cespare/xxhash/v2/LICENSE.txt

@@ -0,0 +1,22 @@
+Copyright (c) 2016 Caleb Spare
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 55 - 0
vendor/github.com/cespare/xxhash/v2/README.md

@@ -0,0 +1,55 @@
+# xxhash
+
+[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
+[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
+
+xxhash is a Go implementation of the 64-bit
+[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+high-quality hashing algorithm that is much faster than anything in the Go
+standard library.
+
+This package provides a straightforward API:
+
+```
+func Sum64(b []byte) uint64
+func Sum64String(s string) uint64
+type Digest struct{ ... }
+    func New() *Digest
+```
+
+The `Digest` type implements hash.Hash64. Its key methods are:
+
+```
+func (*Digest) Write([]byte) (int, error)
+func (*Digest) WriteString(string) (int, error)
+func (*Digest) Sum64() uint64
+```
+
+The package provides a fast pure-Go implementation and an even faster
+assembly implementation for amd64.
+
+## Benchmarks
+
+Here are some quick benchmarks comparing the pure-Go and assembly
+implementations of Sum64.
+
+| input size | purego | asm |
+| --- | --- | --- |
+| 5 B   |  979.66 MB/s |  1291.17 MB/s  |
+| 100 B | 7475.26 MB/s | 7973.40 MB/s  |
+| 4 KB  | 17573.46 MB/s | 17602.65 MB/s |
+| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+
+These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
+the following commands under Go 1.11.2:
+
+```
+$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
+$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+```
+
+## Projects using this package
+
+- [InfluxDB](https://github.com/influxdata/influxdb)
+- [Prometheus](https://github.com/prometheus/prometheus)
+- [FreeCache](https://github.com/coocood/freecache)

+ 3 - 0
vendor/github.com/cespare/xxhash/v2/go.mod

@@ -0,0 +1,3 @@
+module github.com/cespare/xxhash/v2
+
+go 1.13

+ 0 - 0
vendor/github.com/cespare/xxhash/v2/go.sum


+ 236 - 0
vendor/github.com/cespare/xxhash/v2/xxhash.go

@@ -0,0 +1,236 @@
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+// at http://cyan4973.github.io/xxHash/.
+package xxhash
+
+import (
+	"encoding/binary"
+	"errors"
+	"math/bits"
+)
+
+const (
+	prime1 uint64 = 11400714785074694791
+	prime2 uint64 = 14029467366897019727
+	prime3 uint64 = 1609587929392839161
+	prime4 uint64 = 9650029242287828579
+	prime5 uint64 = 2870177450012600261
+)
+
+// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
+// possible in the Go code is worth a small (but measurable) performance boost
+// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
+// convenience in the Go code in a few places where we need to intentionally
+// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
+// result overflows a uint64).
+var (
+	prime1v = prime1
+	prime2v = prime2
+	prime3v = prime3
+	prime4v = prime4
+	prime5v = prime5
+)
+
+// Digest implements hash.Hash64.
+type Digest struct {
+	v1    uint64
+	v2    uint64
+	v3    uint64
+	v4    uint64
+	total uint64
+	mem   [32]byte
+	n     int // how much of mem is used
+}
+
+// New creates a new Digest that computes the 64-bit xxHash algorithm.
+func New() *Digest {
+	var d Digest
+	d.Reset()
+	return &d
+}
+
+// Reset clears the Digest's state so that it can be reused.
+func (d *Digest) Reset() {
+	d.v1 = prime1v + prime2
+	d.v2 = prime2
+	d.v3 = 0
+	d.v4 = -prime1v
+	d.total = 0
+	d.n = 0
+}
+
+// Size always returns 8 bytes.
+func (d *Digest) Size() int { return 8 }
+
+// BlockSize always returns 32 bytes.
+func (d *Digest) BlockSize() int { return 32 }
+
+// Write adds more data to d. It always returns len(b), nil.
+func (d *Digest) Write(b []byte) (n int, err error) {
+	n = len(b)
+	d.total += uint64(n)
+
+	if d.n+n < 32 {
+		// This new data doesn't even fill the current block.
+		copy(d.mem[d.n:], b)
+		d.n += n
+		return
+	}
+
+	if d.n > 0 {
+		// Finish off the partial block.
+		copy(d.mem[d.n:], b)
+		d.v1 = round(d.v1, u64(d.mem[0:8]))
+		d.v2 = round(d.v2, u64(d.mem[8:16]))
+		d.v3 = round(d.v3, u64(d.mem[16:24]))
+		d.v4 = round(d.v4, u64(d.mem[24:32]))
+		b = b[32-d.n:]
+		d.n = 0
+	}
+
+	if len(b) >= 32 {
+		// One or more full blocks left.
+		nw := writeBlocks(d, b)
+		b = b[nw:]
+	}
+
+	// Store any remaining partial block.
+	copy(d.mem[:], b)
+	d.n = len(b)
+
+	return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+func (d *Digest) Sum(b []byte) []byte {
+	s := d.Sum64()
+	return append(
+		b,
+		byte(s>>56),
+		byte(s>>48),
+		byte(s>>40),
+		byte(s>>32),
+		byte(s>>24),
+		byte(s>>16),
+		byte(s>>8),
+		byte(s),
+	)
+}
+
+// Sum64 returns the current hash.
+func (d *Digest) Sum64() uint64 {
+	var h uint64
+
+	if d.total >= 32 {
+		v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+		h = mergeRound(h, v1)
+		h = mergeRound(h, v2)
+		h = mergeRound(h, v3)
+		h = mergeRound(h, v4)
+	} else {
+		h = d.v3 + prime5
+	}
+
+	h += d.total
+
+	i, end := 0, d.n
+	for ; i+8 <= end; i += 8 {
+		k1 := round(0, u64(d.mem[i:i+8]))
+		h ^= k1
+		h = rol27(h)*prime1 + prime4
+	}
+	if i+4 <= end {
+		h ^= uint64(u32(d.mem[i:i+4])) * prime1
+		h = rol23(h)*prime2 + prime3
+		i += 4
+	}
+	for i < end {
+		h ^= uint64(d.mem[i]) * prime5
+		h = rol11(h) * prime1
+		i++
+	}
+
+	h ^= h >> 33
+	h *= prime2
+	h ^= h >> 29
+	h *= prime3
+	h ^= h >> 32
+
+	return h
+}
+
+const (
+	magic         = "xxh\x06"
+	marshaledSize = len(magic) + 8*5 + 32
+)
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d *Digest) MarshalBinary() ([]byte, error) {
+	b := make([]byte, 0, marshaledSize)
+	b = append(b, magic...)
+	b = appendUint64(b, d.v1)
+	b = appendUint64(b, d.v2)
+	b = appendUint64(b, d.v3)
+	b = appendUint64(b, d.v4)
+	b = appendUint64(b, d.total)
+	b = append(b, d.mem[:d.n]...)
+	b = b[:len(b)+len(d.mem)-d.n]
+	return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Digest) UnmarshalBinary(b []byte) error {
+	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
+		return errors.New("xxhash: invalid hash state identifier")
+	}
+	if len(b) != marshaledSize {
+		return errors.New("xxhash: invalid hash state size")
+	}
+	b = b[len(magic):]
+	b, d.v1 = consumeUint64(b)
+	b, d.v2 = consumeUint64(b)
+	b, d.v3 = consumeUint64(b)
+	b, d.v4 = consumeUint64(b)
+	b, d.total = consumeUint64(b)
+	copy(d.mem[:], b)
+	b = b[len(d.mem):]
+	d.n = int(d.total % uint64(len(d.mem)))
+	return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+	var a [8]byte
+	binary.LittleEndian.PutUint64(a[:], x)
+	return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+	x := u64(b)
+	return b[8:], x
+}
+
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
+
+func round(acc, input uint64) uint64 {
+	acc += input * prime2
+	acc = rol31(acc)
+	acc *= prime1
+	return acc
+}
+
+func mergeRound(acc, val uint64) uint64 {
+	val = round(0, val)
+	acc ^= val
+	acc = acc*prime1 + prime4
+	return acc
+}
+
+func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
+func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
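
Digest implements hash.Hash64 and can be fed incrementally; the one-shot Sum64 and Sum64String helpers mentioned in the README live in the package's other files. A brief sketch of the streaming API shown here:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	d := xxhash.New()
	d.Write([]byte("hello, "))
	d.Write([]byte("world"))
	fmt.Printf("%016x\n", d.Sum64())

	// The hash state can be serialized mid-stream and restored later.
	state, _ := d.MarshalBinary()
	d2 := xxhash.New()
	_ = d2.UnmarshalBinary(state)
	fmt.Println(d.Sum64() == d2.Sum64()) // true
}
```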

Some files were not shown because too many files changed in this diff