// sum_s390x.s
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

#include "textflag.h"

// This implementation of Poly1305 uses the vector facility (vx)
// to process up to 2 blocks (32 bytes) per iteration using an
// algorithm based on the one described in:
//
// NEON crypto, Daniel J. Bernstein & Peter Schwabe
// https://cryptojedi.org/papers/neoncrypto-20120320.pdf
//
// This algorithm uses 5 26-bit limbs to represent a 130-bit
// value. These limbs are, for the most part, zero extended and
// placed into 64-bit vector register elements. Each vector
// register is 128-bits wide and so holds 2 of these elements.
// Using 26-bit limbs allows us plenty of headroom to accommodate
// accumulations before and after multiplication without
// overflowing either 32-bits (before multiplication) or 64-bits
// (after multiplication).
//
// In order to parallelise the operations required to calculate
// the sum we use two separate accumulators and then sum those
// in an extra final step. For compatibility with the generic
// implementation we perform this summation at the end of every
// updateVX call.
//
// To use two accumulators we must multiply the message blocks
// by r² rather than r. Only the final message block should be
// multiplied by r.
//
// Example:
//
// We want to calculate the sum (h) for a 64 byte message (m):
//
//   h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r
//
// To do this we split the calculation into the even indices
// and odd indices of the message. These form our SIMD 'lanes':
//
//   h = m[ 0:16]r⁴ + m[32:48]r² +  <- lane 0
//       m[16:32]r³ + m[48:64]r     <- lane 1
//
// To calculate this iteratively we refactor so that both lanes
// are written in terms of r² and r:
//
//   h = (m[ 0:16]r² + m[32:48])r² + <- lane 0
//       (m[16:32]r² + m[48:64])r    <- lane 1
//                  ^ ^
//                  | coefficients for second iteration
//                  coefficients for first iteration
//
// So in this case we would have two iterations. In the first
// both lanes are multiplied by r². In the second only the
// first lane is multiplied by r² and the second lane is
// instead multiplied by r. This gives us the odd and even
// powers of r that we need from the original equation.
//
// Notation:
//
//   h - accumulator
//   r - key
//   m - message
//
//   [a, b]       - SIMD register holding two 64-bit values
//   [a, b, c, d] - SIMD register holding four 32-bit values
//   xᵢ[n]        - limb n of variable x with bit width i
//
// Limbs are expressed in little endian order, so for 26-bit
// limbs x₂₆[4] will be the most significant limb and x₂₆[0]
// will be the least significant limb.

// masking constants
#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits
#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits

// expansion constants (see EXPAND macro)
#define EX0 V2
#define EX1 V3
#define EX2 V4

// key (r², r or 1 depending on context)
#define R_0 V5
#define R_1 V6
#define R_2 V7
#define R_3 V8
#define R_4 V9

// precalculated coefficients (5r², 5r or 0 depending on context)
#define R5_1 V10
#define R5_2 V11
#define R5_3 V12
#define R5_4 V13

// message block (m)
#define M_0 V14
#define M_1 V15
#define M_2 V16
#define M_3 V17
#define M_4 V18

// accumulator (h)
#define H_0 V19
#define H_1 V20
#define H_2 V21
#define H_3 V22
#define H_4 V23

// temporary registers (for short-lived values)
#define T_0 V24
#define T_1 V25
#define T_2 V26
#define T_3 V27
#define T_4 V28
// Byte-permutation tables used by the EXPAND macro (see below).
// Indices 0x00-0x0f select bytes of the first VPERM input and
// 0x10-0x1f select bytes of the second input.
GLOBL ·constants<>(SB), RODATA, $0x30

// EX0
DATA ·constants<>+0x00(SB)/8, $0x0006050403020100
DATA ·constants<>+0x08(SB)/8, $0x1016151413121110

// EX1
DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706
DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716

// EX2
DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d
DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d
// MULTIPLY multiplies each lane of f and g, partially reduced
// modulo 2¹³⁰ - 5. The result, h, consists of partial products
// in each lane that need to be reduced further to produce the
// final result.
//
//   h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰
//
// Note that the multiplication by 5 of the high bits is
// achieved by precalculating the multiplication of four of the
// g coefficients by 5. These are g51-g54.
//
// The h and T registers accumulate independent partial-product
// chains (interleaved to hide multiply latency) which are summed
// together with VAG at the end. T_0 through T_4 are clobbered.
#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \
	VMLOF  f0, g0, h0        \
	VMLOF  f0, g3, h3        \
	VMLOF  f0, g1, h1        \
	VMLOF  f0, g4, h4        \
	VMLOF  f0, g2, h2        \
	VMLOF  f1, g54, T_0      \
	VMLOF  f1, g2, T_3       \
	VMLOF  f1, g0, T_1       \
	VMLOF  f1, g3, T_4       \
	VMLOF  f1, g1, T_2       \
	VMALOF f2, g53, h0, h0   \
	VMALOF f2, g1, h3, h3    \
	VMALOF f2, g54, h1, h1   \
	VMALOF f2, g2, h4, h4    \
	VMALOF f2, g0, h2, h2    \
	VMALOF f3, g52, T_0, T_0 \
	VMALOF f3, g0, T_3, T_3  \
	VMALOF f3, g53, T_1, T_1 \
	VMALOF f3, g1, T_4, T_4  \
	VMALOF f3, g54, T_2, T_2 \
	VMALOF f4, g51, h0, h0   \
	VMALOF f4, g54, h3, h3   \
	VMALOF f4, g52, h1, h1   \
	VMALOF f4, g0, h4, h4    \
	VMALOF f4, g53, h2, h2   \
	VAG    T_0, h0, h0       \
	VAG    T_3, h3, h3       \
	VAG    T_1, h1, h1       \
	VAG    T_4, h4, h4       \
	VAG    T_2, h2, h2
// REDUCE performs the following carry operations in four
// stages, as specified in Bernstein & Schwabe:
//
//   1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4]
//   2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0]
//   3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3]
//   4: h₂₆[3]->h₂₆[4]
//
// The result is that all of the limbs are limited to 26-bits
// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits.
//
// Note that although each limb is aligned at 26-bit intervals
// they may contain values that exceed 2²⁶ - 1, hence the need
// to carry the excess bits in each limb.
//
// The carry out of h₂₆[4] wraps around to h₂₆[0] multiplied by
// 5 (T_4 = T_3<<2 + T_3), which is the modular reduction for
// 2¹³⁰ ≡ 5 (mod 2¹³⁰ - 5). T_0 through T_4 are clobbered.
#define REDUCE(h0, h1, h2, h3, h4) \
	VESRLG $26, h0, T_0  \
	VESRLG $26, h3, T_1  \
	VN     MOD26, h0, h0 \
	VN     MOD26, h3, h3 \
	VAG    T_0, h1, h1   \
	VAG    T_1, h4, h4   \
	VESRLG $26, h1, T_2  \
	VESRLG $26, h4, T_3  \
	VN     MOD26, h1, h1 \
	VN     MOD26, h4, h4 \
	VESLG  $2, T_3, T_4  \
	VAG    T_3, T_4, T_4 \
	VAG    T_2, h2, h2   \
	VAG    T_4, h0, h0   \
	VESRLG $26, h2, T_0  \
	VESRLG $26, h0, T_1  \
	VN     MOD26, h2, h2 \
	VN     MOD26, h0, h0 \
	VAG    T_0, h3, h3   \
	VAG    T_1, h1, h1   \
	VESRLG $26, h3, T_2  \
	VN     MOD26, h3, h3 \
	VAG    T_2, h4, h4
// EXPAND splits the 128-bit little-endian values in0 and in1
// into 26-bit big-endian limbs and places the results into
// the first and second lane of d₂₆[0:4] respectively.
//
// The EX0, EX1 and EX2 constants are arrays of byte indices
// for permutation. The permutation both reverses the bytes
// in the input and ensures the bytes are copied into the
// destination limb ready to be shifted into their final
// position.
#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \
	VPERM  in0, in1, EX0, d0 \
	VPERM  in0, in1, EX1, d2 \
	VPERM  in0, in1, EX2, d4 \
	VESRLG $26, d0, d1       \
	VESRLG $30, d2, d3       \
	VESRLG $4, d2, d2        \
	VN     MOD26, d0, d0     \ // [in0₂₆[0], in1₂₆[0]]
	VN     MOD26, d3, d3     \ // [in0₂₆[3], in1₂₆[3]]
	VN     MOD26, d1, d1     \ // [in0₂₆[1], in1₂₆[1]]
	VN     MOD24, d4, d4     \ // [in0₂₆[4], in1₂₆[4]]
	VN     MOD26, d2, d2     // [in0₂₆[2], in1₂₆[2]]
// func updateVX(state *macState, msg []byte)
//
// updateVX absorbs msg into the Poly1305 state using the vector
// facility, processing two 16-byte blocks per loop iteration.
// R1 = state pointer, R2 = msg base, R3 = bytes remaining.
TEXT ·updateVX(SB), NOSPLIT, $0
	MOVD state+0(FP), R1
	LMG  msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len

	// load EX0, EX1 and EX2
	MOVD $·constants<>(SB), R5
	VLM  (R5), EX0, EX2

	// generate masks
	VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff]
	VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff]

	// load h (accumulator) and r (key) from state
	VZERO T_1               // [0, 0]
	VL    0(R1), T_0        // [h₆₄[0], h₆₄[1]]
	VLEG  $0, 16(R1), T_1   // [h₆₄[2], 0]
	VL    24(R1), T_2       // [r₆₄[0], r₆₄[1]]
	VPDI  $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]]
	VPDI  $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]]

	// unpack h and r into 26-bit limbs
	// note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value
	VN     MOD26, T_3, H_0            // [h₂₆[0], r₂₆[0]]
	VZERO  H_1                        // [0, 0]
	VZERO  H_3                        // [0, 0]
	VGMG   $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out
	VESLG  $24, T_1, T_1              // [h₆₄[2]<<24, 0]
	VERIMG $-26&63, T_3, MOD26, H_1   // [h₂₆[1], r₂₆[1]]
	VESRLG $+52&63, T_3, H_2          // [h₂₆[2], r₂₆[2]] - low 12 bits only
	VERIMG $-14&63, T_4, MOD26, H_3   // [h₂₆[3], r₂₆[3]]
	VESRLG $40, T_4, H_4              // [h₂₆[4], r₂₆[4]] - low 24 bits only
	VERIMG $+12&63, T_4, T_0, H_2     // [h₂₆[2], r₂₆[2]] - complete
	VO     T_1, H_4, H_4              // [h₂₆[4], r₂₆[4]] - complete

	// replicate r across all 4 vector elements
	VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]]
	VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]]
	VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]]
	VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]]
	VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]]

	// zero out lane 1 of h
	VLEIG $1, $0, H_0 // [h₂₆[0], 0]
	VLEIG $1, $0, H_1 // [h₂₆[1], 0]
	VLEIG $1, $0, H_2 // [h₂₆[2], 0]
	VLEIG $1, $0, H_3 // [h₂₆[3], 0]
	VLEIG $1, $0, H_4 // [h₂₆[4], 0]

	// calculate 5r (ignore least significant limb)
	VREPIF $5, T_0
	VMLF   T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]]
	VMLF   T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]]
	VMLF   T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]]
	VMLF   T_0, R_4, R5_4 // [5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]]

	// skip r² calculation if we are only calculating one block
	CMPBLE R3, $16, skip

	// calculate r²
	MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4)
	REDUCE(M_0, M_1, M_2, M_3, M_4)
	VGBM   $0x0f0f, T_0
	VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]]
	VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]]
	VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]]
	VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]]
	VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]]

	// calculate 5r² (ignore least significant limb)
	VREPIF $5, T_0
	VMLF   T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]]
	VMLF   T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]]
	VMLF   T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]]
	VMLF   T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]]

loop:
	CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients

	// load next 2 blocks from message
	VLM (R2), T_0, T_1

	// update message slice
	SUB  $32, R3
	MOVD $32(R2), R2

	// unpack message blocks into 26-bit big-endian limbs
	EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)

	// add 2¹²⁸ to each message block value
	VLEIB $4, $1, M_4
	VLEIB $12, $1, M_4

multiply:
	// accumulate the incoming message
	VAG H_0, M_0, M_0
	VAG H_3, M_3, M_3
	VAG H_1, M_1, M_1
	VAG H_4, M_4, M_4
	VAG H_2, M_2, M_2

	// multiply the accumulator by the key coefficient
	MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4)

	// carry and partially reduce the partial products
	REDUCE(H_0, H_1, H_2, H_3, H_4)

	CMPBNE R3, $0, loop

finish:
	// sum lane 0 and lane 1 and put the result in lane 1
	VZERO  T_0
	VSUMQG H_0, T_0, H_0
	VSUMQG H_3, T_0, H_3
	VSUMQG H_1, T_0, H_1
	VSUMQG H_4, T_0, H_4
	VSUMQG H_2, T_0, H_2

	// reduce again after summation
	// TODO(mundaym): there might be a more efficient way to do this
	// now that we only have 1 active lane. For example, we could
	// simultaneously pack the values as we reduce them.
	REDUCE(H_0, H_1, H_2, H_3, H_4)

	// carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1
	// TODO(mundaym): in testing this final carry was unnecessary.
	// Needs a proof before it can be removed though.
	VESRLG $26, H_1, T_1
	VN     MOD26, H_1, H_1
	VAQ    T_1, H_2, H_2
	VESRLG $26, H_2, T_2
	VN     MOD26, H_2, H_2
	VAQ    T_2, H_3, H_3
	VESRLG $26, H_3, T_3
	VN     MOD26, H_3, H_3
	VAQ    T_3, H_4, H_4

	// h is now < 2(2¹³⁰ - 5)
	// Pack each lane in h₂₆[0:4] into h₁₂₈[0:1].
	VESLG $26, H_1, H_1
	VESLG $26, H_3, H_3
	VO    H_0, H_1, H_0
	VO    H_2, H_3, H_2
	VESLG $4, H_2, H_2
	VLEIB $7, $48, H_1
	VSLB  H_1, H_2, H_2
	VO    H_0, H_2, H_0
	VLEIB $7, $104, H_1
	VSLB  H_1, H_4, H_3
	VO    H_3, H_0, H_0
	VLEIB $7, $24, H_1
	VSRLB H_1, H_4, H_1

	// update state
	VSTEG $1, H_0, 0(R1)
	VSTEG $0, H_0, 8(R1)
	VSTEG $1, H_1, 16(R1)
	RET

b2: // 2 or fewer blocks remaining
	CMPBLE R3, $16, b1

	// Load the 2 remaining blocks (17-32 bytes remaining).
	MOVD $-17(R3), R0    // index of final byte to load modulo 16
	VL   (R2), T_0       // load full 16 byte block
	VLL  R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes

	// The Poly1305 algorithm requires that a 1 bit be appended to
	// each message block. If the final block is less than 16 bytes
	// long then it is easiest to insert the 1 before the message
	// block is split into 26-bit limbs. If, on the other hand, the
	// final message block is 16 bytes long then we append the 1 bit
	// after expansion as normal.
	MOVBZ  $1, R0
	MOVD   $-16(R3), R3   // index of byte in last block to insert 1 at (could be 16)
	CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long
	VLVGB  R3, R0, T_1    // insert 1 into the byte at index R3

	// Split both blocks into 26-bit limbs in the appropriate lanes.
	EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)

	// Append a 1 byte to the end of the second to last block.
	VLEIB $4, $1, M_4

	// Append a 1 byte to the end of the last block only if it is a
	// full 16 byte block.
	CMPBNE R3, $16, 2(PC)
	VLEIB  $12, $1, M_4

	// Finally, set up the coefficients for the final multiplication.
	// We have previously saved r and 5r in the 32-bit even indexes
	// of the R_[0-4] and R5_[1-4] coefficient registers.
	//
	// We want lane 0 to be multiplied by r² so that can be kept the
	// same. We want lane 1 to be multiplied by r so we need to move
	// the saved r value into the 32-bit odd index in lane 1 by
	// rotating the 64-bit lane by 32.
	VGBM   $0x00ff, T_0          // [0, 0xffffffffffffffff] - mask lane 1 only
	VERIMG $32, R_0, T_0, R_0    // [_, r²₂₆[0], _, r₂₆[0]]
	VERIMG $32, R_1, T_0, R_1    // [_, r²₂₆[1], _, r₂₆[1]]
	VERIMG $32, R_2, T_0, R_2    // [_, r²₂₆[2], _, r₂₆[2]]
	VERIMG $32, R_3, T_0, R_3    // [_, r²₂₆[3], _, r₂₆[3]]
	VERIMG $32, R_4, T_0, R_4    // [_, r²₂₆[4], _, r₂₆[4]]
	VERIMG $32, R5_1, T_0, R5_1  // [_, 5r²₂₆[1], _, 5r₂₆[1]]
	VERIMG $32, R5_2, T_0, R5_2  // [_, 5r²₂₆[2], _, 5r₂₆[2]]
	VERIMG $32, R5_3, T_0, R5_3  // [_, 5r²₂₆[3], _, 5r₂₆[3]]
	VERIMG $32, R5_4, T_0, R5_4  // [_, 5r²₂₆[4], _, 5r₂₆[4]]

	MOVD $0, R3
	BR   multiply

skip:
	CMPBEQ R3, $0, finish

b1: // 1 block remaining
	// Load the final block (1-16 bytes). This will be placed into
	// lane 0.
	MOVD $-1(R3), R0
	VLL  R0, (R2), T_0 // pad to 16 bytes with zeros

	// The Poly1305 algorithm requires that a 1 bit be appended to
	// each message block. If the final block is less than 16 bytes
	// long then it is easiest to insert the 1 before the message
	// block is split into 26-bit limbs. If, on the other hand, the
	// final message block is 16 bytes long then we append the 1 bit
	// after expansion as normal.
	MOVBZ  $1, R0
	CMPBEQ R3, $16, 2(PC)
	VLVGB  R3, R0, T_0

	// Set the message block in lane 1 to the value 0 so that it
	// can be accumulated without affecting the final result.
	VZERO T_1

	// Split the final message block into 26-bit limbs in lane 0.
	// Lane 1 will contain 0.
	EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)

	// Append a 1 byte to the end of the last block only if it is a
	// full 16 byte block.
	CMPBNE R3, $16, 2(PC)
	VLEIB  $4, $1, M_4

	// We have previously saved r and 5r in the 32-bit even indexes
	// of the R_[0-4] and R5_[1-4] coefficient registers.
	//
	// We want lane 0 to be multiplied by r so we need to move the
	// saved r value into the 32-bit odd index in lane 0. We want
	// lane 1 to be set to the value 1. This makes multiplication
	// a no-op. We do this by setting lane 1 in every register to 0
	// and then just setting the 32-bit index 3 in R_0 to 1.
	VZERO T_0
	MOVD  $0, R0
	MOVD  $0x10111213, R12
	VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000]

	// Move the low 32 bits of the 64-bit lane into the high 32 bits
	// of the same lane (lane 0) and zero lane 1.
	VPERM T_0, R_0, T_1, R_0   // [_, r₂₆[0], _, 0]
	VPERM T_0, R_1, T_1, R_1   // [_, r₂₆[1], _, 0]
	VPERM T_0, R_2, T_1, R_2   // [_, r₂₆[2], _, 0]
	VPERM T_0, R_3, T_1, R_3   // [_, r₂₆[3], _, 0]
	VPERM T_0, R_4, T_1, R_4   // [_, r₂₆[4], _, 0]
	VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0]
	VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0]
	VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0]
	VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0]

	// Set the value of lane 1 to be 1.
	VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1]

	MOVD $0, R3
	BR   multiply