// Copyright (c) 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package field

import "math/bits"

// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
// bits.Mul64 and bits.Add64 intrinsics.
type uint128 struct {
	lo, hi uint64
}

// mul64 returns a * b.
func mul64(a, b uint64) uint128 {
	hi, lo := bits.Mul64(a, b)
	return uint128{lo, hi}
}

// addMul64 returns v + a * b.
func addMul64(v uint128, a, b uint64) uint128 {
	hi, lo := bits.Mul64(a, b)
	lo, c := bits.Add64(lo, v.lo, 0)
	hi, _ = bits.Add64(hi, v.hi, c)
	return uint128{lo, hi}
}

// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
func shiftRightBy51(a uint128) uint64 {
	return (a.hi << (64 - 51)) | (a.lo >> 51)
}

func feMulGeneric(v, a, b *Element) {
	a0 := a.l0
	a1 := a.l1
	a2 := a.l2
	a3 := a.l3
	a4 := a.l4

	b0 := b.l0
	b1 := b.l1
	b2 := b.l2
	b3 := b.l3
	b4 := b.l4

	// Limb multiplication works like pen-and-paper columnar multiplication, but
	// with 51-bit limbs instead of digits.
	//
	//                          a4   a3   a2   a1   a0  x
	//                          b4   b3   b2   b1   b0  =
	//                         ------------------------
	//                        a4b0 a3b0 a2b0 a1b0 a0b0  +
	//                   a4b1 a3b1 a2b1 a1b1 a0b1       +
	//              a4b2 a3b2 a2b2 a1b2 a0b2            +
	//         a4b3 a3b3 a2b3 a1b3 a0b3                 +
	//    a4b4 a3b4 a2b4 a1b4 a0b4                      =
	//   ----------------------------------------------
	//      r8   r7   r6   r5   r4   r3   r2   r1   r0
	//
	// We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
	// reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
	// r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
	//
	// Reduction can be carried out simultaneously to multiplication. For
	// example, we do not compute r5: whenever the result of a multiplication
	// belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
	//
	//            a4b0    a3b0    a2b0    a1b0    a0b0  +
	//            a3b1    a2b1    a1b1    a0b1 19×a4b1  +
	//            a2b2    a1b2    a0b2 19×a4b2 19×a3b2  +
	//            a1b3    a0b3 19×a4b3 19×a3b3 19×a2b3  +
	//            a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4  =
	//           --------------------------------------
	//              r4      r3      r2      r1      r0
	//
	// Finally we add up the columns into wide, overlapping limbs.
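	//
	// The reduction identity holds because the field modulus is p = 2²⁵⁵ - 19,
	// so 2²⁵⁵ ≡ 19 (mod p). With 51-bit limbs, the r5 column carries weight
	// 2^(5×51) = 2²⁵⁵, which is exactly why its terms can be folded into the
	// r0 column after a multiplication by 19.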

	a1_19 := a1 * 19
	a2_19 := a2 * 19
	a3_19 := a3 * 19
	a4_19 := a4 * 19

	// r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
	r0 := mul64(a0, b0)
	r0 = addMul64(r0, a1_19, b4)
	r0 = addMul64(r0, a2_19, b3)
	r0 = addMul64(r0, a3_19, b2)
	r0 = addMul64(r0, a4_19, b1)

	// r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
	r1 := mul64(a0, b1)
	r1 = addMul64(r1, a1, b0)
	r1 = addMul64(r1, a2_19, b4)
	r1 = addMul64(r1, a3_19, b3)
	r1 = addMul64(r1, a4_19, b2)

	// r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
	r2 := mul64(a0, b2)
	r2 = addMul64(r2, a1, b1)
	r2 = addMul64(r2, a2, b0)
	r2 = addMul64(r2, a3_19, b4)
	r2 = addMul64(r2, a4_19, b3)

	// r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
	r3 := mul64(a0, b3)
	r3 = addMul64(r3, a1, b2)
	r3 = addMul64(r3, a2, b1)
	r3 = addMul64(r3, a3, b0)
	r3 = addMul64(r3, a4_19, b4)

	// r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
	r4 := mul64(a0, b4)
	r4 = addMul64(r4, a1, b3)
	r4 = addMul64(r4, a2, b2)
	r4 = addMul64(r4, a3, b1)
	r4 = addMul64(r4, a4, b0)

	// After the multiplication, we need to reduce (carry) the five coefficients
	// to obtain a result with limbs that are at most slightly larger than 2⁵¹,
	// to respect the Element invariant.
	//
	// Overall, the reduction works the same as carryPropagate, except with
	// wider inputs: we take the carry for each coefficient by shifting it right
	// by 51, and add it to the limb above it. The top carry is multiplied by 19
	// according to the reduction identity and added to the lowest limb.
	//
	// The largest coefficient (r0) will be at most 111 bits, which guarantees
	// that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
	//
	//     r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
	//     r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
	//     r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
	//     r0 < 2⁷ × 2⁵² × 2⁵²
	//     r0 < 2¹¹¹
	//
	// Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
	// 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
	// allows us to easily apply the reduction identity.
	//
	//     r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
	//     r4 < 5 × 2⁵² × 2⁵²
	//     r4 < 2¹⁰⁷
	//

	c0 := shiftRightBy51(r0)
	c1 := shiftRightBy51(r1)
	c2 := shiftRightBy51(r2)
	c3 := shiftRightBy51(r3)
	c4 := shiftRightBy51(r4)

	rr0 := r0.lo&maskLow51Bits + c4*19
	rr1 := r1.lo&maskLow51Bits + c0
	rr2 := r2.lo&maskLow51Bits + c1
	rr3 := r3.lo&maskLow51Bits + c2
	rr4 := r4.lo&maskLow51Bits + c3

	// Now all coefficients fit into 64-bit registers but are still too large to
	// be passed around as an Element. We therefore do one last carry chain,
	// where the carries will be small enough to fit in the wiggle room above 2⁵¹.
	*v = Element{rr0, rr1, rr2, rr3, rr4}
	v.carryPropagate()
}

func feSquareGeneric(v, a *Element) {
	l0 := a.l0
	l1 := a.l1
	l2 := a.l2
	l3 := a.l3
	l4 := a.l4

	// Squaring works precisely like multiplication above, but thanks to its
	// symmetry we get to group a few terms together.
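	// For example, the cross term l1×l4 shows up twice in the columnar
	// multiplication (as l1×l4 and as l4×l1), so it can be computed once and
	// doubled; combined with the 19× reduction factor, this produces the 38×
	// precomputed terms used below.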
	//
	//                          l4   l3   l2   l1   l0  x
	//                          l4   l3   l2   l1   l0  =
	//                         ------------------------
	//                        l4l0 l3l0 l2l0 l1l0 l0l0  +
	//                   l4l1 l3l1 l2l1 l1l1 l0l1       +
	//              l4l2 l3l2 l2l2 l1l2 l0l2            +
	//         l4l3 l3l3 l2l3 l1l3 l0l3                 +
	//    l4l4 l3l4 l2l4 l1l4 l0l4                      =
	//   ----------------------------------------------
	//      r8   r7   r6   r5   r4   r3   r2   r1   r0
	//
	//            l4l0    l3l0    l2l0    l1l0    l0l0  +
	//            l3l1    l2l1    l1l1    l0l1 19×l4l1  +
	//            l2l2    l1l2    l0l2 19×l4l2 19×l3l2  +
	//            l1l3    l0l3 19×l4l3 19×l3l3 19×l2l3  +
	//            l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4  =
	//           --------------------------------------
	//              r4      r3      r2      r1      r0
	//
	// With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
	// only three Mul64 and four Add64, instead of five and eight.

	l0_2 := l0 * 2
	l1_2 := l1 * 2

	l1_38 := l1 * 38
	l2_38 := l2 * 38
	l3_38 := l3 * 38

	l3_19 := l3 * 19
	l4_19 := l4 * 19

	// r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
	r0 := mul64(l0, l0)
	r0 = addMul64(r0, l1_38, l4)
	r0 = addMul64(r0, l2_38, l3)

	// r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
	r1 := mul64(l0_2, l1)
	r1 = addMul64(r1, l2_38, l4)
	r1 = addMul64(r1, l3_19, l3)

	// r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
	r2 := mul64(l0_2, l2)
	r2 = addMul64(r2, l1, l1)
	r2 = addMul64(r2, l3_38, l4)

	// r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
	r3 := mul64(l0_2, l3)
	r3 = addMul64(r3, l1_2, l2)
	r3 = addMul64(r3, l4_19, l4)

	// r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
	r4 := mul64(l0_2, l4)
	r4 = addMul64(r4, l1_2, l3)
	r4 = addMul64(r4, l2, l2)

	c0 := shiftRightBy51(r0)
	c1 := shiftRightBy51(r1)
	c2 := shiftRightBy51(r2)
	c3 := shiftRightBy51(r3)
	c4 := shiftRightBy51(r4)

	rr0 := r0.lo&maskLow51Bits + c4*19
	rr1 := r1.lo&maskLow51Bits + c0
	rr2 := r2.lo&maskLow51Bits + c1
	rr3 := r3.lo&maskLow51Bits + c2
	rr4 := r4.lo&maskLow51Bits + c3

	*v = Element{rr0, rr1, rr2, rr3, rr4}
	v.carryPropagate()
}

// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction
// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry.
func (v *Element) carryPropagateGeneric() *Element {
	c0 := v.l0 >> 51
	c1 := v.l1 >> 51
	c2 := v.l2 >> 51
	c3 := v.l3 >> 51
	c4 := v.l4 >> 51

	// c4 is at most 64 - 51 = 13 bits, so c4*19 is at most 18 bits, and
	// the final l0 will be at most 52 bits. Similarly for the rest.
	v.l0 = v.l0&maskLow51Bits + c4*19
	v.l1 = v.l1&maskLow51Bits + c0
	v.l2 = v.l2&maskLow51Bits + c1
	v.l3 = v.l3&maskLow51Bits + c2
	v.l4 = v.l4&maskLow51Bits + c3

	return v
}
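
// checkSquareMatchesMul is an illustrative sketch, not part of the original
// implementation; the function name is hypothetical. Since squaring is
// multiplication with both operands equal, feSquareGeneric must agree with
// feMulGeneric applied to (a, a): both compute exactly the same column sums,
// only grouped differently, and both reduce them the same way.
func checkSquareMatchesMul(a *Element) bool {
	var bySquare, byMul Element
	feSquareGeneric(&bySquare, a)
	feMulGeneric(&byMul, a, a)
	// Both results went through the same carry propagation, so they must
	// match limb for limb.
	return bySquare == byMul
}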
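
// exampleReductionIdentity is likewise an illustrative sketch with a
// hypothetical name: it shows the reduction identity on a concrete value.
// The element below represents 2²⁵⁵ (its l4 limb holds 2⁵¹, which sits at
// bit position 4×51 + 51 = 255), and a single carry pass folds it down to
// 19, matching 2²⁵⁵ ≡ 19 (mod 2²⁵⁵ - 19).
func exampleReductionIdentity() bool {
	v := Element{0, 0, 0, 0, 1 << 51}
	v.carryPropagateGeneric()
	return v == Element{19, 0, 0, 0, 0}
}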