Longfellow ZK 0290cb32
fp_generic.h
// Copyright 2025 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef PRIVACY_PROOFS_ZK_LIB_ALGEBRA_FP_GENERIC_H_
#define PRIVACY_PROOFS_ZK_LIB_ALGEBRA_FP_GENERIC_H_

#include <array>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <utility>

#include "algebra/nat.h"
#include "algebra/static_string.h"
#include "algebra/sysdep.h"
#include "util/panic.h"

namespace proofs {
struct PrimeFieldTypeTag {};

/*
The FpGeneric class implements arithmetic in a prime finite field.
Elements are stored internally in Montgomery form.
*/
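
// Illustrative usage sketch (commentary added to this listing, not part of the
// original header). `MyOps` is a hypothetical limb-operations type supplying
// reduction_step() and a kModulus constant, standing in for any concrete OPS;
// the small scalars below assume the modulus is larger than them.
//
//   using Fp = FpGeneric<4, /*optimized_mul=*/true, MyOps>;
//   Fp F;                                  // modulus taken from MyOps::kModulus
//   Fp::Elt a = F.of_scalar(7);
//   Fp::Elt b = F.of_scalar(5);
//   Fp::Elt c = F.mulf(F.addf(a, b), F.invertf(b));  // (a + b) / b
//   uint8_t buf[Fp::kBytes];
//   F.to_bytes_field(buf, c);              // serialize as kBytes bytes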
template <size_t W64, bool optimized_mul, class OPS>
class FpGeneric {
 public:
  // Type alias for the underlying natural number; the limbs within the nat
  // are public to allow casting and limb-level operations.
  using N = Nat<W64>;
  using limb_t = typename N::limb_t;
  using TypeTag = PrimeFieldTypeTag;

  static constexpr size_t kU64 = N::kU64;
  static constexpr size_t kBytes = N::kBytes;
  static constexpr size_t kSubFieldBytes = kBytes;
  static constexpr size_t kBits = N::kBits;
  static constexpr size_t kLimbs = N::kLimbs;

  static constexpr bool kCharacteristicTwo = false;
  static constexpr size_t kNPolyEvaluationPoints = 6;

  /* The Elt struct represents an element of the finite field.
   */
  struct Elt {
    N n;
    bool operator==(const Elt& y) const { return n == y.n; }
    bool operator!=(const Elt& y) const { return !operator==(y); }
  };

  explicit FpGeneric(const N& modulus) : m_(modulus), negm_(N{}) {
    negm_.sub(m_);

    // compute raw_half_ = (m + 1) / 2 = floor(m / 2) + 1, which holds since m is odd
    N raw_half = m_;
    raw_half.shiftr(1);
    raw_half.add(N(1));
    raw_half_ = Elt{raw_half};

    mprime_ = -inv_mod_b(m_.limb_[0]);
    rsquare_ = Elt{N(1)};
    for (size_t bits = 2 * kBits; bits > 0; bits--) {
      add(rsquare_, rsquare_);
    }

    for (uint64_t i = 0; i < sizeof(k_) / sizeof(k_[0]); ++i) {
      // convert k_[i] into Montgomery form by calling mul0()
      // directly, since mul() requires k_[0] and k_[1] to be
      // defined
      k_[i] = Elt{N(i)};
      mul0(k_[i], rsquare_);
    }

    mone_ = negf(k_[1]);
    half_ = invertf(k_[2]);

    for (size_t i = 0; i < kNPolyEvaluationPoints; ++i) {
      poly_evaluation_points_[i] = of_scalar(i);
      if (i == 0) {
        inv_small_scalars_[i] = zero();
      } else {
        inv_small_scalars_[i] = invertf(poly_evaluation_points_[i]);
      }
    }
  }
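
  // Note added to this listing (not in the original header): with R = 2^kBits,
  // an element x is stored in Montgomery form as x*R mod m, and mul() computes
  // x*y*R^{-1} mod m.  The constructor prepares rsquare_ = R^2 mod m (by
  // doubling 1 a total of 2*kBits times), mprime_ = -m^{-1} modulo the limb
  // base for OPS::reduction_step(), and k_[i] = i in Montgomery form, so that
  // to_montgomery(x) = mul(x, rsquare_) = x*R and from_montgomery() undoes it.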

  explicit FpGeneric(const StaticString s) : FpGeneric(N(s)) {}

  template <size_t LEN>
  explicit FpGeneric(const char (&s)[LEN]) : FpGeneric(N(s)) {}

  // Hack: works only if OPS::kModulus is defined, and will
  // fail to compile otherwise.
  explicit FpGeneric() : FpGeneric(N(OPS::kModulus)) {}

  FpGeneric(const FpGeneric&) = delete;
  FpGeneric& operator=(const FpGeneric&) = delete;

  template <size_t LEN>
  Elt of_string(const char (&s)[LEN]) const {
    return of_charp(&s[0]);
  }

  Elt of_string(const StaticString& s) const { return of_charp(s.as_pointer); }

  std::optional<Elt> of_untrusted_string(const char* s) const {
    auto maybe = N::of_untrusted_string(s);
    if (maybe.has_value() && maybe.value() < m_) {
      return to_montgomery(maybe.value());
    } else {
      return std::nullopt;
    }
  }

  // a += y
  void add(Elt& a, const Elt& y) const {
    if (kLimbs == 1) {
      limb_t aa = a.n.limb_[0], yy = y.n.limb_[0], mm = m_.limb_[0];
      a.n.limb_[0] = addcmovc(aa - mm, yy, aa + yy);
    } else {
      limb_t ah = add_limb(kLimbs, a.n.limb_, y.n.limb_);
      maybe_minus_m(a.n.limb_, ah);
    }
  }

  // a -= y
  void sub(Elt& a, const Elt& y) const {
    if (kLimbs == 1) {
      a.n.limb_[0] = sub_sysdep(a.n.limb_[0], y.n.limb_[0], m_.limb_[0]);
    } else {
      limb_t ah = sub_limb(kLimbs, a.n.limb_, y.n.limb_);
      maybe_plus_m(a.n.limb_, ah);
    }
  }

  // x *= y, Montgomery
  void mul(Elt& x, const Elt& y) const {
    if (optimized_mul) {
      if (x == zero() || y == one()) {
        return;
      }
      if (y == zero() || x == one()) {
        x = y;
        return;
      }
    }
    mul0(x, y);
  }

  // x = -x
  void neg(Elt& x) const {
    Elt y(k_[0]);
    sub(y, x);
    x = y;
  }

  // x = 1/x
  void invert(Elt& x) const { x = invertf(x); }

  // functional interface
  Elt addf(Elt a, const Elt& y) const {
    add(a, y);
    return a;
  }
  Elt subf(Elt a, const Elt& y) const {
    sub(a, y);
    return a;
  }
  Elt mulf(Elt a, const Elt& y) const {
    mul(a, y);
    return a;
  }
  Elt negf(Elt a) const {
    neg(a);
    return a;
  }

  // This is the binary extended gcd algorithm, modified
  // to return the inverse of x.
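  //
  // Note added to this listing: the loop below maintains the invariant
  // u*x == a (mod m) and v*x == b (mod m), where a, b are plain naturals and
  // u, v are field elements.  Each step either halves an even a (halving u
  // modulo m via byhalf()), or replaces the larger of a, b by (a - b)/2 with
  // the matching update of u.  When a reaches 0, b = gcd(x, m), which is 1 for
  // nonzero x and prime m, so v*x == 1 (mod m) and v is the inverse.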
  Elt invertf(Elt x) const {
    N a = from_montgomery(x);
    N b = m_;
    Elt u = one();
    Elt v = zero();
    while (a != /*zero*/ N{}) {
      if ((a.limb_[0] & 0x1u) == 0) {
        a.shiftr(1);
        byhalf(u);
      } else {
        if (a < b) {  // swap to maintain invariant
          std::swap(a, b);
          std::swap(u, v);
        }
        a.sub(b).shiftr(1);  // a = (a-b)/2
        sub(u, v);
        byhalf(u);
      }
    }
    return v;
  }

  // Reference implementation, unused.
  N from_montgomery_reference(const Elt& x) const {
    Elt r{N(1)};
    mul(r, x);
    return r.n;
  }

  // Optimized implementation of from_montgomery_reference(), exploiting
  // the fact that the multiplicand is Elt{N(1)}.
  N from_montgomery(const Elt& x) const {
    limb_t a[2 * kLimbs + 1];  // uninitialized
    mov(kLimbs, a, x.n.limb_);
    a[kLimbs] = zero_limb<limb_t>();
    for (size_t i = 0; i < kLimbs; ++i) {
      a[i + kLimbs + 1] = zero_limb<limb_t>();
      OPS::reduction_step(&a[i], mprime_, m_);
    }
    maybe_minus_m(a + kLimbs, a[2 * kLimbs]);
    N r;
    mov(kLimbs, r.limb_, a + kLimbs);
    return r;
  }

  Elt to_montgomery(const N& xn) const {
    Elt x{xn};
    mul(x, rsquare_);
    return x;
  }

  bool in_subfield(const Elt& e) const { return true; }

  // The of_scalar methods should only be used on trusted inputs known
  // at compile time to be valid field elements. As a result, they return
  // Elt directly instead of std::optional, and panic if the condition is not
  // satisfied. All untrusted input should be handled via the of_bytes_field()
  // and of_bytes_subfield() methods.
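  //
  // Illustrative example added to this listing: given a field instance F and a
  // buffer buf of kBytes untrusted bytes, validation goes through
  // of_bytes_field(), which returns std::nullopt when the encoded value is not
  // below m:
  //
  //   auto e = F.of_bytes_field(buf);
  //   if (!e.has_value()) { /* reject the input */ }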
  Elt of_scalar(uint64_t a) const { return of_scalar_field(a); }

  Elt of_scalar_field(uint64_t a) const { return of_scalar_field(N(a)); }
  Elt of_scalar_field(const std::array<uint64_t, W64>& a) const {
    return of_scalar_field(N(a));
  }
  Elt of_scalar_field(const N& a) const {
    check(a < m_, "of_scalar must be less than m");
    return to_montgomery(a);
  }

  std::optional<Elt> of_bytes_field(const uint8_t ab[/* kBytes */]) const {
    N an = N::of_bytes(ab);
    if (an < m_) {
      return to_montgomery(an);
    } else {
      return std::nullopt;
    }
  }

  void to_bytes_field(uint8_t ab[/* kBytes */], const Elt& x) const {
    from_montgomery(x).to_bytes(ab);
  }

  std::optional<Elt> of_bytes_subfield(const uint8_t ab[/* kBytes */]) const {
    return of_bytes_field(ab);
  }

  void to_bytes_subfield(uint8_t ab[/* kBytes */], const Elt& x) const {
    to_bytes_field(ab, x);
  }

  const Elt& zero() const { return k_[0]; }
  const Elt& one() const { return k_[1]; }
  const Elt& two() const { return k_[2]; }
  const Elt& half() const { return half_; }
  const Elt& mone() const { return mone_; }

  Elt poly_evaluation_point(size_t i) const {
    check(i < kNPolyEvaluationPoints, "i < kNPolyEvaluationPoints");
    return poly_evaluation_points_[i];
  }

  // return (X[k] - X[k - i])^{-1}, where X[i] is the
  // i-th poly evaluation point.
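  //
  // Note added to this listing: poly_evaluation_point(j) == of_scalar(j), so
  // X[k] - X[k - i] equals the small scalar i, whose inverse is the
  // precomputed inv_small_scalars_[i].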
  Elt newton_denominator(size_t k, size_t i) const {
    check(k < kNPolyEvaluationPoints, "k < kNPolyEvaluationPoints");
    check(i <= k, "i <= k");
    check(k != (k - i), "k != (k - i)");
    return inv_small_scalars_[/* k - (k - i) = */ i];
  }

 private:
  void maybe_minus_m(limb_t a[kLimbs], limb_t ah) const {
    limb_t a1[kLimbs];
    mov(kLimbs, a1, negm_.limb_);
    limb_t ah1 = add_limb(kLimbs, a1, a);
    cmovne(kLimbs, a, ah, ah1, a1);
  }
  void maybe_plus_m(limb_t a[kLimbs], limb_t ah) const {
    limb_t a1[kLimbs];
    mov(kLimbs, a1, a);
    (void)add_limb(kLimbs, a1, m_.limb_);
    cmovnz(kLimbs, a, ah, a1);
  }

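  // Note added to this listing: byhalf() computes a <- a/2 (mod m).  When the
  // dropped low bit is 1, a was odd, and a/2 mod m equals floor(a/2) + (m+1)/2,
  // which is why the raw constant raw_half_ = (m+1)/2 is added back.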
  void byhalf(Elt& a) const {
    if (a.n.shiftr(1) != 0) {
      // the lost bit is a raw 1, not one() in Montgomery form,
      // hence we must add a raw 1/2, not half().
      add(a, raw_half_);
    }
  }

  // unoptimized Montgomery multiplication that does not
  // depend on the constants zero() and one() being defined.
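  //
  // Note added to this listing: this is word-serial Montgomery multiplication.
  // Each mulstep() pass folds one limb of x times y into the accumulator and
  // then applies one OPS::reduction_step(), so after kLimbs passes the
  // Montgomery product x*y*R^{-1} sits in the upper kLimbs words of a, up to
  // one final conditional subtraction of m (maybe_minus_m).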
  void mul0(Elt& x, const Elt& y) const {
    limb_t a[2 * kLimbs + 1];  // uninitialized
    mulstep<true>(a, x.n.limb_[0], y.n.limb_);
    for (size_t i = 1; i < kLimbs; ++i) {
      mulstep<false>(a + i, x.n.limb_[i], y.n.limb_);
    }
    maybe_minus_m(a + kLimbs, a[2 * kLimbs]);
    mov(kLimbs, x.n.limb_, a + kLimbs);
  }

  template <bool first>
  inline void mulstep(limb_t* a, limb_t x, const limb_t y[kLimbs]) const {
    if (kLimbs == 1) {
      // The general case (below) represents the (kLimbs+1)-word
      // product as L+(H<<bitsPerLimb), where in general L and H
      // overlap, requiring two additions. For kLimbs==1, L and H do
      // not overlap, and we can interpret [L, H] as a single
      // double-precision number.
      a[2] = zero_limb<limb_t>();
      check(first, "mulstep template must have first=true for 1 limb");
      mulhl(1, a, a + 1, x, y);
      OPS::reduction_step(a, mprime_, m_);
    } else {
      limb_t l[kLimbs], h[kLimbs];
      a[kLimbs + 1] = zero_limb<limb_t>();
      if (first) {
        a[kLimbs] = zero_limb<limb_t>();
        mulhl(kLimbs, a, h, x, y);
      } else {
        mulhl(kLimbs, l, h, x, y);
        accum(kLimbs + 1, a, kLimbs, l);
      }
      accum(kLimbs + 1, a + 1, kLimbs, h);
      OPS::reduction_step(a, mprime_, m_);
    }
  }

  // This method should only be used on static strings known at
  // compile time to be valid field elements. We make it
  // private to prevent misuse.
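  //
  // Example added to this listing: of_charp("31") and of_charp("0x1F") both
  // produce the field element 31 (assuming the modulus exceeds 31); a leading
  // "0x" or "0X" switches the parse base from 10 to 16.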
  Elt of_charp(const char* s) const {
    Elt a(k_[0]);
    Elt base = of_scalar(10);
    if (s[0] == '0' && (s[1] == 'x' || s[1] == 'X')) {
      s += 2;
      base = of_scalar(16);
    }

    for (; *s; s++) {
      Elt d = of_scalar(digit(*s));
      mul(a, base);
      add(a, d);
    }
    return a;
  }

  N m_;  // the modulus m
  N negm_;  // -m mod 2^kBits, used for conditional reduction
  Elt rsquare_;  // R^2 mod m, where R = 2^kBits
  limb_t mprime_;  // -m^{-1} modulo the limb base
  Elt k_[3];  // small constants 0, 1, 2 in Montgomery form
  Elt half_;  // 1/2
  Elt raw_half_;  // (m + 1)/2, stored without Montgomery conversion
  Elt mone_;  // minus one
  Elt poly_evaluation_points_[kNPolyEvaluationPoints];
  Elt inv_small_scalars_[kNPolyEvaluationPoints];
};
}  // namespace proofs

#endif  // PRIVACY_PROOFS_ZK_LIB_ALGEBRA_FP_GENERIC_H_