fp_generic.h
// Copyright 2025 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef PRIVACY_PROOFS_ZK_LIB_ALGEBRA_FP_GENERIC_H_
#define PRIVACY_PROOFS_ZK_LIB_ALGEBRA_FP_GENERIC_H_

#include <array>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <utility>

#include "algebra/nat.h"
#include "algebra/static_string.h"
#include "algebra/sysdep.h"
#include "util/panic.h"

namespace proofs {

/*
The FpGeneric class implements a prime finite field with odd modulus m.
Field elements are stored internally in Montgomery form.
*/
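// Illustrative usage sketch (`MyOps` and the modulus string are
// hypothetical placeholders for a concrete OPS policy and prime):
//
//   using F = FpGeneric<4, /*optimized_mul=*/true, MyOps>;
//   F fp("0x...");                      // prime modulus, decimal or 0x-hex
//   F::Elt a = fp.of_scalar(3);
//   F::Elt b = fp.invertf(a);           // b == 1/3 in the field
//   assert(fp.mulf(a, b) == fp.one());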
template <size_t W64, bool optimized_mul, class OPS>
class FpGeneric {
 public:
  // Type alias for a natural number, and the limbs within the nat are public
  // to allow casting and operations.
  using N = Nat<W64>;
  using limb_t = typename N::limb_t;
  using TypeTag = PrimeFieldTypeTag;

  static constexpr size_t kU64 = N::kU64;
  static constexpr size_t kBytes = N::kBytes;
  static constexpr size_t kSubFieldBytes = kBytes;
  static constexpr size_t kBits = N::kBits;
  static constexpr size_t kLimbs = N::kLimbs;

  static constexpr bool kCharacteristicTwo = false;
  static constexpr size_t kNPolyEvaluationPoints = 6;

  /* The Elt struct represents an element of the finite field.
   */
  struct Elt {
    N n;
    bool operator==(const Elt& y) const { return n == y.n; }
    bool operator!=(const Elt& y) const { return !operator==(y); }
  };

  explicit FpGeneric(const N& modulus) : m_(modulus), negm_(N{}) {
    negm_.sub(m_);

    // compute raw_half = (m + 1) / 2 = floor(m / 2) + 1 since m is odd
    N raw_half = m_;
    raw_half.shiftr(1);
    raw_half.add(N(1));
    raw_half_ = Elt{raw_half};

    mprime_ = -inv_mod_b(m_.limb_[0]);
    rsquare_ = Elt{N(1)};
    for (size_t bits = 2 * kBits; bits > 0; bits--) {
      add(rsquare_, rsquare_);
    }

    for (uint64_t i = 0; i < sizeof(k_) / sizeof(k_[0]); ++i) {
      // convert k_[i] into Montgomery form by calling mul0()
      // directly, since mul() requires k_[0] and k_[1] to be
      // defined
      k_[i] = Elt{N(i)};
      mul0(k_[i], rsquare_);
    }

    mone_ = negf(k_[1]);
    half_ = invertf(k_[2]);

    for (size_t i = 0; i < kNPolyEvaluationPoints; ++i) {
      poly_evaluation_points_[i] = of_scalar(i);
      if (i == 0) {
        inv_small_scalars_[i] = zero();
      } else {
        inv_small_scalars_[i] = invertf(poly_evaluation_points_[i]);
      }
    }
  }
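
  // Note on the representation (standard Montgomery arithmetic): with
  // R = 2^kBits, an element X is stored as Elt{X * R mod m}, and
  // rsquare_ = R^2 mod m is obtained above by doubling 1 exactly
  // 2*kBits times. to_montgomery() computes mul(X, R^2) =
  // X * R^2 * R^{-1} = X * R (mod m), and in particular one() holds
  // R mod m rather than the raw integer 1.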

  explicit FpGeneric(const StaticString s) : FpGeneric(N(s)) {}

  template <size_t LEN>
  explicit FpGeneric(const char (&s)[LEN]) : FpGeneric(N(s)) {}

  // Hack: works only if OPS::kModulus is defined, and will
  // fail to compile otherwise.
  explicit FpGeneric() : FpGeneric(N(OPS::kModulus)) {}

  FpGeneric(const FpGeneric&) = delete;
  FpGeneric& operator=(const FpGeneric&) = delete;

  template <size_t N>
  Elt of_string(const char (&s)[N]) const {
    return of_charp(&s[0]);
  }

  Elt of_string(const StaticString& s) const { return of_charp(s.as_pointer); }

  std::optional<Elt> of_untrusted_string(const char* s) const {
    auto maybe = N::of_untrusted_string(s);
    if (maybe.has_value() && maybe.value() < m_) {
      return to_montgomery(maybe.value());
    } else {
      return std::nullopt;
    }
  }
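
  // Illustrative caller-side check (a sketch; `fp` and `user_input` are
  // hypothetical):
  //
  //   std::optional<Elt> e = fp.of_untrusted_string(user_input);
  //   if (!e.has_value()) { /* reject: not a canonical field element */ }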

  // a += y
  void add(Elt& a, const Elt& y) const {
    if (kLimbs == 1) {
      limb_t aa = a.n.limb_[0], yy = y.n.limb_[0], mm = m_.limb_[0];
      a.n.limb_[0] = addcmovc(aa - mm, yy, aa + yy);
    } else {
      limb_t ah = add_limb(kLimbs, a.n.limb_, y.n.limb_);
      maybe_minus_m(a.n.limb_, ah);
    }
  }

  // a -= y
  void sub(Elt& a, const Elt& y) const {
    if (kLimbs == 1) {
      a.n.limb_[0] = sub_sysdep(a.n.limb_[0], y.n.limb_[0], m_.limb_[0]);
    } else {
      limb_t ah = sub_limb(kLimbs, a.n.limb_, y.n.limb_);
      maybe_plus_m(a.n.limb_, ah);
    }
  }
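
  // Worked example of the one-limb add() above (illustrative numbers,
  // assuming addcmovc(x, y, z) returns x + y when that addition carries
  // and z otherwise): with m = 13, aa = 11, yy = 5, the sum
  // (aa - mm) + yy wraps past 2^bitsPerLimb exactly because
  // aa + yy >= m, so the reduced value 11 + 5 - 13 = 3 is selected;
  // for aa = 5, yy = 5 there is no carry and aa + yy = 10 is kept.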

  // x *= y, Montgomery
  void mul(Elt& x, const Elt& y) const {
    if (optimized_mul) {
      if (x == zero() || y == one()) {
        return;
      }
      if (y == zero() || x == one()) {
        x = y;
        return;
      }
    }
    mul0(x, y);
  }

  // x = -x
  void neg(Elt& x) const {
    Elt y(k_[0]);
    sub(y, x);
    x = y;
  }

  // x = 1/x
  void invert(Elt& x) const { x = invertf(x); }

  // functional interface
  Elt addf(Elt a, const Elt& y) const {
    add(a, y);
    return a;
  }
  Elt subf(Elt a, const Elt& y) const {
    sub(a, y);
    return a;
  }
  Elt mulf(Elt a, const Elt& y) const {
    mul(a, y);
    return a;
  }
  Elt negf(Elt a) const {
    neg(a);
    return a;
  }

  // This is the binary extended gcd algorithm, modified
  // to return the inverse of x.
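  // Sketch of the loop invariant: with X = from_montgomery(x) and
  // R = 2^kBits, the loop below maintains u*X == a*R (mod m) and
  // v*X == b*R (mod m). Initially a = X, b = m, u = one() = R mod m,
  // v = 0. On exit a = 0 and b = gcd(X, m) = 1, so v*X == R (mod m),
  // i.e. v is the Montgomery encoding of X^{-1}.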
  Elt invertf(Elt x) const {
    N a = from_montgomery(x);
    N b = m_;
    Elt u = one();
    Elt v = zero();
    while (a != /*zero*/ N{}) {
      if ((a.limb_[0] & 0x1u) == 0) {
        a.shiftr(1);
        byhalf(u);
      } else {
        if (a < b) {  // swap to maintain invariant
          std::swap(a, b);
          std::swap(u, v);
        }
        a.sub(b).shiftr(1);  // a = (a-b)/2
        sub(u, v);
        byhalf(u);
      }
    }
    return v;
  }

  // Reference implementation, unused.
  N from_montgomery_reference(const Elt& x) const {
    Elt r{N(1)};
    mul(r, x);
    return r.n;
  }

  // Optimized implementation of from_montgomery_reference(), exploiting
  // the fact that the multiplicand is Elt{N(1)}.
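  // Sketch of the reduction (standard word-by-word Montgomery
  // reduction): with b = 2^bitsPerLimb, each reduction_step adds
  // (a[i] * mprime_ mod b) * m at limb i, zeroing that limb; after
  // kLimbs steps the top half of a holds x * R^{-1} mod m, up to one
  // conditional subtraction of m, which is the raw integer value of x.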
  N from_montgomery(const Elt& x) const {
    limb_t a[2 * kLimbs + 1];  // uninitialized
    mov(kLimbs, a, x.n.limb_);
    a[kLimbs] = zero_limb<limb_t>();
    for (size_t i = 0; i < kLimbs; ++i) {
      a[i + kLimbs + 1] = zero_limb<limb_t>();
      OPS::reduction_step(&a[i], mprime_, m_);
    }
    maybe_minus_m(a + kLimbs, a[2 * kLimbs]);
    N r;
    mov(kLimbs, r.limb_, a + kLimbs);
    return r;
  }

  Elt to_montgomery(const N& xn) const {
    Elt x{xn};
    mul(x, rsquare_);
    return x;
  }

  bool in_subfield(const Elt& e) const { return true; }

  // The of_scalar methods should only be used on trusted inputs known
  // at compile time to be valid field elements. As a result, they return
  // Elt directly instead of std::optional, and panic if the condition is not
  // satisfied. All untrusted input should be handled via the of_bytes_field()
  // and of_bytes_subfield() methods.
  Elt of_scalar(uint64_t a) const { return of_scalar_field(a); }

  // basis for the binary representation of of_scalar(), so that
  // of_scalar(sum_i b[i] 2^i) = sum_i b[i] beta(i)
  Elt beta(size_t i) const {
    check(i < 64, "i < 64");
    return of_scalar(static_cast<uint64_t>(1) << i);
  }
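
  // For example, beta(3) == of_scalar(8), and by the identity above
  // of_scalar(10) == addf(beta(3), beta(1)).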

  Elt of_scalar_field(uint64_t a) const { return of_scalar_field(N(a)); }
  Elt of_scalar_field(const std::array<uint64_t, W64>& a) const {
    return of_scalar_field(N(a));
  }
  Elt of_scalar_field(const N& a) const {
    check(a < m_, "of_scalar must be less than m");
    return to_montgomery(a);
  }

  std::optional<Elt> of_bytes_field(const uint8_t ab[/* kBytes */]) const {
    N an = N::of_bytes(ab);
    if (an < m_) {
      return to_montgomery(an);
    } else {
      return std::nullopt;
    }
  }

  void to_bytes_field(uint8_t ab[/* kBytes */], const Elt& x) const {
    from_montgomery(x).to_bytes(ab);
  }

  std::optional<Elt> of_bytes_subfield(const uint8_t ab[/* kBytes */]) const {
    return of_bytes_field(ab);
  }

  void to_bytes_subfield(uint8_t ab[/* kBytes */], const Elt& x) const {
    to_bytes_field(ab, x);
  }
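
  // Illustrative round trip through the byte serialization (`fp` and `x`
  // are hypothetical):
  //
  //   uint8_t buf[kBytes];
  //   fp.to_bytes_field(buf, x);
  //   std::optional<Elt> back = fp.of_bytes_field(buf);  // == x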

  const Elt& zero() const { return k_[0]; }
  const Elt& one() const { return k_[1]; }
  const Elt& two() const { return k_[2]; }
  const Elt& half() const { return half_; }
  const Elt& mone() const { return mone_; }

  Elt poly_evaluation_point(size_t i) const {
    check(i < kNPolyEvaluationPoints, "i < kNPolyEvaluationPoints");
    return poly_evaluation_points_[i];
  }

  // return (X[k] - X[k - i])^{-1}, where X[j] is the j-th poly
  // evaluation point.
  Elt newton_denominator(size_t k, size_t i) const {
    check(k < kNPolyEvaluationPoints, "k < kNPolyEvaluationPoints");
    check(i <= k, "i <= k");
    check(k != (k - i), "k != (k - i)");
    return inv_small_scalars_[/* k - (k - i) = */ i];
  }
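
  // Concrete instance: the constructor sets X[j] = of_scalar(j), so
  // X[k] - X[k - i] == of_scalar(i) and, e.g.,
  // newton_denominator(3, 2) == invertf(of_scalar(2)) == half().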

  // Type for counters. For prime fields counters and field
  // elements have the same representation, so all conversions
  // are trivial.
  struct CElt {
    Elt e;
  };
  CElt as_counter(uint64_t a) const { return CElt{of_scalar_field(a)}; }

  // Convert a counter into *some* field element such that the counter is
  // zero (as a counter) iff the field element is zero.
  Elt znz_indicator(const CElt& celt) const { return celt.e; }

 private:
  void maybe_minus_m(limb_t a[kLimbs], limb_t ah) const {
    limb_t a1[kLimbs];
    mov(kLimbs, a1, negm_.limb_);
    limb_t ah1 = add_limb(kLimbs, a1, a);
    cmovne(kLimbs, a, ah, ah1, a1);
  }
  void maybe_plus_m(limb_t a[kLimbs], limb_t ah) const {
    limb_t a1[kLimbs];
    mov(kLimbs, a1, a);
    (void)add_limb(kLimbs, a1, m_.limb_);
    cmovnz(kLimbs, a, ah, a1);
  }

  void byhalf(Elt& a) const {
    if (a.n.shiftr(1) != 0) {
      // the lost bit is a raw 1, not one() in Montgomery form,
      // hence we must add a raw 1/2, not half(). For odd a,
      // a/2 = (a >> 1) + (m + 1)/2 (mod m).
      add(a, raw_half_);
    }
  }

  // unoptimized Montgomery multiplication that does not
  // depend on the constants zero() and one() being defined.
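  // Sketch of the algorithm (operand-scanning Montgomery
  // multiplication): each mulstep below accumulates x.limb_[i] * y
  // into the running sum and applies one reduction step, so after
  // kLimbs iterations the top half of a holds x * y * R^{-1} mod m,
  // up to one conditional subtraction of m.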
  void mul0(Elt& x, const Elt& y) const {
    limb_t a[2 * kLimbs + 1];  // uninitialized
    mulstep<true>(a, x.n.limb_[0], y.n.limb_);
    for (size_t i = 1; i < kLimbs; ++i) {
      mulstep<false>(a + i, x.n.limb_[i], y.n.limb_);
    }
    maybe_minus_m(a + kLimbs, a[2 * kLimbs]);
    mov(kLimbs, x.n.limb_, a + kLimbs);
  }

  template <bool first>
  inline void mulstep(limb_t* a, limb_t x, const limb_t y[kLimbs]) const {
    if (kLimbs == 1) {
      // The general case (below) represents the (kLimbs+1)-word
      // product as L+(H<<bitsPerLimb), where in general L and H
      // overlap, requiring two additions. For kLimbs==1, L and H do
      // not overlap, and we can interpret [L, H] as a single
      // double-precision number.
      a[2] = zero_limb<limb_t>();
      check(first, "mulstep template must have first=true for 1 limb");
      mulhl(1, a, a + 1, x, y);
      OPS::reduction_step(a, mprime_, m_);
    } else {
      limb_t l[kLimbs], h[kLimbs];
      a[kLimbs + 1] = zero_limb<limb_t>();
      if (first) {
        a[kLimbs] = zero_limb<limb_t>();
        mulhl(kLimbs, a, h, x, y);
      } else {
        mulhl(kLimbs, l, h, x, y);
        accum(kLimbs + 1, a, kLimbs, l);
      }
      accum(kLimbs + 1, a + 1, kLimbs, h);
      OPS::reduction_step(a, mprime_, m_);
    }
  }

  // This method should only be used on static strings known at
  // compile time to be valid field elements. We make it
  // private to prevent misuse.
  Elt of_charp(const char* s) const {
    Elt a(k_[0]);
    Elt base = of_scalar(10);
    if (s[0] == '0' && (s[1] == 'x' || s[1] == 'X')) {
      s += 2;
      base = of_scalar(16);
    }

    for (; *s; s++) {
      Elt d = of_scalar(digit(*s));
      mul(a, base);
      add(a, d);
    }
    return a;
  }

  N m_;
  N negm_;
  Elt rsquare_;
  limb_t mprime_;
  Elt k_[3];      // small constants
  Elt half_;      // 1/2
  Elt raw_half_;
  Elt mone_;      // minus one
  Elt poly_evaluation_points_[kNPolyEvaluationPoints];
  Elt inv_small_scalars_[kNPolyEvaluationPoints];
};
}  // namespace proofs

#endif  // PRIVACY_PROOFS_ZK_LIB_ALGEBRA_FP_GENERIC_H_