// Copyright 2025 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef PRIVACY_PROOFS_ZK_LIB_ZK_ZK_PROOF_H_
#define PRIVACY_PROOFS_ZK_LIB_ZK_ZK_PROOF_H_

#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

#include "ligero/ligero_param.h"
#include "merkle/merkle_commitment.h"
#include "merkle/merkle_tree.h"
#include "sumcheck/circuit.h"
#include "util/log.h"
#include "util/readbuffer.h"
#include "util/serialization.h"
#include "zk/zk_common.h"

namespace proofs {

// ZkProof class handles proof serialization.
//
// We expect circuits to be created and stored locally by the prover and
// verifier respectively, and thus the circuit representations are trusted and
// are assumed to contain parameters that do not induce arithmetic overflows.
// For example, we assume that values like c.logw and c.logc are smaller than
// 2^24 and therefore do not cause any overflows (even on 32b machines) in the
// range/length calculations that are performed during serialization.
//
// An earlier experiment implemented the IO methods using protobuf parsing.
// Despite applying techniques like arena allocation, those methods required
// an order of magnitude more time.
46template <class Field>
47struct ZkProof {
48 public:
49 const Circuit<Field> &c;
50 Proof<Field> proof;
53 LigeroProof<Field> com_proof;
54
55 // The max run length is 2^25, in order to prevent overflow issues on 32b
56 // machines when performing length calculations during serialization.
57 constexpr static size_t kMaxRunLen = (1 << 25);
58
59 constexpr static size_t kMaxNumDigests = (1 << 25);
60
61 typedef typename Field::Elt Elt;
62
63 explicit ZkProof(const Circuit<Field> &c, size_t rate, size_t req)
64 : c(c),
65 proof(c.nl),
66 param((c.ninputs - c.npub_in) + ZkCommon<Field>::pad_size(c), c.nl,
67 rate, req),
68 com_proof(&param) {}
69
70 // Maximum size of the proof in bytes. The actual size will be smaller
71 // because the Merkle proof is batched.
72 size_t size() const {
73 return Digest::kLength +
74
75 proof.size() * Field::kBytes +
76
77 com_proof.block * 2 * Field::kBytes +
78 com_proof.nreq * com_proof.nrow * Field::kBytes +
79 com_proof.nreq * com_proof.mc_pathlen * Digest::kLength;
80 }
81
82 void write(std::vector<uint8_t> &buf, const Field &F) const {
83 size_t s0 = buf.size();
84 write_com(com, buf, F);
85 size_t s1 = buf.size();
86 write_sc_proof(proof, buf, F);
87 size_t s2 = buf.size();
88 write_com_proof(com_proof, buf, F);
89 size_t s3 = buf.size();
90 log(INFO,
91 "com:%zu, sc:%zu, com_proof:%zu [%zu el, %zu el, %zu d in %zu "
92 "rows]: %zub",
93 s1 - s0, s2 - s1, s3 - s2, 2 * com_proof.block,
94 com_proof.nreq * com_proof.nrow, com_proof.merkle.path.size(),
95 com_proof.nrow, s3);
96 }
97
98 // The read function returns false on error or underflow.
99 bool read(ReadBuffer &buf, const Field &F) {
100 if (!read_com(com, buf, F)) return false;
101 if (!read_sc_proof(proof, buf, F)) return false;
102 if (!read_com_proof(com_proof, buf, F)) return false;
103 return true;
104 }
105
106 void write_sc_proof(const Proof<Field> &pr, std::vector<uint8_t> &buf,
107 const Field &F) const {
108 check(c.logc == 0, "cannot write sc proof with logc != 0");
109 for (size_t i = 0; i < pr.l.size(); ++i) {
110 for (size_t wi = 0; wi < c.l[i].logw; ++wi) {
111 for (size_t k = 0; k < 3; ++k) {
112 // Optimization: do not send p(1) as it is implied by constraints.
113 if (k != 1) {
114 write_elt(pr.l[i].hp[0][wi].t_[k], buf, F);
115 write_elt(pr.l[i].hp[1][wi].t_[k], buf, F);
116 }
117 }
118 }
119 write_elt(pr.l[i].wc[0], buf, F);
120 write_elt(pr.l[i].wc[1], buf, F);
121 }
122 }
123
124 void write_com(const LigeroCommitment<Field> &com0, std::vector<uint8_t> &buf,
125 const Field &F) const {
126 buf.insert(buf.end(), com0.root.data, com0.root.data + Digest::kLength);
127 }
128
129 void write_com_proof(const LigeroProof<Field> &pr, std::vector<uint8_t> &buf,
130 const Field &F) const {
131 for (size_t i = 0; i < pr.block; ++i) {
132 write_elt(pr.y_ldt[i], buf, F);
133 }
134 for (size_t i = 0; i < pr.dblock; ++i) {
135 write_elt(pr.y_dot[i], buf, F);
136 }
137 for (size_t i = 0; i < pr.r; ++i) {
138 write_elt(pr.y_quad_0[i], buf, F);
139 }
140 for (size_t i = 0; i < pr.dblock - pr.block; ++i) {
141 write_elt(pr.y_quad_2[i], buf, F);
142 }
143
144 // write all the Merkle nonces
145 for (size_t i = 0; i < pr.nreq; ++i) {
146 write_nonce(pr.merkle.nonce[i], buf);
147 }
148
149 // The format of the opened rows consists of a run of full-field elements,
150 // then a run of base-field elements, and finally a run of full-field
151 // elements. To compress, we employ a run-length encoding approach.
152 size_t ci = 0;
153 bool subfield_run = false;
154 while (ci < pr.nreq * pr.nrow) {
155 size_t runlen = 0;
156 while (ci + runlen < pr.nreq * pr.nrow && runlen < kMaxRunLen &&
157 F.in_subfield(pr.req[ci + runlen]) == subfield_run) {
158 ++runlen;
159 }
160 write_size(runlen, buf);
161 for (size_t i = ci; i < ci + runlen; ++i) {
162 if (subfield_run) {
163 write_subfield_elt(pr.req[i], buf, F);
164 } else {
165 write_elt(pr.req[i], buf, F);
166 }
167 }
168 ci += runlen;
169 subfield_run = !subfield_run;
170 }
171
172 write_size(pr.merkle.path.size(), buf);
173 for (size_t i = 0; i < pr.merkle.path.size(); ++i) {
174 write_digest(pr.merkle.path[i], buf);
175 }
176 }
177
178 private:
179 void write_elt(const Elt &x, std::vector<uint8_t> &buf,
180 const Field &F) const {
181 uint8_t tmp[Field::kBytes];
182 F.to_bytes_field(tmp, x);
183 buf.insert(buf.end(), tmp, tmp + Field::kBytes);
184 }
185
186 void write_subfield_elt(const Elt &x, std::vector<uint8_t> &buf,
187 const Field &F) const {
188 uint8_t tmp[Field::kSubFieldBytes];
189 F.to_bytes_subfield(tmp, x);
190 buf.insert(buf.end(), tmp, tmp + Field::kSubFieldBytes);
191 }
192
193 void write_digest(const Digest &x, std::vector<uint8_t> &buf) const {
194 buf.insert(buf.end(), x.data, x.data + Digest::kLength);
195 }
196
197 void write_nonce(const MerkleNonce &x, std::vector<uint8_t> &buf) const {
198 buf.insert(buf.end(), x.bytes, x.bytes + MerkleNonce::kLength);
199 }
200
201 // Assumption is that all of the sizes of arrays that are part of proofs
202 // fit into 4 bytes, and can thus work on 32-b machines.
203 void write_size(size_t g, std::vector<uint8_t> &buf) const {
204 for (size_t i = 0; i < 4; ++i) {
205 buf.push_back(static_cast<uint8_t>(g & 0xff));
206 g >>= 8;
207 }
208 }
209
210 bool read_sc_proof(Proof<Field> &pr, ReadBuffer &buf, const Field &F) {
211 if (c.logc != 0) return false;
212 for (size_t i = 0; i < pr.l.size(); ++i) {
213 size_t needed = (c.l[i].logw * (3 - 1) * 2 + 2) * Field::kBytes;
214 if (!buf.have(needed)) return false;
215 for (size_t wi = 0; wi < c.l[i].logw; ++wi) {
216 for (size_t k = 0; k < 3; ++k) {
217 // Optimization: the p(1) value was not sent.
218 if (k != 1) {
219 for (size_t hi = 0; hi < 2; ++hi) {
220 auto v = read_elt(buf, F);
221 if (v) {
222 pr.l[i].hp[hi][wi].t_[k] = v.value();
223 } else {
224 return false;
225 }
226 }
227 } else {
228 pr.l[i].hp[0][wi].t_[k] = F.zero();
229 pr.l[i].hp[1][wi].t_[k] = F.zero();
230 }
231 }
232 }
233 for (size_t wi = 0; wi < 2; ++wi) {
234 auto v = read_elt(buf, F);
235 if (v) {
236 pr.l[i].wc[wi] = v.value();
237 } else {
238 return false;
239 }
240 }
241 }
242 return true;
243 }
244
245 bool read_com(LigeroCommitment<Field> &com0, ReadBuffer &buf,
246 const Field &F) {
247 if (!buf.have(Digest::kLength)) return false;
248 read_digest(buf, com0.root);
249 return true;
250 }
251
252 bool read_com_proof(LigeroProof<Field> &pr, ReadBuffer &buf, const Field &F) {
253 if (!buf.have(pr.block * Field::kBytes)) return false;
254 for (size_t i = 0; i < pr.block; ++i) {
255 auto v = read_elt(buf, F);
256 if (v) {
257 pr.y_ldt[i] = v.value();
258 } else {
259 return false;
260 }
261 }
262
263 if (!buf.have(pr.dblock * Field::kBytes)) return false;
264 for (size_t i = 0; i < pr.dblock; ++i) {
265 auto v = read_elt(buf, F);
266 if (v) {
267 pr.y_dot[i] = v.value();
268 } else {
269 return false;
270 }
271 }
272
273 if (!buf.have(pr.r * Field::kBytes)) return false;
274 for (size_t i = 0; i < pr.r; ++i) {
275 auto v = read_elt(buf, F);
276 if (v) {
277 pr.y_quad_0[i] = v.value();
278 } else {
279 return false;
280 }
281 }
282
283 if (!buf.have((pr.dblock - pr.block) * Field::kBytes)) return false;
284 for (size_t i = 0; i < pr.dblock - pr.block; ++i) {
285 auto v = read_elt(buf, F);
286 if (v) {
287 pr.y_quad_2[i] = v.value();
288 } else {
289 return false;
290 }
291 }
292
293 if (!buf.have(pr.nreq * MerkleNonce::kLength)) return false;
294 for (size_t i = 0; i < pr.nreq; ++i) {
295 read_nonce(buf, pr.merkle.nonce[i]);
296 }
297
298 // Decode runs of real and full Field elements.
299 size_t ci = 0;
300 bool subfield_run = false;
301 while (ci < pr.nreq * pr.nrow) {
302 if (!buf.have(4)) return false;
303 size_t runlen = read_size(buf); /* untrusted size input */
304 if (runlen >= kMaxRunLen || ci + runlen > pr.nreq * pr.nrow) return false;
305 if (subfield_run) {
306 if (!buf.have(runlen * Field::kSubFieldBytes)) return false;
307 for (size_t i = ci; i < ci + runlen; ++i) {
308 auto v = read_subfield_elt(buf, F);
309 if (v) {
310 pr.req[i] = v.value();
311 } else {
312 return false;
313 }
314 }
315 } else {
316 if (!buf.have(runlen * Field::kBytes)) return false;
317 for (size_t i = ci; i < ci + runlen; ++i) {
318 auto v = read_elt(buf, F);
319 if (v) {
320 pr.req[i] = v.value();
321 } else {
322 return false;
323 }
324 }
325 }
326 ci += runlen;
327 subfield_run = !subfield_run;
328 }
329
330 if (!buf.have(4)) return false;
331 size_t sz = read_size(buf); /* untrusted size input */
332
333 // Merkle proofs of length < NREQ are not valid in the zk proof setting.
334 if (sz < pr.nreq || sz >= kMaxNumDigests) return false; // avoid overflow
335 if (!buf.have(sz * Digest::kLength)) return false;
336
337 // Sanity check, the proof should never be larger than this.
338 // That value should always fit into memory, so this check aims to avoid
339 // an exception by resize() if there is not enough memory to resize.
340 if (sz > pr.nreq * pr.mc_pathlen) return false;
341
342 pr.merkle.path.resize(sz);
343 for (size_t i = 0; i < sz; ++i) {
344 read_digest(buf, pr.merkle.path[i]);
345 }
346 return true;
347 }
348
349 std::optional<Elt> read_elt(ReadBuffer &buf, const Field &F) const {
350 return F.of_bytes_field(buf.next(Field::kBytes));
351 }
352
353 std::optional<Elt> read_subfield_elt(ReadBuffer &buf, const Field &F) const {
354 return F.of_bytes_subfield(buf.next(Field::kSubFieldBytes));
355 }
356
357 void read_digest(ReadBuffer &buf, Digest &x) const {
358 buf.next(Digest::kLength, x.data);
359 }
360
361 void read_nonce(ReadBuffer &buf, MerkleNonce &x) const {
362 buf.next(MerkleNonce::kLength, x.bytes);
363 }
364
365 size_t read_size(ReadBuffer &buf) { return u32_of_le(buf.next(4)); }
366};

}  // namespace proofs

#endif  // PRIVACY_PROOFS_ZK_LIB_ZK_ZK_PROOF_H_