Longfellow ZK 0290cb32
Loading...
Searching...
No Matches
zk_proof.h
1// Copyright 2025 Google LLC.
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15#ifndef PRIVACY_PROOFS_ZK_LIB_ZK_ZK_PROOF_H_
16#define PRIVACY_PROOFS_ZK_LIB_ZK_ZK_PROOF_H_
17
18#include <cstddef>
19#include <cstdint>
20#include <optional>
21#include <vector>
22
23#include "ligero/ligero_param.h"
24#include "merkle/merkle_commitment.h"
25#include "merkle/merkle_tree.h"
26#include "sumcheck/circuit.h"
27#include "util/log.h"
28#include "util/readbuffer.h"
29#include "util/serialization.h"
30#include "zk/zk_common.h"
31
32namespace proofs {
33
34// ZkProof class handles proof serialization.
35//
36// We expect circuits to be created and stored locally by the prover and
37// verifier respectively, and thus the circuit representations are trusted and
38// are assumed to contain parameters that do not induce arithmetic overflows.
39// For example, we assume that values like c.logw and c.logc are smaller than
40// 2^24 and therefore do not cause any overflows (even on 32b machines) in the
41// range/length calculations that are performed during serialization.
42//
43// An earlier experiment implemented the IO methods using protobuf parsing.
44// Despite applying techniques like arena allocation, those methods required
45// an order of magnitude more time.
template <class Field>
struct ZkProof {
 public:
  const Circuit<Field> &c;  // Trusted circuit description (see file comment).
  Proof<Field> proof;       // Sumcheck proof transcript.
  // NOTE(review): the source lines declaring `com` (LigeroCommitment) and
  // `param` (LigeroParam), which the constructors and read/write methods
  // below reference, are not visible in this view of the file — confirm
  // against the full source.
  LigeroProof<Field> com_proof;  // Ligero opening proof.

  // The max run length is 2^25, in order to prevent overflow issues on 32b
  // machines when performing length calculations during serialization.
  constexpr static size_t kMaxRunLen = (1 << 25);

  // Upper bound on the number of Merkle path digests accepted during
  // deserialization; guards length arithmetic against untrusted input.
  constexpr static size_t kMaxNumDigests = (1 << 25);

  typedef typename Field::Elt Elt;
62
  // Constructs a proof object for circuit `c`, with Ligero rate parameter
  // `rate` and `req` requested (opened) columns. The witness size passed to
  // the Ligero parameters is the number of private inputs plus the
  // zero-knowledge pad.
  explicit ZkProof(const Circuit<Field> &c, size_t rate, size_t req)
      : c(c),
        proof(c.nl),
        param((c.ninputs - c.npub_in) + ZkCommon<Field>::pad_size(c), c.nl,
              rate, req),
        com_proof(&param) {}
69
  // Same as above, with an additional `block_enc` parameter forwarded to the
  // Ligero parameters (presumably the encoded block size — confirm against
  // LigeroParam in ligero_param.h).
  explicit ZkProof(const Circuit<Field> &c, size_t rate, size_t req,
                   size_t block_enc)
      : c(c),
        proof(c.nl),
        param((c.ninputs - c.npub_in) + ZkCommon<Field>::pad_size(c), c.nl,
              rate, req, block_enc),
        com_proof(&param) {}
77
78 // Maximum size of the proof in bytes. The actual size will be smaller
79 // because the Merkle proof is batched.
80 size_t size() const {
81 return Digest::kLength +
82
83 proof.size() * Field::kBytes +
84
85 com_proof.block * 2 * Field::kBytes +
86 com_proof.nreq * com_proof.nrow * Field::kBytes +
87 com_proof.nreq * com_proof.mc_pathlen * Digest::kLength;
88 }
89
  // Serializes the commitment, sumcheck proof, and Ligero proof into `buf`,
  // in that order, and logs the byte size of each section.
  void write(std::vector<uint8_t> &buf, const Field &F) const {
    size_t s0 = buf.size();
    write_com(com, buf, F);
    size_t s1 = buf.size();
    write_sc_proof(proof, buf, F);
    size_t s2 = buf.size();
    write_com_proof(com_proof, buf, F);
    size_t s3 = buf.size();
    log(INFO,
        "com:%zu, sc:%zu, com_proof:%zu [%zu el, %zu el, %zu d in %zu "
        "rows]: %zub",
        s1 - s0, s2 - s1, s3 - s2, 2 * com_proof.block,
        com_proof.nreq * com_proof.nrow, com_proof.merkle.path.size(),
        com_proof.nrow, s3);
  }
105
106 // The read function returns false on error or underflow.
107 bool read(ReadBuffer &buf, const Field &F) {
108 if (!read_com(com, buf, F)) return false;
109 if (!read_sc_proof(proof, buf, F)) return false;
110 if (!read_com_proof(com_proof, buf, F)) return false;
111 return true;
112 }
113
114 void write_sc_proof(const Proof<Field> &pr, std::vector<uint8_t> &buf,
115 const Field &F) const {
116 check(c.logc == 0, "cannot write sc proof with logc != 0");
117 for (size_t i = 0; i < pr.l.size(); ++i) {
118 for (size_t wi = 0; wi < c.l[i].logw; ++wi) {
119 for (size_t k = 0; k < 3; ++k) {
120 // Optimization: do not send p(1) as it is implied by constraints.
121 if (k != 1) {
122 write_elt(pr.l[i].hp[0][wi].t_[k], buf, F);
123 write_elt(pr.l[i].hp[1][wi].t_[k], buf, F);
124 }
125 }
126 }
127 write_elt(pr.l[i].wc[0], buf, F);
128 write_elt(pr.l[i].wc[1], buf, F);
129 }
130 }
131
132 void write_com(const LigeroCommitment<Field> &com0, std::vector<uint8_t> &buf,
133 const Field &F) const {
134 buf.insert(buf.end(), com0.root.data, com0.root.data + Digest::kLength);
135 }
136
  // Serializes the Ligero proof: the four response vectors, the Merkle
  // nonces, the run-length-encoded opened rows, and the Merkle path.
  void write_com_proof(const LigeroProof<Field> &pr, std::vector<uint8_t> &buf,
                       const Field &F) const {
    // Low-degree-test response.
    for (size_t i = 0; i < pr.block; ++i) {
      write_elt(pr.y_ldt[i], buf, F);
    }
    // Dot-product response.
    for (size_t i = 0; i < pr.dblock; ++i) {
      write_elt(pr.y_dot[i], buf, F);
    }
    // Quadratic-test responses.
    for (size_t i = 0; i < pr.r; ++i) {
      write_elt(pr.y_quad_0[i], buf, F);
    }
    for (size_t i = 0; i < pr.dblock - pr.block; ++i) {
      write_elt(pr.y_quad_2[i], buf, F);
    }

    // write all the Merkle nonces
    for (size_t i = 0; i < pr.nreq; ++i) {
      write_nonce(pr.merkle.nonce[i], buf);
    }

    // The format of the opened rows consists of a run of full-field elements,
    // then a run of base-field elements, and finally a run of full-field
    // elements. To compress, we employ a run-length encoding approach.
    // Runs alternate full-field / subfield, starting with full-field; a run
    // is capped at kMaxRunLen so the reader's length arithmetic cannot
    // overflow. Empty runs (length 0) are legal and keep the alternation.
    size_t ci = 0;
    bool subfield_run = false;
    while (ci < pr.nreq * pr.nrow) {
      size_t runlen = 0;
      while (ci + runlen < pr.nreq * pr.nrow && runlen < kMaxRunLen &&
             F.in_subfield(pr.req[ci + runlen]) == subfield_run) {
        ++runlen;
      }
      write_size(runlen, buf);
      for (size_t i = ci; i < ci + runlen; ++i) {
        if (subfield_run) {
          write_subfield_elt(pr.req[i], buf, F);  // compact encoding
        } else {
          write_elt(pr.req[i], buf, F);
        }
      }
      ci += runlen;
      subfield_run = !subfield_run;
    }

    // Batched Merkle authentication path, preceded by its length.
    write_size(pr.merkle.path.size(), buf);
    for (size_t i = 0; i < pr.merkle.path.size(); ++i) {
      write_digest(pr.merkle.path[i], buf);
    }
  }
185
186 private:
187 void write_elt(const Elt &x, std::vector<uint8_t> &buf,
188 const Field &F) const {
189 uint8_t tmp[Field::kBytes];
190 F.to_bytes_field(tmp, x);
191 buf.insert(buf.end(), tmp, tmp + Field::kBytes);
192 }
193
194 void write_subfield_elt(const Elt &x, std::vector<uint8_t> &buf,
195 const Field &F) const {
196 uint8_t tmp[Field::kSubFieldBytes];
197 F.to_bytes_subfield(tmp, x);
198 buf.insert(buf.end(), tmp, tmp + Field::kSubFieldBytes);
199 }
200
201 void write_digest(const Digest &x, std::vector<uint8_t> &buf) const {
202 buf.insert(buf.end(), x.data, x.data + Digest::kLength);
203 }
204
205 void write_nonce(const MerkleNonce &x, std::vector<uint8_t> &buf) const {
206 buf.insert(buf.end(), x.bytes, x.bytes + MerkleNonce::kLength);
207 }
208
209 // Assumption is that all of the sizes of arrays that are part of proofs
210 // fit into 4 bytes, and can thus work on 32-b machines.
211 void write_size(size_t g, std::vector<uint8_t> &buf) const {
212 for (size_t i = 0; i < 4; ++i) {
213 buf.push_back(static_cast<uint8_t>(g & 0xff));
214 g >>= 8;
215 }
216 }
217
  // Deserializes the sumcheck proof, mirroring write_sc_proof(). The omitted
  // p(1) evaluations are filled with F.zero() placeholders (the verifier is
  // expected to reconstruct them from the constraints — confirm in caller).
  // Returns false on error or underflow.
  bool read_sc_proof(Proof<Field> &pr, ReadBuffer &buf, const Field &F) {
    if (c.logc != 0) return false;
    for (size_t i = 0; i < pr.l.size(); ++i) {
      // Per layer: logw rounds of (3-1) polynomial evaluations for each of
      // the 2 hands, plus the 2 wire-claim elements. Checking the whole
      // layer up front avoids per-element underflow checks.
      size_t needed = (c.l[i].logw * (3 - 1) * 2 + 2) * Field::kBytes;
      if (!buf.have(needed)) return false;
      for (size_t wi = 0; wi < c.l[i].logw; ++wi) {
        for (size_t k = 0; k < 3; ++k) {
          // Optimization: the p(1) value was not sent.
          if (k != 1) {
            for (size_t hi = 0; hi < 2; ++hi) {
              auto v = read_elt(buf, F);
              if (v) {
                pr.l[i].hp[hi][wi].t_[k] = v.value();
              } else {
                return false;  // bytes did not decode to a field element
              }
            }
          } else {
            // Placeholder for the omitted p(1) evaluation.
            pr.l[i].hp[0][wi].t_[k] = F.zero();
            pr.l[i].hp[1][wi].t_[k] = F.zero();
          }
        }
      }
      // The two wire-claim elements for this layer.
      for (size_t wi = 0; wi < 2; ++wi) {
        auto v = read_elt(buf, F);
        if (v) {
          pr.l[i].wc[wi] = v.value();
        } else {
          return false;
        }
      }
    }
    return true;
  }
252
253 bool read_com(LigeroCommitment<Field> &com0, ReadBuffer &buf,
254 const Field &F) {
255 if (!buf.have(Digest::kLength)) return false;
256 read_digest(buf, com0.root);
257 return true;
258 }
259
260 bool read_com_proof(LigeroProof<Field> &pr, ReadBuffer &buf, const Field &F) {
261 if (!buf.have(pr.block * Field::kBytes)) return false;
262 for (size_t i = 0; i < pr.block; ++i) {
263 auto v = read_elt(buf, F);
264 if (v) {
265 pr.y_ldt[i] = v.value();
266 } else {
267 return false;
268 }
269 }
270
271 if (!buf.have(pr.dblock * Field::kBytes)) return false;
272 for (size_t i = 0; i < pr.dblock; ++i) {
273 auto v = read_elt(buf, F);
274 if (v) {
275 pr.y_dot[i] = v.value();
276 } else {
277 return false;
278 }
279 }
280
281 if (!buf.have(pr.r * Field::kBytes)) return false;
282 for (size_t i = 0; i < pr.r; ++i) {
283 auto v = read_elt(buf, F);
284 if (v) {
285 pr.y_quad_0[i] = v.value();
286 } else {
287 return false;
288 }
289 }
290
291 if (!buf.have((pr.dblock - pr.block) * Field::kBytes)) return false;
292 for (size_t i = 0; i < pr.dblock - pr.block; ++i) {
293 auto v = read_elt(buf, F);
294 if (v) {
295 pr.y_quad_2[i] = v.value();
296 } else {
297 return false;
298 }
299 }
300
301 if (!buf.have(pr.nreq * MerkleNonce::kLength)) return false;
302 for (size_t i = 0; i < pr.nreq; ++i) {
303 read_nonce(buf, pr.merkle.nonce[i]);
304 }
305
306 // Decode runs of real and full Field elements.
307 size_t ci = 0;
308 bool subfield_run = false;
309 while (ci < pr.nreq * pr.nrow) {
310 if (!buf.have(4)) return false;
311 size_t runlen = read_size(buf); /* untrusted size input */
312 if (runlen >= kMaxRunLen || ci + runlen > pr.nreq * pr.nrow) return false;
313 if (subfield_run) {
314 if (!buf.have(runlen * Field::kSubFieldBytes)) return false;
315 for (size_t i = ci; i < ci + runlen; ++i) {
316 auto v = read_subfield_elt(buf, F);
317 if (v) {
318 pr.req[i] = v.value();
319 } else {
320 return false;
321 }
322 }
323 } else {
324 if (!buf.have(runlen * Field::kBytes)) return false;
325 for (size_t i = ci; i < ci + runlen; ++i) {
326 auto v = read_elt(buf, F);
327 if (v) {
328 pr.req[i] = v.value();
329 } else {
330 return false;
331 }
332 }
333 }
334 ci += runlen;
335 subfield_run = !subfield_run;
336 }
337
338 if (!buf.have(4)) return false;
339 size_t sz = read_size(buf); /* untrusted size input */
340
341 // Merkle proofs of length < NREQ are not valid in the zk proof setting.
342 if (sz < pr.nreq || sz >= kMaxNumDigests) return false; // avoid overflow
343 if (!buf.have(sz * Digest::kLength)) return false;
344
345 // Sanity check, the proof should never be larger than this.
346 // That value should always fit into memory, so this check aims to avoid
347 // an exception by resize() if there is not enough memory to resize.
348 if (sz > pr.nreq * pr.mc_pathlen) return false;
349
350 pr.merkle.path.resize(sz);
351 for (size_t i = 0; i < sz; ++i) {
352 read_digest(buf, pr.merkle.path[i]);
353 }
354 return true;
355 }
356
357 std::optional<Elt> read_elt(ReadBuffer &buf, const Field &F) const {
358 return F.of_bytes_field(buf.next(Field::kBytes));
359 }
360
361 std::optional<Elt> read_subfield_elt(ReadBuffer &buf, const Field &F) const {
362 return F.of_bytes_subfield(buf.next(Field::kSubFieldBytes));
363 }
364
  // Reads Digest::kLength raw bytes into x. Caller must have verified
  // availability via buf.have().
  void read_digest(ReadBuffer &buf, Digest &x) const {
    buf.next(Digest::kLength, x.data);
  }
368
  // Reads MerkleNonce::kLength raw bytes into x. Caller must have verified
  // availability via buf.have().
  void read_nonce(ReadBuffer &buf, MerkleNonce &x) const {
    buf.next(MerkleNonce::kLength, x.bytes);
  }
372
373 size_t read_size(ReadBuffer &buf) { return u32_of_le(buf.next(4)); }
374};
375
376} // namespace proofs
377
378#endif // PRIVACY_PROOFS_ZK_LIB_ZK_ZK_PROOF_H_
Definition readbuffer.h:26
Definition circuit.h:45
Definition merkle_tree.h:43
Definition gf2_128.h:63
Definition ligero_param.h:310
Definition ligero_param.h:117
Definition ligero_param.h:315
Definition merkle_commitment.h:31
Definition circuit.h:132