// Hard cap on one run-length-encoded run of opened proof elements.
// read_com_proof rejects any run length >= this bound, so a corrupt or
// malicious buffer cannot force an enormous read/allocation.
constexpr static size_t kMaxRunLen = (1 << 25);
// Hard cap on the number of Merkle-path digests accepted while
// deserializing (same denial-of-service guard as kMaxRunLen).
constexpr static size_t kMaxNumDigests = (1 << 25);
66 param((c.ninputs - c.npub_in) + ZkCommon<Field>::pad_size(c), c.nl,
74 param((c.ninputs - c.npub_in) + ZkCommon<Field>::pad_size(c), c.nl,
75 rate, req, block_enc),
// (review) Tail of the serialized-size computation; the enclosing
// function's signature is not visible in this chunk, and one or two
// lines of the expression may be missing.  The visible terms mirror
// write()'s layout: commitment root digest, sumcheck-proof elements,
// then commitment-proof elements (columns, opened entries) and the
// per-query Merkle path digests — TODO confirm term-by-term.
return Digest::kLength +
       proof.size() * Field::kBytes +
       com_proof.block * 2 * Field::kBytes +
       com_proof.nreq * com_proof.nrow * Field::kBytes +
       com_proof.nreq * com_proof.mc_pathlen * Digest::kLength;
// Serializes the whole ZK proof into buf, in the order the reader
// consumes it: commitment, sumcheck proof, then the commitment
// (column-opening) proof.  The s0..s3 checkpoints record buf's size
// after each section so the per-section byte counts can be logged.
//
// NOTE(review): the statement around the "com:..." format string is a
// logging call whose beginning and end are missing from this chunk of
// the source; the surviving tokens are kept exactly as found.
void write(std::vector<uint8_t> &buf, const Field &F) const {
  size_t s0 = buf.size();
  write_com(com, buf, F);
  size_t s1 = buf.size();
  write_sc_proof(proof, buf, F);
  size_t s2 = buf.size();
  write_com_proof(com_proof, buf, F);
  size_t s3 = buf.size();
  "com:%zu, sc:%zu, com_proof:%zu [%zu el, %zu el, %zu d in %zu "
  s1 - s0, s2 - s1, s3 - s2, 2 * com_proof.block,
  com_proof.nreq * com_proof.nrow, com_proof.merkle.path.size(),
// (review) Interior of the top-level deserializer: reads the three
// proof sections in exactly the order write() emits them, returning
// false on the first section that fails to parse.  The enclosing
// signature and the final `return true;` are not visible in this
// chunk.
if (!read_com(com, buf, F)) return false;
if (!read_sc_proof(proof, buf, F)) return false;
if (!read_com_proof(com_proof, buf, F)) return false;
114 void write_sc_proof(
const Proof<Field> &pr, std::vector<uint8_t> &buf,
115 const Field &F)
const {
116 check(c.logc == 0,
"cannot write sc proof with logc != 0");
117 for (
size_t i = 0; i < pr.l.size(); ++i) {
118 for (
size_t wi = 0; wi < c.l[i].logw; ++wi) {
119 for (
size_t k = 0; k < 3; ++k) {
122 write_elt(pr.l[i].hp[0][wi].t_[k], buf, F);
123 write_elt(pr.l[i].hp[1][wi].t_[k], buf, F);
127 write_elt(pr.l[i].wc[0], buf, F);
128 write_elt(pr.l[i].wc[1], buf, F);
// (review) Tail of write_com; the first signature line — including
// com0's type — is missing from this chunk.  A commitment is
// serialized as just its Merkle root digest bytes.
const Field &F) const {
  buf.insert(buf.end(), com0.root.data, com0.root.data + Digest::kLength);
// (review) write_com_proof — serializes the commitment proof: the
// low-degree-test, dot-product and quadratic-check response vectors,
// one Merkle nonce per opened query, the opened `req` elements
// (run-length encoded, alternating full-field / subfield runs), and
// finally the length-prefixed Merkle authentication path.
//
// NOTE(review): the first signature line (with pr's type), the
// declarations and updates of `ci` and `runlen`, the
// subfield/full-field write branch, and several loop closers are
// missing from this chunk of the source; the surviving tokens are
// kept exactly as found.
const Field &F) const {
  // Low-degree-test response: one element per block column.
  for (size_t i = 0; i < pr.block; ++i) {
    write_elt(pr.y_ldt[i], buf, F);
  // Dot-product check response over the extended (dblock) range.
  for (size_t i = 0; i < pr.dblock; ++i) {
    write_elt(pr.y_dot[i], buf, F);
  // Quadratic-check responses: r elements, then dblock - block more.
  for (size_t i = 0; i < pr.r; ++i) {
    write_elt(pr.y_quad_0[i], buf, F);
  for (size_t i = 0; i < pr.dblock - pr.block; ++i) {
    write_elt(pr.y_quad_2[i], buf, F);
  // One Merkle nonce per opened query.
  for (size_t i = 0; i < pr.nreq; ++i) {
    write_nonce(pr.merkle.nonce[i], buf);
  // Run-length encode the opened elements.  Runs alternate by whether
  // F.in_subfield(...) holds, starting with a (possibly empty) run of
  // elements NOT in the subfield (subfield_run == false); each run is
  // capped at kMaxRunLen so the reader can validate it.
  bool subfield_run = false;
  while (ci < pr.nreq * pr.nrow) {
    while (ci + runlen < pr.nreq * pr.nrow && runlen < kMaxRunLen &&
           F.in_subfield(pr.req[ci + runlen]) == subfield_run) {
    write_size(runlen, buf);
    for (size_t i = ci; i < ci + runlen; ++i) {
      // Subfield runs use the compact encoding, other runs the full
      // field encoding (the selecting branch is missing here).
      write_subfield_elt(pr.req[i], buf, F);
      write_elt(pr.req[i], buf, F);
    subfield_run = !subfield_run;
  // Merkle authentication path: length prefix, then the digests.
  write_size(pr.merkle.path.size(), buf);
  for (size_t i = 0; i < pr.merkle.path.size(); ++i) {
    write_digest(pr.merkle.path[i], buf);
187 void write_elt(
const Elt &x, std::vector<uint8_t> &buf,
188 const Field &F)
const {
189 uint8_t tmp[Field::kBytes];
190 F.to_bytes_field(tmp, x);
191 buf.insert(buf.end(), tmp, tmp + Field::kBytes);
194 void write_subfield_elt(
const Elt &x, std::vector<uint8_t> &buf,
195 const Field &F)
const {
196 uint8_t tmp[Field::kSubFieldBytes];
197 F.to_bytes_subfield(tmp, x);
198 buf.insert(buf.end(), tmp, tmp + Field::kSubFieldBytes);
201 void write_digest(
const Digest &x, std::vector<uint8_t> &buf)
const {
202 buf.insert(buf.end(), x.data, x.data + Digest::kLength);
205 void write_nonce(
const MerkleNonce &x, std::vector<uint8_t> &buf)
const {
206 buf.insert(buf.end(), x.bytes, x.bytes + MerkleNonce::kLength);
211 void write_size(
size_t g, std::vector<uint8_t> &buf)
const {
212 for (
size_t i = 0; i < 4; ++i) {
213 buf.push_back(
static_cast<uint8_t
>(g & 0xff));
219 if (c.logc != 0)
return false;
220 for (
size_t i = 0; i < pr.l.size(); ++i) {
221 size_t needed = (c.l[i].logw * (3 - 1) * 2 + 2) * Field::kBytes;
222 if (!buf.have(needed))
return false;
223 for (
size_t wi = 0; wi < c.l[i].logw; ++wi) {
224 for (
size_t k = 0; k < 3; ++k) {
227 for (
size_t hi = 0; hi < 2; ++hi) {
228 auto v = read_elt(buf, F);
230 pr.l[i].hp[hi][wi].t_[k] = v.value();
236 pr.l[i].hp[0][wi].t_[k] = F.zero();
237 pr.l[i].hp[1][wi].t_[k] = F.zero();
241 for (
size_t wi = 0; wi < 2; ++wi) {
242 auto v = read_elt(buf, F);
244 pr.l[i].wc[wi] = v.value();
// (review) Interior of read_com; its signature (with com0's type) and
// trailing success return are missing from this chunk.  Verifies the
// buffer holds a full digest, then reads the commitment's Merkle
// root.
if (!buf.have(Digest::kLength)) return false;
read_digest(buf, com0.root);
// (review) read_com_proof — deserializes what write_com_proof wrote,
// checking buf.have(...) before each group of reads so a truncated or
// malicious buffer fails cleanly instead of over-reading, and
// bounding attacker-controlled lengths with kMaxRunLen /
// kMaxNumDigests.
//
// NOTE(review): the signature line (with pr's type), the
// `if (!v.has_value()) return false;` checks after each read, the
// declaration/updates of `ci`, the subfield/full-field branch, and
// several loop closers are missing from this chunk of the source; the
// surviving tokens are kept exactly as found.
// Low-degree-test response.
if (!buf.have(pr.block * Field::kBytes)) return false;
for (size_t i = 0; i < pr.block; ++i) {
  auto v = read_elt(buf, F);
  pr.y_ldt[i] = v.value();
// Dot-product check response.
if (!buf.have(pr.dblock * Field::kBytes)) return false;
for (size_t i = 0; i < pr.dblock; ++i) {
  auto v = read_elt(buf, F);
  pr.y_dot[i] = v.value();
// Quadratic-check responses.
if (!buf.have(pr.r * Field::kBytes)) return false;
for (size_t i = 0; i < pr.r; ++i) {
  auto v = read_elt(buf, F);
  pr.y_quad_0[i] = v.value();
if (!buf.have((pr.dblock - pr.block) * Field::kBytes)) return false;
for (size_t i = 0; i < pr.dblock - pr.block; ++i) {
  auto v = read_elt(buf, F);
  pr.y_quad_2[i] = v.value();
// One fixed-size Merkle nonce per opened query.
if (!buf.have(pr.nreq * MerkleNonce::kLength)) return false;
for (size_t i = 0; i < pr.nreq; ++i) {
  read_nonce(buf, pr.merkle.nonce[i]);
// Run-length-decoded opened elements; runs alternate between the
// subfield-compact and full-field encodings.
bool subfield_run = false;
while (ci < pr.nreq * pr.nrow) {
  if (!buf.have(4)) return false;
  size_t runlen = read_size(buf);
  // Reject run lengths the writer could not have produced.
  if (runlen >= kMaxRunLen || ci + runlen > pr.nreq * pr.nrow) return false;
  if (!buf.have(runlen * Field::kSubFieldBytes)) return false;
  for (size_t i = ci; i < ci + runlen; ++i) {
    auto v = read_subfield_elt(buf, F);
    pr.req[i] = v.value();
  if (!buf.have(runlen * Field::kBytes)) return false;
  for (size_t i = ci; i < ci + runlen; ++i) {
    auto v = read_elt(buf, F);
    pr.req[i] = v.value();
  subfield_run = !subfield_run;
// Merkle authentication path: bounded length prefix, then digests.
if (!buf.have(4)) return false;
size_t sz = read_size(buf);
if (sz < pr.nreq || sz >= kMaxNumDigests) return false;
if (!buf.have(sz * Digest::kLength)) return false;
// A valid path never exceeds one full path per query.
if (sz > pr.nreq * pr.mc_pathlen) return false;
pr.merkle.path.resize(sz);
for (size_t i = 0; i < sz; ++i) {
  read_digest(buf, pr.merkle.path[i]);
357 std::optional<Elt> read_elt(
ReadBuffer &buf,
const Field &F)
const {
358 return F.of_bytes_field(buf.next(Field::kBytes));
361 std::optional<Elt> read_subfield_elt(
ReadBuffer &buf,
const Field &F)
const {
362 return F.of_bytes_subfield(buf.next(Field::kSubFieldBytes));
// (review) Body of what appears to be read_digest (its signature is
// missing from this chunk; the name comes from the call sites):
// copies Digest::kLength bytes from the buffer into x.data.
buf.next(Digest::kLength, x.data);
// (review) Body of what appears to be read_nonce (its signature is
// missing from this chunk; the name comes from the call sites):
// copies MerkleNonce::kLength bytes from the buffer into x.bytes.
buf.next(MerkleNonce::kLength, x.bytes);
373 size_t read_size(
ReadBuffer &buf) {
return u32_of_le(buf.next(4)); }