152 LigeroParam(
size_t nw,
size_t nq,
size_t rateinv,
size_t nreq)
153 : nw(nw), nq(nq), rateinv(rateinv), nreq(nreq) {
156 size_t min_proof_size = SIZE_MAX;
157 size_t best_block_enc = 1;
158 for (
size_t e = 1; e <= (1 << 28); e *= 2) {
159 size_t proof_size = layout(e);
160 if (proof_size < min_proof_size) {
161 min_proof_size = proof_size;
167 layout(best_block_enc);
172 LigeroParam(
size_t nw,
size_t nq,
size_t rateinv,
size_t nreq,
174 : nw(nw), nq(nq), rateinv(rateinv), nreq(nreq), block_enc(be) {
176 check(layout(block_enc) < SIZE_MAX,
"block_enc too large");
// Compute the derived layout fields for encoded-block-size candidate `e`
// and return the estimated proof size in bytes, saturated to SIZE_MAX.
// SIZE_MAX signals an infeasible candidate (see the constructor's check).
//
// NOTE(review): this chunk was extracted with fragments dropped — the
// assignment of `e` into block_enc, the early-return bodies of the guard
// `if`s, and the definitions of `w`, `r`, `nrow`, and `sz` are not visible
// below.  The comments describe only what the surviving lines show;
// verify against the original file.
185 size_t layout(
size_t e) {
// Hard cap on every size parameter: 2^28.
192 constexpr size_t max_lg_size = 28;
193 constexpr size_t max_size =
static_cast<size_t>(1) << max_lg_size;
// block_enc must fit in the subfield's point space — presumably the
// encoding is evaluated at subfield points; TODO confirm.
197 size_t subfield_bits = 8 * Field::kSubFieldBytes;
198 if (subfield_bits <= max_lg_size) {
199 if (block_enc >= (
static_cast<size_t>(1) << subfield_bits)) {
// Reject oversize inputs; also require block_enc + 1 >= 2 + rateinv so
// the quotient computed next is at least 1.
206 if (block_enc > max_size || rateinv > max_size ||
207 (block_enc + 1) < (2 + rateinv)) {
// Message block size implied by the inverse rate.
211 block = (block_enc + 1) / (2 + rateinv);
// A product of two degree-(block-1) polynomials has 2*block-1 coefficients.
233 dblock = 2 * block - 1;
// The encoding must be at least as long as the product block.
238 if (block_enc < dblock) {
243 block_ext = block_enc - dblock;
// Rows needed to pack the witnesses and the quadratic-constraint triples,
// `w` payload elements per row (w's assignment is not visible here).
246 nwrow = ceildiv(nw, w);
247 nqtriples = ceildiv(nq, w);
// Each quadratic triple occupies an x, y, and z row.
249 nwqrow = nwrow + 3 * nqtriples;
// Overflow guard for the nrow * block_enc tableau.
254 if (nrow >= max_size / block_enc) {
258 mc_pathlen = merkle_commitment_len(block_ext);
// Proof-size estimate, accumulated in uint64_t to avoid size_t overflow.
// Merkle authentication paths: ~pathlen/2 digests per opening, nreq openings.
270 sz +=
static_cast<uint64_t
>(mc_pathlen) / 2 *
static_cast<uint64_t
>(nreq) *
271 static_cast<uint64_t
>(Digest::kLength);
// One block of field elements.
274 sz +=
static_cast<uint64_t
>(block) *
static_cast<uint64_t
>(Field::kBytes);
// One dblock of field elements.
277 sz +=
static_cast<uint64_t
>(dblock) *
static_cast<uint64_t
>(Field::kBytes);
// dblock - w further field elements.
282 sz +=
static_cast<uint64_t
>(dblock - w) *
283 static_cast<uint64_t
>(Field::kBytes);
// One Merkle nonce per opening request.
286 sz +=
static_cast<uint64_t
>(nreq) *
287 static_cast<uint64_t
>(MerkleNonce::kLength);
// Opened columns: nrow subfield elements per request.
290 sz +=
static_cast<uint64_t
>(nrow) *
static_cast<uint64_t
>(nreq) *
291 static_cast<uint64_t
>(Field::kSubFieldBytes);
// Saturate so callers can treat SIZE_MAX as "infeasible/too large".
293 sz = std::min<uint64_t>(sz, SIZE_MAX);
294 return static_cast<size_t>(sz);
// Fragment of a validation routine whose enclosing definition is outside
// this chunk: sanity-checks two layout invariants — the encoding is a
// strict expansion of the block, and the row count decomposes into `iq`
// witness rows plus three rows per quadratic triple.
299 proofs::check(block_enc > block,
"block_enc > block");
305 proofs::check(nrow == iq + 3 * nqtriples,
"nrow == iq + 3 * nqtriples");
// Fragment of a constructor initializer list (the ctor header is outside
// this chunk): sizes are copied from the parameter object `p`.
// y_quad_2 holds dblock - block elements; req holds nrow * nreq elements,
// row-major with stride nreq (see req_at below).
321 block_enc(p->block_enc),
324 mc_pathlen(p->mc_pathlen),
328 y_quad_2(p->dblock - p->block),
329 req(p->nrow * p->nreq),
// Response vectors of the proof; element type Elt is project-declared.
341 std::vector<Elt> y_ldt;
342 std::vector<Elt> y_dot;
343 std::vector<Elt> y_quad_0;
345 std::vector<Elt> y_quad_2;
// Flattened matrix of opened-column responses, indexed via req_at(i, j).
346 std::vector<Elt> req;
349 Elt &req_at(
size_t i,
size_t j) {
return req[i * nreq + j]; }
350 const Elt &req_at(
size_t i,
size_t j)
const {
return req[i * nreq + j]; }
// Build the verifier's inner-product vector A.
//
// NOTE(review): the extraction dropped most of the parameter list (A, p,
// llterm/nllterm, nl, lqc are used but not visibly declared) and the body
// of the linear-term loop; comments describe only the visible lines —
// verify against the original file.
382 static void inner_product_vector(
386 const std::array<Elt, 3> alphaq[],
const Field &F) {
// Zero the nwqrow * w accumulation slots.
388 Blas<Field>::clear(p.nwqrow * p.w, A, 1, F);
// Validate each linear-layout term before use (the accumulation
// statement itself is missing from this chunk).
391 for (
size_t l = 0; l < nllterm; ++l) {
392 const auto &
term = llterm[l];
393 proofs::check(
term.w < p.nw,
"term.w < p.nw");
394 proofs::check(
term.c < nl,
"term.c < nl");
// The x/y/z regions for quadratic triples follow the nwrow witness rows,
// each region nqtriples * w slots long.
399 Elt *Ax = &A[p.nwrow * p.w];
400 Elt *Ay = Ax + (p.nqtriples * p.w);
401 Elt *Az = Ay + (p.nqtriples * p.w);
// For each packed quadratic constraint iw (guarded so iw < nq), add the
// challenge component into its x/y/z slot and subtract it at the wire
// position the constraint references — so honest wires cancel.
403 for (
size_t i = 0; i < p.nqtriples; ++i) {
404 for (
size_t j = 0; j < p.w && j + i * p.w < p.nq; ++j) {
406 size_t iw = j + i * p.w;
407 const auto *l = &lqc[iw];
408 F.add(Ax[iw], alphaq[iw][0]);
409 F.sub(A[l->x], alphaq[iw][0]);
411 F.add(Ay[iw], alphaq[iw][1]);
412 F.sub(A[l->y], alphaq[iw][1]);
414 F.add(Az[iw], alphaq[iw][2]);
415 F.sub(A[l->z], alphaq[iw][2]);
// Fragment of a row-extension helper (signature mostly outside this
// chunk): zeroes the first p.r slots of Aext, then copies row i's p.w
// elements from A after them — i.e. builds [0^r | A_row_i] in Aext.
423 size_t i,
const Elt A[],
425 Blas<Field>::clear(p.r, &Aext[0], 1, F);
426 Blas<Field>::copy(p.w, &Aext[p.r], 1, &A[i * p.w], 1);
// Fold n field elements (read from x with stride incx) into the running
// SHA-256 state, one canonical Field::kBytes serialization per element.
// NOTE(review): the closing braces are cut off at the end of this chunk.
429 static void column_hash(
size_t n,
const Elt x[],
size_t incx,
430 SHA256 &sha,
const Field &F) {
431 for (
size_t i = 0; i < n; ++i) {
// Serialize each element to its fixed-width byte form before hashing so
// the digest is independent of in-memory representation.
432 uint8_t buf[Field::kBytes];
433 F.to_bytes_field(buf, x[i * incx]);
434 sha.Update(buf,
sizeof(buf));