Make aggregation circuit generic over number of snarks (#1225)
* 1

* clean up

* fix

* blob degree

* wip

* wip

* clean up

* clean up

* clean up

* fix clippy

* roll back to 15 snarks

* re-instate change that increases max_agg_snarks=45

* fix: use const instead of hard-coded number

* Revert "re-instate change that increases max_agg_snarks=45"

This reverts commit ca7f6f8.

* Clean up comments

* fix lint error from rebase

* make aggregation circuit generic over number of snarks

* make components generic over N_SNARKS

* fix 2 snark test

* fix clippy

* remove env logger init and add 14 snark test

* fix clippy

* move doc strings

* remove outdated comment and change to log::trace

* Add todo

---------

Co-authored-by: kunxian xia <xiakunxian130@gmail.com>
Co-authored-by: Zhang Zhuo <mycinbrin@gmail.com>
Co-authored-by: Rohit Narurkar <rohit.narurkar@proton.me>
Co-authored-by: Mason Liang <mason@scroll.io>
Co-authored-by: z2trillion <dawnofthedinosaurs@gmail.com>
6 people authored May 21, 2024
1 parent c228ed8 commit 352b3ec
Showing 18 changed files with 740 additions and 1,050 deletions.
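The heart of this PR is a standard Rust refactor: the crate-wide `MAX_AGG_SNARKS` constant becomes a `const` generic parameter `N_SNARKS` threaded through the aggregation circuit and its components. A minimal, std-only sketch of the pattern — the types and the helper name here are placeholders, not the real circuit code:

```rust
// Placeholder for the real SNARK proof type.
#[derive(Clone, Copy)]
struct Snark;

// Before: the arity is pinned by a crate-wide constant.
const MAX_AGG_SNARKS: usize = 15;

struct FixedAggregationCircuit {
    snarks: [Snark; MAX_AGG_SNARKS],
}

// After: the arity is a const generic parameter, so one definition serves
// every instantiation (the PR exercises 2- and 14-snark tests).
struct AggregationCircuit<const N_SNARKS: usize> {
    snarks: [Snark; N_SNARKS],
}

impl<const N_SNARKS: usize> AggregationCircuit<N_SNARKS> {
    // Sizes that were free constants become functions of N_SNARKS; this
    // mirrors the keccak preimage length in the diff below:
    // (N_SNARKS + 1 + 1) digests of 32 bytes each.
    const fn challenge_digest_preimage_len() -> usize {
        32 * (N_SNARKS + 1 + 1)
    }
}

fn main() {
    let _fixed = FixedAggregationCircuit { snarks: [Snark; MAX_AGG_SNARKS] };
    let generic = AggregationCircuit::<15> { snarks: [Snark; 15] };
    assert_eq!(generic.snarks.len(), 15);
    assert_eq!(AggregationCircuit::<15>::challenge_digest_preimage_len(), 544);
}
```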
7 changes: 5 additions & 2 deletions aggregator/src/aggregation/barycentric.rs
@@ -349,7 +349,10 @@ pub fn interpolate(z: Scalar, coefficients: &[Scalar; BLOB_WIDTH]) -> Scalar {
#[cfg(test)]
mod tests {
use super::*;
-use crate::blob::{BlobData, KZG_TRUSTED_SETUP};
+use crate::{
+blob::{BlobData, KZG_TRUSTED_SETUP},
+MAX_AGG_SNARKS,
+};
use c_kzg::{Blob as RethBlob, KzgProof};
use std::collections::BTreeSet;

@@ -386,7 +389,7 @@ mod tests {

#[test]
fn interpolate_matches_reth_implementation() {
-let blob = BlobData::from(&vec![
+let blob = BlobData::<MAX_AGG_SNARKS>::from(&vec![
vec![30; 56],
vec![200; 100],
vec![0; 340],
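With `BlobData` generic, a call site such as this test has to name the arity, hence the turbofish. A std-only sketch of the same shape — the `From` impl and the chunk-count invariant are assumptions for illustration:

```rust
// Illustrative stand-in for the generic BlobData; the real type lives in
// aggregator/src/blob.rs and differs in detail.
struct BlobData<const N_SNARKS: usize> {
    chunks: Vec<Vec<u8>>,
}

impl<const N_SNARKS: usize> From<&Vec<Vec<u8>>> for BlobData<N_SNARKS> {
    fn from(chunks: &Vec<Vec<u8>>) -> Self {
        // Assumed invariant: a blob carries at most N_SNARKS chunks.
        assert!(chunks.len() <= N_SNARKS);
        Self { chunks: chunks.clone() }
    }
}

const MAX_AGG_SNARKS: usize = 15; // value assumed from the commit history

fn main() {
    // Without the turbofish, N_SNARKS could not be inferred from context.
    let blob = BlobData::<MAX_AGG_SNARKS>::from(&vec![vec![30; 56], vec![200; 100]]);
    assert_eq!(blob.chunks.len(), 2);
}
```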
106 changes: 58 additions & 48 deletions aggregator/src/aggregation/blob_data.rs
@@ -14,15 +14,12 @@ use zkevm_circuits::{

use crate::{
aggregation::rlc::POWS_OF_256,
-blob::{
-BlobData, BLOB_WIDTH, N_BYTES_U256, N_DATA_BYTES_PER_COEFFICIENT, N_ROWS_BLOB_DATA_CONFIG,
-N_ROWS_DATA, N_ROWS_DIGEST_BYTES, N_ROWS_DIGEST_RLC, N_ROWS_METADATA,
-},
-RlcConfig, MAX_AGG_SNARKS,
+blob::{BlobData, BLOB_WIDTH, N_BYTES_U256, N_DATA_BYTES_PER_COEFFICIENT},
+RlcConfig,
};

#[derive(Clone, Debug)]
-pub struct BlobDataConfig {
+pub struct BlobDataConfig<const N_SNARKS: usize> {
/// The byte value at this row.
byte: Column<Advice>,
/// The accumulator serves several purposes.
@@ -32,7 +29,7 @@ pub struct BlobDataConfig {
/// resets to 1 if we encounter a chunk boundary. The accumulator here is referenced while
/// doing a lookup to the Keccak table that requires the input length.
accumulator: Column<Advice>,
-/// An increasing counter that denotes the chunk ID. The chunk ID is from [1, MAX_AGG_SNARKS].
+/// An increasing counter that denotes the chunk ID. The chunk ID is from [1, N_SNARKS].
chunk_idx: Column<Advice>,
/// A boolean witness that is set only when we encounter the end of a chunk. We enable a lookup
/// to the Keccak table when the boundary is met.
@@ -53,8 +50,8 @@ pub struct BlobDataConfig {
pub hash_selector: Selector,
/// Fixed table that consists of [0, 256).
u8_table: U8Table,
-/// Fixed table that consists of [0, MAX_AGG_SNARKS).
-chunk_idx_range_table: RangeTable<MAX_AGG_SNARKS>,
+/// Fixed table that consists of [0, N_SNARKS).
+chunk_idx_range_table: RangeTable<N_SNARKS>,
}
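Note the table wiring barely changes: `RangeTable` already takes its bound as a const generic, so `RangeTable<MAX_AGG_SNARKS>` simply becomes `RangeTable<N_SNARKS>`. A toy, non-circuit model of a range table keyed that way (the real one is a halo2 fixed column used in lookup arguments):

```rust
// Toy model of a fixed range table over [0, N).
struct RangeTable<const N: usize> {
    values: [u64; N],
}

impl<const N: usize> RangeTable<N> {
    fn new() -> Self {
        let mut values = [0u64; N];
        for (i, v) in values.iter_mut().enumerate() {
            *v = i as u64;
        }
        Self { values }
    }

    // A lookup against the table succeeds iff the value lies in [0, N).
    fn contains(&self, value: u64) -> bool {
        self.values.iter().any(|&v| v == value)
    }
}

fn main() {
    let table = RangeTable::<15>::new();
    assert!(table.contains(14));
    assert!(!table.contains(15));
}
```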

pub struct AssignedBlobDataExport {
@@ -74,14 +71,16 @@ pub struct AssignedBlobDataConfig {
pub digest_rlc: AssignedCell<Fr, Fr>,
}

-impl BlobDataConfig {
+impl<const N_SNARKS: usize> BlobDataConfig<N_SNARKS> {
pub fn configure(
meta: &mut ConstraintSystem<Fr>,
challenge: Challenges<Expression<Fr>>,
u8_table: U8Table,
-range_table: RangeTable<MAX_AGG_SNARKS>,
+range_table: RangeTable<N_SNARKS>,
keccak_table: &KeccakTable,
) -> Self {
+let n_rows_metadata = BlobData::<N_SNARKS>::n_rows_metadata();

let config = Self {
u8_table,
chunk_idx_range_table: range_table,
@@ -126,7 +125,7 @@ impl BlobDataConfig {
let cond = is_not_hash * is_boundary * (1.expr() - is_padding_next);
let chunk_idx_curr = meta.query_advice(config.chunk_idx, Rotation::cur());
let chunk_idx_next = meta.query_advice(config.chunk_idx, Rotation::next());
-// chunk_idx increases by at least 1 and at most MAX_AGG_SNARKS when condition is
+// chunk_idx increases by at least 1 and at most N_SNARKS when condition is
// met.
vec![(
cond * (chunk_idx_next - chunk_idx_curr - 1.expr()),
@@ -136,7 +135,7 @@ impl BlobDataConfig {
);

meta.lookup(
"BlobDataConfig (chunk_idx for non-padding, data rows in [1..MAX_AGG_SNARKS])",
"BlobDataConfig (chunk_idx for non-padding, data rows in [1..N_SNARKS])",
|meta| {
let is_data = meta.query_selector(config.data_selector);
let is_padding = meta.query_advice(config.is_padding, Rotation::cur());
@@ -272,7 +271,7 @@ impl BlobDataConfig {

let accumulator = meta.query_advice(config.accumulator, Rotation::cur());
let preimage_len =
-is_data.expr() * accumulator + (1.expr() - is_data) * N_ROWS_METADATA.expr();
+is_data.expr() * accumulator + (1.expr() - is_data) * n_rows_metadata.expr();

[
1.expr(), // q_enable
@@ -331,7 +330,7 @@ impl BlobDataConfig {
// - metadata_digest: 32 bytes
// - chunk[i].chunk_data_digest: 32 bytes each
// - versioned_hash: 32 bytes
-let preimage_len = 32.expr() * (MAX_AGG_SNARKS + 1 + 1).expr();
+let preimage_len = 32.expr() * (N_SNARKS + 1 + 1).expr();

[
1.expr(), // q_enable
Expand All @@ -347,6 +346,11 @@ impl BlobDataConfig {
},
);

log::trace!("blob meta degree: {}", meta.degree());
log::trace!(
"blob meta degree with lookups: {}",
meta.clone().chunk_lookups().degree()
);
assert!(meta.degree() <= 5);

config
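The `n_rows_metadata` binding above replaces the former free constant `N_ROWS_METADATA`: row counts now live as associated functions on `BlobData<N_SNARKS>` and are evaluated per instantiation. A sketch of that move; the 2-plus-4-bytes-per-chunk formula is an assumption for illustration (the real formula lives in `blob.rs`, outside this diff):

```rust
struct BlobData<const N_SNARKS: usize>;

impl<const N_SNARKS: usize> BlobData<N_SNARKS> {
    // Assumed layout: 2 bytes encode the number of chunks, then 4 bytes
    // per chunk size — hence the metadata section grows with N_SNARKS.
    const fn n_rows_metadata() -> usize {
        2 + 4 * N_SNARKS
    }
}

fn main() {
    // Each instantiation gets its own row counts at compile time.
    assert_eq!(BlobData::<15>::n_rows_metadata(), 62);
    assert_eq!(BlobData::<2>::n_rows_metadata(), 10);
}
```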
Expand All @@ -360,7 +364,7 @@ impl BlobDataConfig {
// The chunks_are_padding assigned cells are exports from the conditional constraints in
// `core.rs`. Since these are already constrained, we can just use them as is.
chunks_are_padding: &[AssignedCell<Fr, Fr>],
-blob: &BlobData,
+blob: &BlobData<N_SNARKS>,
barycentric_assignments: &[CRTInteger<Fr>],
) -> Result<AssignedBlobDataExport, Error> {
self.load_range_tables(layouter)?;
@@ -393,24 +397,25 @@ impl BlobDataConfig {
&self,
region: &mut Region<Fr>,
challenge_value: Challenges<Value<Fr>>,
-blob: &BlobData,
+blob: &BlobData<N_SNARKS>,
) -> Result<Vec<AssignedBlobDataConfig>, Error> {
+let n_rows_data = BlobData::<N_SNARKS>::n_rows_data();
+let n_rows_metadata = BlobData::<N_SNARKS>::n_rows_metadata();

let rows = blob.to_rows(challenge_value);
-assert_eq!(rows.len(), N_ROWS_BLOB_DATA_CONFIG);
+assert_eq!(rows.len(), BlobData::<N_SNARKS>::n_rows());

// enable data selector
-for offset in N_ROWS_METADATA..N_ROWS_METADATA + N_ROWS_DATA {
+for offset in n_rows_metadata..n_rows_metadata + n_rows_data {
self.data_selector.enable(region, offset)?;
}

// enable hash selector
-for offset in
-N_ROWS_METADATA + N_ROWS_DATA..N_ROWS_METADATA + N_ROWS_DATA + N_ROWS_DIGEST_RLC
-{
+for offset in n_rows_metadata + n_rows_data..BlobData::<N_SNARKS>::n_rows() {
self.hash_selector.enable(region, offset)?;
}

-let mut assigned_rows = Vec::with_capacity(N_ROWS_BLOB_DATA_CONFIG);
+let mut assigned_rows = Vec::with_capacity(BlobData::<N_SNARKS>::n_rows());
let mut count = 0u64;
for (i, row) in rows.iter().enumerate() {
let byte = region.assign_advice(
@@ -437,7 +442,7 @@ impl BlobDataConfig {
i,
|| Value::known(Fr::from(row.is_boundary as u64)),
)?;
-let bcount = if (N_ROWS_METADATA..N_ROWS_METADATA + N_ROWS_DATA).contains(&i) {
+let bcount = if (n_rows_metadata..n_rows_metadata + n_rows_data).contains(&i) {
count += row.is_boundary as u64;
count
} else {
@@ -488,6 +493,10 @@ impl BlobDataConfig {
barycentric_assignments: &[CRTInteger<Fr>],
assigned_rows: &[AssignedBlobDataConfig],
) -> Result<AssignedBlobDataExport, Error> {
+let n_rows_metadata = BlobData::<N_SNARKS>::n_rows_metadata();
+let n_rows_digest_rlc = BlobData::<N_SNARKS>::n_rows_digest_rlc();
+let n_rows_data = BlobData::<N_SNARKS>::n_rows_data();

rlc_config.init(region)?;
let mut rlc_config_offset = 0;

@@ -506,9 +515,10 @@ impl BlobDataConfig {
};
let fixed_chunk_indices = {
let mut fixed_chunk_indices = vec![one.clone()];
-for i in 2..=MAX_AGG_SNARKS {
+for i in 2..=N_SNARKS {
let i_cell =
rlc_config.load_private(region, &Fr::from(i as u64), &mut rlc_config_offset)?;
+// TODO: look into this....
let i_fixed_cell =
rlc_config.fixed_up_to_max_agg_snarks_cell(i_cell.cell().region_index, i);
region.constrain_equal(i_cell.cell(), i_fixed_cell)?;
@@ -582,8 +592,8 @@ impl BlobDataConfig {
////////////////////////////////////////////////////////////////////////////////

let mut num_nonempty_chunks = zero.clone();
-let mut is_empty_chunks = Vec::with_capacity(MAX_AGG_SNARKS);
-let mut chunk_sizes = Vec::with_capacity(MAX_AGG_SNARKS);
+let mut is_empty_chunks = Vec::with_capacity(N_SNARKS);
+let mut chunk_sizes = Vec::with_capacity(N_SNARKS);
for (i, is_padded_chunk) in chunks_are_padding.iter().enumerate() {
let rows = assigned_rows
.iter()
@@ -641,7 +651,7 @@ impl BlobDataConfig {
rlc_config.not(region, &all_chunks_empty, &mut rlc_config_offset)?;

// constrain preimage_rlc column
-let metadata_rows = &assigned_rows[..N_ROWS_METADATA];
+let metadata_rows = &assigned_rows[..n_rows_metadata];
region.constrain_equal(
metadata_rows[0].byte.cell(),
metadata_rows[0].preimage_rlc.cell(),
@@ -668,7 +678,7 @@ impl BlobDataConfig {
}

// in the metadata section, these columns are 0 except (possibly) on the last row.
-for row in metadata_rows.iter().take(N_ROWS_METADATA - 1) {
+for row in metadata_rows.iter().take(n_rows_metadata - 1) {
let cells = [&row.is_boundary, &row.digest_rlc].map(AssignedCell::cell);

for cell in cells {
@@ -679,7 +689,7 @@ impl BlobDataConfig {
// in the final row of the metadata section, boundary is 1. note that this triggers a keccak
// lookup which constrains digest_rlc.
region.constrain_equal(
-metadata_rows[N_ROWS_METADATA - 1].is_boundary.cell(),
+metadata_rows[n_rows_metadata - 1].is_boundary.cell(),
one.cell(),
)?;
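For context on the `digest_rlc`/`preimage_rlc` cells threaded through these constraints: an RLC (random linear combination) Horner-folds a byte string into a single field element under a challenge `r`, so constraining one cell (plus the keccak lookup) effectively constrains the whole 32-byte digest. A toy sketch over a stand-in prime (the circuit itself works over BN254's scalar field `Fr`):

```rust
// Toy RLC: Horner-fold bytes into one element of a stand-in field. The
// Goldilocks prime is used only so the arithmetic fits in u128.
const P: u128 = 0xffff_ffff_0000_0001;

fn rlc(bytes: &[u8], r: u128) -> u128 {
    bytes.iter().fold(0u128, |acc, &b| (acc * r + u128::from(b)) % P)
}

fn main() {
    let digest = [0x11u8; 32]; // stand-in for a 32-byte keccak digest
    let r = 123_456_789; // stand-in for the in-circuit random challenge
    // Equal byte strings always fold to the same RLC...
    assert_eq!(rlc(&digest, r), rlc(&[0x11u8; 32], r));
    // ...and differing strings almost surely do not, since r is random.
    assert_ne!(rlc(&digest, r), rlc(&[0x22u8; 32], r));
}
```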

@@ -691,8 +701,8 @@ impl BlobDataConfig {
// there are no non-empty chunks, this will be 0 and must also be a padding row.
let rows = assigned_rows
.iter()
-.skip(N_ROWS_METADATA)
-.take(N_ROWS_DATA)
+.skip(n_rows_metadata)
+.take(n_rows_data)
.collect::<Vec<_>>();
rlc_config.conditional_enforce_equal(
region,
@@ -736,31 +746,31 @@ impl BlobDataConfig {

let rows = assigned_rows
.iter()
-.skip(N_ROWS_METADATA + N_ROWS_DATA)
-.take(N_ROWS_DIGEST_RLC)
+.skip(n_rows_metadata + n_rows_data)
+.take(n_rows_digest_rlc)
.collect::<Vec<_>>();

-// rows have chunk_idx set from 0 (metadata) -> MAX_AGG_SNARKS.
+// rows have chunk_idx set from 0 (metadata) -> N_SNARKS.
region.constrain_equal(rows[0].chunk_idx.cell(), zero.cell())?;
for (row, fixed_chunk_idx) in rows
.iter()
.skip(1)
-.take(MAX_AGG_SNARKS)
+.take(N_SNARKS)
.zip_eq(fixed_chunk_indices.iter())
{
region.constrain_equal(row.chunk_idx.cell(), fixed_chunk_idx.cell())?;
}

let challenge_digest_preimage_rlc_specified = &rows.last().unwrap().preimage_rlc;
let challenge_digest_rlc_specified = &rows.last().unwrap().digest_rlc;
-let versioned_hash_rlc = &rows.get(N_ROWS_DIGEST_RLC - 2).unwrap().digest_rlc;
+let versioned_hash_rlc = &rows.get(n_rows_digest_rlc - 2).unwrap().digest_rlc;

// ensure that on the last row of this section the is_boundary is turned on
// which would enable the keccak table lookup for challenge_digest
region.constrain_equal(rows.last().unwrap().is_boundary.cell(), one.cell())?;

let metadata_digest_rlc_computed =
-&assigned_rows.get(N_ROWS_METADATA - 1).unwrap().digest_rlc;
+&assigned_rows.get(n_rows_metadata - 1).unwrap().digest_rlc;
let metadata_digest_rlc_specified = &rows.first().unwrap().digest_rlc;
region.constrain_equal(
metadata_digest_rlc_computed.cell(),
Expand All @@ -773,7 +783,7 @@ impl BlobDataConfig {
// Also, we know that the first chunk is valid. So we can just start the check from
// the second chunk's data digest.
region.constrain_equal(chunks_are_padding[0].cell(), zero.cell())?;
-for i in 1..MAX_AGG_SNARKS {
+for i in 1..N_SNARKS {
// Note that in `rows`, the first row is the metadata row (hence anyway skip
// it). That's why we have a +1.
rlc_config.conditional_enforce_equal(
@@ -785,11 +795,11 @@ impl BlobDataConfig {
)?;
}

-let mut chunk_digest_evm_rlcs = Vec::with_capacity(MAX_AGG_SNARKS);
+let mut chunk_digest_evm_rlcs = Vec::with_capacity(N_SNARKS);
for (((row, chunk_size_decoded), is_empty), is_padded_chunk) in rows
.iter()
.skip(1)
-.take(MAX_AGG_SNARKS)
+.take(N_SNARKS)
.zip_eq(chunk_sizes)
.zip_eq(is_empty_chunks)
.zip_eq(chunks_are_padding)
@@ -821,8 +831,8 @@ impl BlobDataConfig {
let mut challenge_digest_preimage_keccak_rlc = zero.clone();
let rows = assigned_rows
.iter()
-.skip(N_ROWS_METADATA + N_ROWS_DATA + N_ROWS_DIGEST_RLC)
-.take(N_ROWS_DIGEST_BYTES)
+.skip(n_rows_metadata + n_rows_data + n_rows_digest_rlc)
+.take(BlobData::<N_SNARKS>::n_rows_digest_bytes())
.collect::<Vec<_>>();
for (i, digest_rlc_specified) in std::iter::once(metadata_digest_rlc_specified)
.chain(chunk_digest_evm_rlcs)
@@ -845,7 +855,7 @@ impl BlobDataConfig {

// compute the keccak input RLC:
// we do this only for the metadata and chunks, not for the blob row itself.
-if i < MAX_AGG_SNARKS + 1 + 1 {
+if i < N_SNARKS + 1 + 1 {
let digest_keccak_rlc =
rlc_config.rlc(region, &digest_bytes, &r_keccak, &mut rlc_config_offset)?;
challenge_digest_preimage_keccak_rlc = rlc_config.mul_add(
@@ -869,19 +879,19 @@ impl BlobDataConfig {
let mut blob_fields: Vec<Vec<AssignedCell<Fr, Fr>>> = Vec::with_capacity(BLOB_WIDTH);
let blob_bytes = assigned_rows
.iter()
-.take(N_ROWS_METADATA + N_ROWS_DATA)
+.take(n_rows_metadata + n_rows_data)
.map(|row| row.byte.clone())
.collect::<Vec<_>>();
for chunk in blob_bytes.chunks_exact(N_DATA_BYTES_PER_COEFFICIENT) {
// blob bytes are supposed to be deserialised in big-endianness. However, we
// have the export from BarycentricConfig in little-endian bytes.
blob_fields.push(chunk.iter().rev().cloned().collect());
}
-let mut chunk_data_digests = Vec::with_capacity(MAX_AGG_SNARKS);
+let mut chunk_data_digests = Vec::with_capacity(N_SNARKS);
let chunk_data_digests_bytes = assigned_rows
.iter()
-.skip(N_ROWS_METADATA + N_ROWS_DATA + N_ROWS_DIGEST_RLC + N_BYTES_U256)
-.take(MAX_AGG_SNARKS * N_BYTES_U256)
+.skip(n_rows_metadata + n_rows_data + n_rows_digest_rlc + N_BYTES_U256)
+.take(N_SNARKS * N_BYTES_U256)
.map(|row| row.byte.clone())
.collect::<Vec<_>>();
for chunk in chunk_data_digests_bytes.chunks_exact(N_BYTES_U256) {
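The skip/take chains above walk a fixed vertical layout: metadata rows, then data rows, then digest-RLC rows, then digest-byte rows. A sketch of that bookkeeping, taking the per-section row counts as inputs because their formulas live in `blob.rs`, outside this diff:

```rust
// Derive the (start, end) row ranges of the four blob sections from their
// row counts, mirroring the skip(..)/take(..) chains in the assignment code.
fn section_ranges(
    n_rows_metadata: usize,
    n_rows_data: usize,
    n_rows_digest_rlc: usize,
    n_rows_digest_bytes: usize,
) -> [(usize, usize); 4] {
    let metadata = (0, n_rows_metadata);
    let data = (metadata.1, metadata.1 + n_rows_data);
    let digest_rlc = (data.1, data.1 + n_rows_digest_rlc);
    let digest_bytes = (digest_rlc.1, digest_rlc.1 + n_rows_digest_bytes);
    [metadata, data, digest_rlc, digest_bytes]
}

fn main() {
    // Toy counts only; real values come from BlobData::<N_SNARKS>'s helpers.
    let [metadata, data, digest_rlc, digest_bytes] = section_ranges(62, 100, 18, 64);
    assert_eq!(metadata, (0, 62));
    assert_eq!(data, (62, 162));
    assert_eq!(digest_rlc, (162, 180));
    assert_eq!(digest_bytes, (180, 244));
    // e.g. the data-selector loop runs over metadata.1 .. data.1.
}
```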
