diff --git a/aggregator/src/aggregation/barycentric.rs b/aggregator/src/aggregation/barycentric.rs
index e6e6799065..3c318a892d 100644
--- a/aggregator/src/aggregation/barycentric.rs
+++ b/aggregator/src/aggregation/barycentric.rs
@@ -349,7 +349,10 @@ pub fn interpolate(z: Scalar, coefficients: &[Scalar; BLOB_WIDTH]) -> Scalar {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::blob::{BlobData, KZG_TRUSTED_SETUP};
+    use crate::{
+        blob::{BlobData, KZG_TRUSTED_SETUP},
+        MAX_AGG_SNARKS,
+    };
     use c_kzg::{Blob as RethBlob, KzgProof};
     use std::collections::BTreeSet;
 
@@ -386,7 +389,7 @@ mod tests {
 
     #[test]
     fn interpolate_matches_reth_implementation() {
-        let blob = BlobData::from(&vec![
+        let blob = BlobData::<MAX_AGG_SNARKS>::from(&vec![
            vec![30; 56],
            vec![200; 100],
            vec![0; 340],
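The test above checks `interpolate` against the reth/c-kzg implementation. For readers unfamiliar with the underlying identity, here is a minimal, self-contained sketch of barycentric evaluation over a toy field (GF(17), domain size 4). The constants and helper names are mine and only illustrate the formula the circuit relies on, not its in-circuit implementation:

```rust
// Barycentric evaluation over GF(17) with a primitive 4th root of unity w = 4:
//   P(z) = (z^N - 1)/N * sum_i d[i] * w^i / (z - w^i),  d[i] = P(w^i)
const P: u64 = 17;
const N: usize = 4;
const W: u64 = 4;

fn pow_mod(mut b: u64, mut e: u64) -> u64 {
    let mut acc = 1;
    b %= P;
    while e > 0 {
        if e & 1 == 1 {
            acc = acc * b % P;
        }
        b = b * b % P;
        e >>= 1;
    }
    acc
}

// Fermat inverse; valid because P is prime.
fn inv(a: u64) -> u64 {
    pow_mod(a, P - 2)
}

fn barycentric(d: &[u64; N], z: u64) -> u64 {
    let factor = (pow_mod(z, N as u64) + P - 1) % P * inv(N as u64) % P;
    let mut sum = 0;
    for (i, &di) in d.iter().enumerate() {
        let wi = pow_mod(W, i as u64);
        sum = (sum + di * wi % P * inv((z + P - wi) % P)) % P;
    }
    factor * sum % P
}

fn main() {
    // poly(x) = 3 + 5x + 2x^2 + 7x^3 (mod 17)
    let poly = |x: u64| (3 + 5 * x + 2 * pow_mod(x, 2) + 7 * pow_mod(x, 3)) % P;
    // Evaluations of poly over the domain {w^0, w^1, w^2, w^3} = {1, 4, 16, 13}.
    let evals: [u64; N] = core::array::from_fn(|i| poly(pow_mod(W, i as u64)));
    // z must avoid the evaluation domain.
    let z = 5;
    assert_eq!(barycentric(&evals, z), poly(z));
    println!("P(z) = {}", barycentric(&evals, z));
}
```

The production circuit does the same thing with `BLOB_WIDTH = 4096` coefficients over the BLS12-381 scalar field.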
diff --git a/aggregator/src/aggregation/blob_data.rs b/aggregator/src/aggregation/blob_data.rs
index ea99c87447..87d3d545c3 100644
--- a/aggregator/src/aggregation/blob_data.rs
+++ b/aggregator/src/aggregation/blob_data.rs
@@ -14,15 +14,12 @@ use zkevm_circuits::{
 
 use crate::{
     aggregation::rlc::POWS_OF_256,
-    blob::{
-        BlobData, BLOB_WIDTH, N_BYTES_U256, N_DATA_BYTES_PER_COEFFICIENT, N_ROWS_BLOB_DATA_CONFIG,
-        N_ROWS_DATA, N_ROWS_DIGEST_BYTES, N_ROWS_DIGEST_RLC, N_ROWS_METADATA,
-    },
-    RlcConfig, MAX_AGG_SNARKS,
+    blob::{BlobData, BLOB_WIDTH, N_BYTES_U256, N_DATA_BYTES_PER_COEFFICIENT},
+    RlcConfig,
 };
 
 #[derive(Clone, Debug)]
-pub struct BlobDataConfig {
+pub struct BlobDataConfig<const N_SNARKS: usize> {
     /// The byte value at this row.
     byte: Column<Advice>,
     /// The accumulator serves several purposes.
@@ -32,7 +29,7 @@ pub struct BlobDataConfig {
     /// resets to 1 if we encounter a chunk boundary. The accumulator here is referenced while
     /// doing a lookup to the Keccak table that requires the input length.
     accumulator: Column<Advice>,
-    /// An increasing counter that denotes the chunk ID. The chunk ID is from [1, MAX_AGG_SNARKS].
+    /// An increasing counter that denotes the chunk ID. The chunk ID is from [1, N_SNARKS].
     chunk_idx: Column<Advice>,
     /// A boolean witness that is set only when we encounter the end of a chunk. We enable a lookup
     /// to the Keccak table when the boundary is met.
@@ -53,8 +50,8 @@ pub struct BlobDataConfig {
     pub hash_selector: Selector,
     /// Fixed table that consists of [0, 256).
     u8_table: U8Table,
-    /// Fixed table that consists of [0, MAX_AGG_SNARKS).
-    chunk_idx_range_table: RangeTable<MAX_AGG_SNARKS>,
+    /// Fixed table that consists of [0, N_SNARKS).
+    chunk_idx_range_table: RangeTable<N_SNARKS>,
 }
 
 pub struct AssignedBlobDataExport {
@@ -74,14 +71,16 @@ pub struct AssignedBlobDataConfig {
     pub digest_rlc: AssignedCell<Fr, Fr>,
 }
 
-impl BlobDataConfig {
+impl<const N_SNARKS: usize> BlobDataConfig<N_SNARKS> {
     pub fn configure(
         meta: &mut ConstraintSystem<Fr>,
         challenge: Challenges<Expression<Fr>>,
         u8_table: U8Table,
-        range_table: RangeTable<MAX_AGG_SNARKS>,
+        range_table: RangeTable<N_SNARKS>,
         keccak_table: &KeccakTable,
     ) -> Self {
+        let n_rows_metadata = BlobData::<N_SNARKS>::n_rows_metadata();
+
         let config = Self {
             u8_table,
             chunk_idx_range_table: range_table,
@@ -126,7 +125,7 @@ impl BlobDataConfig {
                 let cond = is_not_hash * is_boundary * (1.expr() - is_padding_next);
                 let chunk_idx_curr = meta.query_advice(config.chunk_idx, Rotation::cur());
                 let chunk_idx_next = meta.query_advice(config.chunk_idx, Rotation::next());
-                // chunk_idx increases by at least 1 and at most MAX_AGG_SNARKS when condition is
+                // chunk_idx increases by at least 1 and at most N_SNARKS when condition is
                 // met.
                 vec![(
                     cond * (chunk_idx_next - chunk_idx_curr - 1.expr()),
@@ -136,7 +135,7 @@ impl BlobDataConfig {
         );
 
         meta.lookup(
-            "BlobDataConfig (chunk_idx for non-padding, data rows in [1..MAX_AGG_SNARKS])",
+            "BlobDataConfig (chunk_idx for non-padding, data rows in [1..N_SNARKS])",
             |meta| {
                 let is_data = meta.query_selector(config.data_selector);
                 let is_padding = meta.query_advice(config.is_padding, Rotation::cur());
@@ -272,7 +271,7 @@ impl BlobDataConfig {
                 let accumulator = meta.query_advice(config.accumulator, Rotation::cur());
 
                 let preimage_len =
-                    is_data.expr() * accumulator + (1.expr() - is_data) * N_ROWS_METADATA.expr();
+                    is_data.expr() * accumulator + (1.expr() - is_data) * n_rows_metadata.expr();
 
                 [
                     1.expr(), // q_enable
@@ -331,7 +330,7 @@ impl BlobDataConfig {
                 // - metadata_digest: 32 bytes
                 // - chunk[i].chunk_data_digest: 32 bytes each
                 // - versioned_hash: 32 bytes
-                let preimage_len = 32.expr() * (MAX_AGG_SNARKS + 1 + 1).expr();
+                let preimage_len = 32.expr() * (N_SNARKS + 1 + 1).expr();
 
                 [
                     1.expr(), // q_enable
@@ -347,6 +346,11 @@ impl BlobDataConfig {
             },
         );
 
+        log::trace!("blob meta degree: {}", meta.degree());
+        log::trace!(
+            "blob meta degree with lookups: {}",
+            meta.clone().chunk_lookups().degree()
+        );
         assert!(meta.degree() <= 5);
 
         config
@@ -360,7 +364,7 @@ impl BlobDataConfig {
         // The chunks_are_padding assigned cells are exports from the conditional constraints in
        // `core.rs`. Since these are already constrained, we can just use them as is.
         chunks_are_padding: &[AssignedCell<Fr, Fr>],
-        blob: &BlobData,
+        blob: &BlobData<N_SNARKS>,
         barycentric_assignments: &[CRTInteger<Fr>],
     ) -> Result<AssignedBlobDataExport, Error> {
         self.load_range_tables(layouter)?;
@@ -393,24 +397,25 @@ impl BlobDataConfig {
         &self,
         region: &mut Region<Fr>,
         challenge_value: Challenges<Value<Fr>>,
-        blob: &BlobData,
+        blob: &BlobData<N_SNARKS>,
     ) -> Result<Vec<AssignedBlobDataConfig>, Error> {
+        let n_rows_data = BlobData::<N_SNARKS>::n_rows_data();
+        let n_rows_metadata = BlobData::<N_SNARKS>::n_rows_metadata();
+
         let rows = blob.to_rows(challenge_value);
-        assert_eq!(rows.len(), N_ROWS_BLOB_DATA_CONFIG);
+        assert_eq!(rows.len(), BlobData::<N_SNARKS>::n_rows());
 
         // enable data selector
-        for offset in N_ROWS_METADATA..N_ROWS_METADATA + N_ROWS_DATA {
+        for offset in n_rows_metadata..n_rows_metadata + n_rows_data {
             self.data_selector.enable(region, offset)?;
         }
 
         // enable hash selector
-        for offset in
-            N_ROWS_METADATA + N_ROWS_DATA..N_ROWS_METADATA + N_ROWS_DATA + N_ROWS_DIGEST_RLC
-        {
+        for offset in n_rows_metadata + n_rows_data..BlobData::<N_SNARKS>::n_rows() {
             self.hash_selector.enable(region, offset)?;
         }
 
-        let mut assigned_rows = Vec::with_capacity(N_ROWS_BLOB_DATA_CONFIG);
+        let mut assigned_rows = Vec::with_capacity(BlobData::<N_SNARKS>::n_rows());
         let mut count = 0u64;
         for (i, row) in rows.iter().enumerate() {
             let byte = region.assign_advice(
@@ -437,7 +442,7 @@ impl BlobDataConfig {
                 i,
                 || Value::known(Fr::from(row.is_boundary as u64)),
             )?;
-            let bcount = if (N_ROWS_METADATA..N_ROWS_METADATA + N_ROWS_DATA).contains(&i) {
+            let bcount = if (n_rows_metadata..n_rows_metadata + n_rows_data).contains(&i) {
                 count += row.is_boundary as u64;
                 count
             } else {
@@ -488,6 +493,10 @@ impl BlobDataConfig {
         barycentric_assignments: &[CRTInteger<Fr>],
         assigned_rows: &[AssignedBlobDataConfig],
     ) -> Result<AssignedBlobDataExport, Error> {
+        let n_rows_metadata = BlobData::<N_SNARKS>::n_rows_metadata();
+        let n_rows_digest_rlc = BlobData::<N_SNARKS>::n_rows_digest_rlc();
+        let n_rows_data = BlobData::<N_SNARKS>::n_rows_data();
+
         rlc_config.init(region)?;
         let mut rlc_config_offset = 0;
@@ -506,9 +515,10 @@ impl BlobDataConfig {
         };
         let fixed_chunk_indices = {
             let mut fixed_chunk_indices = vec![one.clone()];
-            for i in 2..=MAX_AGG_SNARKS {
+            for i in 2..=N_SNARKS {
                 let i_cell =
                     rlc_config.load_private(region, &Fr::from(i as u64), &mut rlc_config_offset)?;
+                // TODO: look into this....
                 let i_fixed_cell =
                     rlc_config.fixed_up_to_max_agg_snarks_cell(i_cell.cell().region_index, i);
                 region.constrain_equal(i_cell.cell(), i_fixed_cell)?;
@@ -582,8 +592,8 @@ impl BlobDataConfig {
         ////////////////////////////////////////////////////////////////////////////////
 
         let mut num_nonempty_chunks = zero.clone();
-        let mut is_empty_chunks = Vec::with_capacity(MAX_AGG_SNARKS);
-        let mut chunk_sizes = Vec::with_capacity(MAX_AGG_SNARKS);
+        let mut is_empty_chunks = Vec::with_capacity(N_SNARKS);
+        let mut chunk_sizes = Vec::with_capacity(N_SNARKS);
         for (i, is_padded_chunk) in chunks_are_padding.iter().enumerate() {
             let rows = assigned_rows
                 .iter()
@@ -641,7 +651,7 @@ impl BlobDataConfig {
             rlc_config.not(region, &all_chunks_empty, &mut rlc_config_offset)?;
 
         // constrain preimage_rlc column
-        let metadata_rows = &assigned_rows[..N_ROWS_METADATA];
+        let metadata_rows = &assigned_rows[..n_rows_metadata];
         region.constrain_equal(
             metadata_rows[0].byte.cell(),
             metadata_rows[0].preimage_rlc.cell(),
@@ -668,7 +678,7 @@ impl BlobDataConfig {
         }
 
         // in the metadata section, these columns are 0 except (possibly) on the last row.
-        for row in metadata_rows.iter().take(N_ROWS_METADATA - 1) {
+        for row in metadata_rows.iter().take(n_rows_metadata - 1) {
             let cells = [&row.is_boundary, &row.digest_rlc].map(AssignedCell::cell);
 
             for cell in cells {
@@ -679,7 +689,7 @@ impl BlobDataConfig {
         // in the final row of the metadata section, boundary is 1. note that this triggers a keccak
         // lookup which constrains digest_rlc.
         region.constrain_equal(
-            metadata_rows[N_ROWS_METADATA - 1].is_boundary.cell(),
+            metadata_rows[n_rows_metadata - 1].is_boundary.cell(),
             one.cell(),
         )?;
 
@@ -691,8 +701,8 @@ impl BlobDataConfig {
         // there are no non-empty chunks, this will be 0 and must also be a padding row.
         let rows = assigned_rows
             .iter()
-            .skip(N_ROWS_METADATA)
-            .take(N_ROWS_DATA)
+            .skip(n_rows_metadata)
+            .take(n_rows_data)
             .collect::<Vec<_>>();
         rlc_config.conditional_enforce_equal(
             region,
@@ -736,16 +746,16 @@ impl BlobDataConfig {
 
         let rows = assigned_rows
             .iter()
-            .skip(N_ROWS_METADATA + N_ROWS_DATA)
-            .take(N_ROWS_DIGEST_RLC)
+            .skip(n_rows_metadata + n_rows_data)
+            .take(n_rows_digest_rlc)
             .collect::<Vec<_>>();
 
-        // rows have chunk_idx set from 0 (metadata) -> MAX_AGG_SNARKS.
+        // rows have chunk_idx set from 0 (metadata) -> N_SNARKS.
         region.constrain_equal(rows[0].chunk_idx.cell(), zero.cell())?;
         for (row, fixed_chunk_idx) in rows
             .iter()
             .skip(1)
-            .take(MAX_AGG_SNARKS)
+            .take(N_SNARKS)
             .zip_eq(fixed_chunk_indices.iter())
         {
             region.constrain_equal(row.chunk_idx.cell(), fixed_chunk_idx.cell())?;
@@ -753,14 +763,14 @@ impl BlobDataConfig {
 
         let challenge_digest_preimage_rlc_specified = &rows.last().unwrap().preimage_rlc;
         let challenge_digest_rlc_specified = &rows.last().unwrap().digest_rlc;
-        let versioned_hash_rlc = &rows.get(N_ROWS_DIGEST_RLC - 2).unwrap().digest_rlc;
+        let versioned_hash_rlc = &rows.get(n_rows_digest_rlc - 2).unwrap().digest_rlc;
 
         // ensure that on the last row of this section the is_boundary is turned on
         // which would enable the keccak table lookup for challenge_digest
         region.constrain_equal(rows.last().unwrap().is_boundary.cell(), one.cell())?;
 
         let metadata_digest_rlc_computed =
-            &assigned_rows.get(N_ROWS_METADATA - 1).unwrap().digest_rlc;
+            &assigned_rows.get(n_rows_metadata - 1).unwrap().digest_rlc;
         let metadata_digest_rlc_specified = &rows.first().unwrap().digest_rlc;
         region.constrain_equal(
             metadata_digest_rlc_computed.cell(),
@@ -773,7 +783,7 @@ impl BlobDataConfig {
         // Also, we know that the first chunk is valid. So we can just start the check from
         // the second chunk's data digest.
         region.constrain_equal(chunks_are_padding[0].cell(), zero.cell())?;
-        for i in 1..MAX_AGG_SNARKS {
+        for i in 1..N_SNARKS {
             // Note that in `rows`, the first row is the metadata row (hence anyway skip
             // it). That's why we have a +1.
             rlc_config.conditional_enforce_equal(
@@ -785,11 +795,11 @@ impl BlobDataConfig {
             )?;
         }
 
-        let mut chunk_digest_evm_rlcs = Vec::with_capacity(MAX_AGG_SNARKS);
+        let mut chunk_digest_evm_rlcs = Vec::with_capacity(N_SNARKS);
         for (((row, chunk_size_decoded), is_empty), is_padded_chunk) in rows
             .iter()
             .skip(1)
-            .take(MAX_AGG_SNARKS)
+            .take(N_SNARKS)
             .zip_eq(chunk_sizes)
             .zip_eq(is_empty_chunks)
             .zip_eq(chunks_are_padding)
@@ -821,8 +831,8 @@ impl BlobDataConfig {
         let mut challenge_digest_preimage_keccak_rlc = zero.clone();
         let rows = assigned_rows
             .iter()
-            .skip(N_ROWS_METADATA + N_ROWS_DATA + N_ROWS_DIGEST_RLC)
-            .take(N_ROWS_DIGEST_BYTES)
+            .skip(n_rows_metadata + n_rows_data + n_rows_digest_rlc)
+            .take(BlobData::<N_SNARKS>::n_rows_digest_bytes())
             .collect::<Vec<_>>();
         for (i, digest_rlc_specified) in std::iter::once(metadata_digest_rlc_specified)
             .chain(chunk_digest_evm_rlcs)
@@ -845,7 +855,7 @@ impl BlobDataConfig {
 
             // compute the keccak input RLC:
             // we do this only for the metadata and chunks, not for the blob row itself.
-            if i < MAX_AGG_SNARKS + 1 + 1 {
+            if i < N_SNARKS + 1 + 1 {
                 let digest_keccak_rlc =
                     rlc_config.rlc(region, &digest_bytes, &r_keccak, &mut rlc_config_offset)?;
                 challenge_digest_preimage_keccak_rlc = rlc_config.mul_add(
@@ -869,7 +879,7 @@ impl BlobDataConfig {
         let mut blob_fields: Vec<Vec<AssignedCell<Fr, Fr>>> = Vec::with_capacity(BLOB_WIDTH);
         let blob_bytes = assigned_rows
             .iter()
-            .take(N_ROWS_METADATA + N_ROWS_DATA)
+            .take(n_rows_metadata + n_rows_data)
             .map(|row| row.byte.clone())
             .collect::<Vec<_>>();
         for chunk in blob_bytes.chunks_exact(N_DATA_BYTES_PER_COEFFICIENT) {
             // blob bytes are supposed to be deserialized in big-endian order. However, we
             // have the export from BarycentricConfig in little-endian bytes.
             blob_fields.push(chunk.iter().rev().cloned().collect());
         }
-        let mut chunk_data_digests = Vec::with_capacity(MAX_AGG_SNARKS);
+        let mut chunk_data_digests = Vec::with_capacity(N_SNARKS);
         let chunk_data_digests_bytes = assigned_rows
             .iter()
-            .skip(N_ROWS_METADATA + N_ROWS_DATA + N_ROWS_DIGEST_RLC + N_BYTES_U256)
-            .take(MAX_AGG_SNARKS * N_BYTES_U256)
+            .skip(n_rows_metadata + n_rows_data + n_rows_digest_rlc + N_BYTES_U256)
+            .take(N_SNARKS * N_BYTES_U256)
             .map(|row| row.byte.clone())
             .collect::<Vec<_>>();
         for chunk in chunk_data_digests_bytes.chunks_exact(N_BYTES_U256) {
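`BlobDataConfig` sizes its metadata section as `n_rows_metadata = 2 + 4 * N_SNARKS`: two big-endian bytes for `num_valid_chunks` followed by four big-endian bytes per chunk size. A standalone sketch of that byte layout, following the `to_metadata_bytes` doc comment in blob.rs (the free function below is my own illustration):

```rust
/// Encode blob metadata: be_bytes(num_valid_chunks) || be_bytes(chunk_size[i]) for all i.
/// The result is always 2 + 4 * N_SNARKS bytes, i.e. n_rows_metadata.
fn to_metadata_bytes<const N_SNARKS: usize>(
    num_valid_chunks: u16,
    chunk_sizes: &[u32; N_SNARKS],
) -> Vec<u8> {
    let bytes: Vec<u8> = num_valid_chunks
        .to_be_bytes()
        .into_iter()
        .chain(chunk_sizes.iter().flat_map(|size| size.to_be_bytes()))
        .collect();
    assert_eq!(bytes.len(), 2 + 4 * N_SNARKS);
    bytes
}

fn main() {
    // A 4-snark layout with two valid chunks of 56 and 100 bytes.
    let metadata = to_metadata_bytes::<4>(2, &[56, 100, 0, 0]);
    assert_eq!(metadata.len(), 18);
    assert_eq!(&metadata[..2], &[0, 2]); // num_valid_chunks, big-endian
    assert_eq!(&metadata[2..6], &[0, 0, 0, 56]); // chunk[0].chunk_size
    println!("{metadata:?}");
}
```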
diff --git a/aggregator/src/aggregation/circuit.rs b/aggregator/src/aggregation/circuit.rs
index e544528f99..db817b39bd 100644
--- a/aggregator/src/aggregation/circuit.rs
+++ b/aggregator/src/aggregation/circuit.rs
@@ -28,7 +28,7 @@ use zkevm_circuits::util::Challenges;
 
 use crate::{
     batch::BatchHash,
-    constants::{ACC_LEN, DIGEST_LEN, MAX_AGG_SNARKS},
+    constants::{ACC_LEN, DIGEST_LEN},
     core::{assign_batch_hashes, extract_proof_and_instances_with_pairing_check},
     util::parse_hash_digest_cells,
     AssignedBarycentricEvaluationConfig, ConfigParams,
@@ -38,10 +38,10 @@ use super::AggregationConfig;
 
 /// Aggregation circuit that does not re-expose any public inputs from aggregated snarks
 #[derive(Clone)]
-pub struct AggregationCircuit {
+pub struct AggregationCircuit<const N_SNARKS: usize> {
     pub svk: KzgSuccinctVerifyingKey<G1Affine>,
     // the input snarks for the aggregation circuit
-    // it is padded already so it will have a fixed length of MAX_AGG_SNARKS
+    // it is padded already so it will have a fixed length of N_SNARKS
     pub snarks_with_padding: Vec<Snark>,
     // the public instance for this circuit consists of
     // - an accumulator (12 elements)
@@ -51,15 +51,15 @@ pub struct AggregationCircuit {
     pub as_proof: Value<Vec<u8>>,
     // batch hash circuit for which the snarks are generated
     // the chunks in this batch are also padded already
-    pub batch_hash: BatchHash,
+    pub batch_hash: BatchHash<N_SNARKS>,
 }
 
-impl AggregationCircuit {
+impl<const N_SNARKS: usize> AggregationCircuit<N_SNARKS> {
     pub fn new(
         params: &ParamsKZG<Bn256>,
         snarks_with_padding: &[Snark],
         rng: impl Rng + Send,
-        batch_hash: BatchHash,
+        batch_hash: BatchHash<N_SNARKS>,
     ) -> Result<Self, snark_verifier::Error> {
         let timer = start_timer!(|| "generate aggregation circuit");
@@ -118,8 +118,8 @@ impl AggregationCircuit {
     }
 }
 
-impl Circuit<Fr> for AggregationCircuit {
-    type Config = (AggregationConfig, Challenges);
+impl<const N_SNARKS: usize> Circuit<Fr> for AggregationCircuit<N_SNARKS> {
+    type Config = (AggregationConfig<N_SNARKS>, Challenges);
     type FloorPlanner = SimpleFloorPlanner;
     fn without_witnesses(&self) -> Self {
         unimplemented!()
@@ -281,7 +281,7 @@ impl Circuit<Fr> for AggregationCircuit {
                 },
             )?;
 
-            assert_eq!(snark_inputs.len(), MAX_AGG_SNARKS * DIGEST_LEN);
+            assert_eq!(snark_inputs.len(), N_SNARKS * DIGEST_LEN);
             (accumulator_instances, snark_inputs, barycentric)
         };
         end_timer!(timer);
@@ -293,7 +293,7 @@ impl Circuit<Fr> for AggregationCircuit {
 
         let timer = start_timer!(|| "load aux table");
 
-        let assigned_batch_hash = {
+        let assigned_blobs = {
             config
                 .keccak_circuit_config
                 .load_aux_tables(&mut layouter)?;
@@ -302,7 +302,7 @@ impl Circuit<Fr> for AggregationCircuit {
             let timer = start_timer!(|| "extract hash");
             // orders:
             // - batch_public_input_hash
-            // - chunk\[i\].piHash for i in \[0, MAX_AGG_SNARKS)
+            // - chunk\[i\].piHash for i in \[0, N_SNARKS)
             // - batch_data_hash_preimage
             // - preimage for blob metadata
             // - preimage of chunk data digest (only for valid chunks)
@@ -310,7 +310,7 @@ impl Circuit<Fr> for AggregationCircuit {
             let preimages = self.batch_hash.extract_hash_preimages();
             assert_eq!(
                 preimages.len(),
-                4 + MAX_AGG_SNARKS + self.batch_hash.number_of_valid_chunks,
+                4 + N_SNARKS + self.batch_hash.number_of_valid_chunks,
                 "error extracting preimages"
             );
             end_timer!(timer);
@@ -322,36 +322,32 @@ impl Circuit<Fr> for AggregationCircuit {
                 .iter()
                 .map(|chunk| !chunk.is_padding)
                 .collect::<Vec<_>>();
-            let assigned_batch_hash = assign_batch_hashes(
-                &config,
+            let assigned_blobs = assign_batch_hashes::<N_SNARKS>(
+                &config.keccak_circuit_config,
+                &config.rlc_config,
                 &mut layouter,
                 challenges,
                 &chunks_are_valid,
+                self.batch_hash.number_of_valid_chunks,
                 &preimages,
             )
             .map_err(|_e| Error::ConstraintSystemFailure)?;
             end_timer!(timer);
 
-            assigned_batch_hash
+            assigned_blobs
         };
 
         // digests
         let (batch_pi_hash_digest, chunk_pi_hash_digests, _potential_batch_data_hash_digest) =
-            parse_hash_digest_cells(&assigned_batch_hash.hash_output);
+            parse_hash_digest_cells::<N_SNARKS>(&assigned_blobs.hash_output);
 
         // ==============================================
         // step 3: assert public inputs to the snarks are correct
        // ==============================================
         for (i, chunk) in chunk_pi_hash_digests.iter().enumerate() {
             let hash = self.batch_hash.chunks_with_padding[i].public_input_hash();
-            for j in 0..4 {
-                for k in 0..8 {
-                    log::trace!(
-                        "pi {:02x} {:?}",
-                        hash[j * 8 + k],
-                        chunk[8 * (3 - j) + k].value()
-                    );
-                }
+            for j in 0..DIGEST_LEN {
+                log::trace!("pi {:02x} {:?}", hash[j], chunk[j].value());
             }
         }
 
@@ -369,29 +365,23 @@ impl Circuit<Fr> for AggregationCircuit {
                     return Ok(());
                 }
 
-                for i in 0..MAX_AGG_SNARKS {
-                    for j in 0..4 {
-                        for k in 0..8 {
-                            let mut t1 = Fr::default();
-                            let mut t2 = Fr::default();
-                            chunk_pi_hash_digests[i][j * 8 + k].value().map(|x| t1 = *x);
-                            snark_inputs[i * DIGEST_LEN + (3 - j) * 8 + k]
-                                .value()
-                                .map(|x| t2 = *x);
-                            log::trace!(
-                                "{}-th snark: {:?} {:?}",
-                                i,
-                                chunk_pi_hash_digests[i][j * 8 + k].value(),
-                                snark_inputs[i * DIGEST_LEN + (3 - j) * 8 + k].value()
-                            );
-
-                            region.constrain_equal(
-                                // in the keccak table, the input and output data have different
-                                // endianess
-                                chunk_pi_hash_digests[i][j * 8 + k].cell(),
-                                snark_inputs[i * DIGEST_LEN + (3 - j) * 8 + k].cell(),
-                            )?;
-                        }
+                for i in 0..N_SNARKS {
+                    for j in 0..DIGEST_LEN {
+                        let mut t1 = Fr::default();
+                        let mut t2 = Fr::default();
+                        chunk_pi_hash_digests[i][j].value().map(|x| t1 = *x);
+                        snark_inputs[i * DIGEST_LEN + j].value().map(|x| t2 = *x);
+                        log::trace!(
+                            "{}-th snark: {:?} {:?}",
+                            i,
+                            chunk_pi_hash_digests[i][j].value(),
+                            snark_inputs[i * DIGEST_LEN + j].value()
+                        );
+
+                        region.constrain_equal(
+                            chunk_pi_hash_digests[i][j].cell(),
+                            snark_inputs[i * DIGEST_LEN + j].cell(),
+                        )?;
                     }
                 }
 
@@ -412,20 +402,18 @@ impl Circuit<Fr> for AggregationCircuit {
         }
 
         // public input hash
-        for i in 0..4 {
-            for j in 0..8 {
-                log::trace!(
-                    "pi (circuit vs real): {:?} {:?}",
-                    batch_pi_hash_digest[i * 8 + j].value(),
-                    self.instances()[0][(3 - i) * 8 + j + ACC_LEN]
-                );
+        for (index, batch_pi_hash_digest_cell) in batch_pi_hash_digest.iter().enumerate() {
+            log::trace!(
+                "pi (circuit vs real): {:?} {:?}",
+                batch_pi_hash_digest_cell.value(),
+                self.instances()[0][index + ACC_LEN]
+            );
 
-                layouter.constrain_instance(
-                    batch_pi_hash_digest[i * 8 + j].cell(),
-                    config.instance,
-                    (3 - i) * 8 + j + ACC_LEN,
-                )?;
-            }
+            layouter.constrain_instance(
+                batch_pi_hash_digest_cell.cell(),
+                config.instance,
+                index + ACC_LEN,
+            )?;
         }
 
         // blob data config
@@ -439,7 +427,7 @@ impl Circuit<Fr> for AggregationCircuit {
             &mut layouter,
             challenges,
             &config.rlc_config,
-            &assigned_batch_hash.chunks_are_padding,
+            &assigned_blobs.chunks_are_padding,
             &blob_data,
             barycentric_assignments,
         )?;
@@ -448,42 +436,46 @@ impl Circuit<Fr> for AggregationCircuit {
             || "blob checks",
             |mut region| -> Result<(), Error> {
                 region.constrain_equal(
-                    assigned_batch_hash.num_valid_snarks.cell(),
+                    assigned_blobs.num_valid_snarks.cell(),
                     blob_data_exports.num_valid_chunks.cell(),
                 )?;
 
                 for (chunk_data_digest, expected_chunk_data_digest) in blob_data_exports
                     .chunk_data_digests
                     .iter()
-                    .zip_eq(assigned_batch_hash.blob.chunk_tx_data_digests.iter())
+                    .zip_eq(assigned_blobs.blob.chunk_tx_data_digests.iter())
                 {
                     for (c, ec) in chunk_data_digest
                         .iter()
                         .zip_eq(expected_chunk_data_digest.iter())
                     {
+                        log::trace!("blob chunk tx: {:?} {:?}", c.value(), ec.value());
                         region.constrain_equal(c.cell(), ec.cell())?;
                     }
                 }
 
                 for (c, ec) in evaluation_le
                     .iter()
-                    .zip_eq(assigned_batch_hash.blob.y.iter().rev())
+                    .zip_eq(assigned_blobs.blob.y.iter().rev())
                 {
+                    log::trace!("blob y: {:?} {:?}", c.value(), ec.value());
                     region.constrain_equal(c.cell(), ec.cell())?;
                 }
 
                 for (c, ec) in challenge_le
                     .iter()
-                    .zip_eq(assigned_batch_hash.blob.z.iter().rev())
+                    .zip_eq(assigned_blobs.blob.z.iter().rev())
                 {
+                    log::trace!("blob z: {:?} {:?}", c.value(), ec.value());
                     region.constrain_equal(c.cell(), ec.cell())?;
                 }
 
                 for (c, ec) in blob_data_exports
                     .versioned_hash
                     .iter()
-                    .zip_eq(assigned_batch_hash.blob.versioned_hash.iter())
+                    .zip_eq(assigned_blobs.blob.versioned_hash.iter())
                 {
+                    log::trace!("blob version hash: {:?} {:?}", c.value(), ec.value());
                     region.constrain_equal(c.cell(), ec.cell())?;
                 }
 
@@ -498,7 +490,7 @@ impl Circuit<Fr> for AggregationCircuit {
     }
 }
 
-impl CircuitExt<Fr> for AggregationCircuit {
+impl<const N_SNARKS: usize> CircuitExt<Fr> for AggregationCircuit<N_SNARKS> {
     fn num_instance(&self) -> Vec<usize> {
         // 12 elements from accumulator
         // 32 elements from batch's public_input_hash
@@ -525,6 +517,7 @@ impl CircuitExt<Fr> for AggregationCircuit {
             .chain(
                 [
                     config.0.rlc_config.selector,
+                    config.0.rlc_config.lookup_gate_selector,
                     config.0.rlc_config.enable_challenge1,
                     config.0.rlc_config.enable_challenge2,
                     config.0.blob_data_config.data_selector,
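The rewritten loops above drop the `(3 - j) * 8 + k` index remapping: the old code reversed the order of the four 8-byte words of a digest to bridge an endianness mismatch between the keccak table's input and output representations, while the new code compares positions directly. A small standalone demonstration of what that mapping did (all names are mine):

```rust
const DIGEST_LEN: usize = 32;

fn main() {
    // A recognizable 32-byte "digest": bytes 0..32.
    let digest: Vec<u8> = (0u8..DIGEST_LEN as u8).collect();

    // Old indexing: byte (j, k) was compared against position (3 - j) * 8 + k,
    // i.e. the order of the four 8-byte words is reversed.
    let mut remapped = [0u8; DIGEST_LEN];
    for j in 0..4 {
        for k in 0..8 {
            remapped[(3 - j) * 8 + k] = digest[j * 8 + k];
        }
    }
    // The last word lands first, and vice versa.
    assert_eq!(&remapped[0..8], &digest[24..32]);

    // New indexing is the identity: both sides are already in the same order.
    let identity: Vec<u8> = (0..DIGEST_LEN).map(|j| digest[j]).collect();
    assert_eq!(identity, digest);
}
```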
diff --git a/aggregator/src/aggregation/config.rs b/aggregator/src/aggregation/config.rs
index a30d90f524..822a4cafa4 100644
--- a/aggregator/src/aggregation/config.rs
+++ b/aggregator/src/aggregation/config.rs
@@ -26,7 +26,7 @@ use crate::{
 #[rustfmt::skip]
 /// Configurations for aggregation circuit.
 /// This config is hard coded for BN256 curve.
-pub struct AggregationConfig {
+pub struct AggregationConfig<const N_SNARKS: usize> {
     /// Non-native field chip configurations
     pub base_field_config: FpConfig<Fr, Fq>,
     /// Keccak circuit configurations
@@ -34,7 +34,7 @@ pub struct AggregationConfig {
     /// RLC config
     pub rlc_config: RlcConfig,
     /// The blob data's config.
-    pub blob_data_config: BlobDataConfig,
+    pub blob_data_config: BlobDataConfig<N_SNARKS>,
     /// Config to do the barycentric evaluation on blob polynomial.
     pub barycentric: BarycentricEvaluationConfig,
     /// Instance for public input; stores
@@ -44,7 +44,7 @@ pub struct AggregationConfig {
     pub instance: Column<Instance>,
 }
 
-impl AggregationConfig {
+impl<const N_SNARKS: usize> AggregationConfig<N_SNARKS> {
     /// Build a configuration from parameters.
     pub fn configure(
         meta: &mut ConstraintSystem<Fr>,
@@ -56,9 +56,6 @@ impl AggregationConfig {
             "For now we fix limb_bits = {BITS}, otherwise change code",
         );
 
-        // RLC configuration
-        let rlc_config = RlcConfig::configure(meta, challenges);
-
         // hash configuration for aggregation circuit
         let (keccak_table, keccak_circuit_config) = {
             let keccak_table = KeccakTable::construct(meta);
@@ -75,6 +72,9 @@ impl AggregationConfig {
             )
         };
 
+        // RLC configuration
+        let rlc_config = RlcConfig::configure(meta, &keccak_table, challenges);
+
         // base field configuration for aggregation circuit
         let base_field_config = FpConfig::configure(
             meta,
diff --git a/aggregator/src/aggregation/rlc/config.rs b/aggregator/src/aggregation/rlc/config.rs
index f8b7846d30..2922eec2f2 100644
--- a/aggregator/src/aggregation/rlc/config.rs
+++ b/aggregator/src/aggregation/rlc/config.rs
@@ -6,7 +6,11 @@ use halo2_proofs::{
 #[cfg(test)]
 use halo2_proofs::plonk::FirstPhase;
-use zkevm_circuits::util::{Challenges, Expr};
+use itertools::Itertools;
+use zkevm_circuits::{
+    table::{KeccakTable, LookupTable},
+    util::{Challenges, Expr},
+};
 
 /// This config is used to compute RLCs for bytes.
 /// It requires a phase 2 column
@@ -17,14 +21,20 @@ pub struct RlcConfig {
     pub(crate) _phase_1_column: Column<Advice>,
     pub(crate) phase_2_column: Column<Advice>,
     pub(crate) selector: Selector,
+    pub(crate) lookup_gate_selector: Selector,
     pub(crate) fixed: Column<Fixed>,
     pub(crate) enable_challenge1: Selector,
     pub(crate) enable_challenge2: Selector,
 }
 
 impl RlcConfig {
-    pub(crate) fn configure(meta: &mut ConstraintSystem<Fr>, challenge: Challenges) -> Self {
+    pub(crate) fn configure(
+        meta: &mut ConstraintSystem<Fr>,
+        keccak_table: &KeccakTable,
+        challenge: Challenges,
+    ) -> Self {
         let selector = meta.complex_selector();
+        let lookup_gate_selector = meta.complex_selector();
         let enable_challenge1 = meta.complex_selector();
         let enable_challenge2 = meta.complex_selector();
         let challenge_expr = challenge.exprs(meta);
@@ -71,11 +81,29 @@ impl RlcConfig {
             vec![cs1, cs2, cs3]
         });
 
+        meta.lookup_any("rlc keccak lookup", |meta| {
+            let q = meta.query_selector(lookup_gate_selector);
+            let input_rlc = meta.query_advice(phase_2_column, Rotation::cur());
+            let output_rlc = meta.query_advice(phase_2_column, Rotation::next());
+            let data_len = meta.query_advice(phase_2_column, Rotation(2));
+
+            let input_exprs = vec![1.expr(), 1.expr(), input_rlc, data_len, output_rlc];
+            let table_exprs = keccak_table.table_exprs(meta);
+
+            input_exprs
+                .into_iter()
+                .zip_eq(table_exprs)
+                .map(|(input, table)| (q.clone() * input, table))
+                .collect::<Vec<_>>()
+        });
+
         Self {
             #[cfg(test)]
             _phase_1_column,
             phase_2_column,
             selector,
+            lookup_gate_selector,
             fixed,
             enable_challenge1,
             enable_challenge2,
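The new `lookup_any` matches a `(1, 1, input_rlc, data_len, output_rlc)` tuple against the Keccak table's columns. A minimal out-of-circuit sketch of the RLC folding both sides of this lookup rely on, assuming the usual `acc = acc * challenge + byte` convention used by `RlcConfig::rlc`; the toy modulus and all names below are mine:

```rust
// Toy RLC fold over GF(p): rlc(bytes, r) = ((b0*r + b1)*r + b2)*r + ...
const P: u128 = (1u128 << 61) - 1; // a Mersenne-prime stand-in for Fr

fn rlc(bytes: &[u8], r: u128) -> u128 {
    bytes.iter().fold(0u128, |acc, &b| (acc * r + b as u128) % P)
}

fn main() {
    let preimage = b"batch data hash preimage";
    let digest = [0u8; 32]; // stand-in for keccak256(preimage)
    let keccak_input_challenge = 123_456_789;
    let evm_word_challenge = 987_654_321;

    // The three values copied onto consecutive rows of phase_2_column, which the
    // lookup matches against the table's (input_rlc, input_len, output_rlc):
    let input_rlc = rlc(preimage, keccak_input_challenge);
    let output_rlc = rlc(&digest, evm_word_challenge);
    let data_len = preimage.len() as u128;
    println!("lookup row: ({input_rlc}, {output_rlc}, {data_len})");
}
```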
diff --git a/aggregator/src/aggregation/rlc/gates.rs b/aggregator/src/aggregation/rlc/gates.rs
index 946008fd25..dea2ac8d4d 100644
--- a/aggregator/src/aggregation/rlc/gates.rs
+++ b/aggregator/src/aggregation/rlc/gates.rs
@@ -7,7 +7,8 @@ use halo2_proofs::{
 };
 use zkevm_circuits::util::Challenges;
 
-use crate::{constants::LOG_DEGREE, util::assert_equal, MAX_AGG_SNARKS};
+// TODO: remove MAX_AGG_SNARKS and make this generic over N_SNARKS
+use crate::{constants::LOG_DEGREE, util::assert_equal, DIGEST_LEN, MAX_AGG_SNARKS};
 
 use super::RlcConfig;
 
@@ -97,7 +98,7 @@ impl RlcConfig {
             )?;
             offset += 1;
         }
-        assert_eq!(offset, FIXED_OFFSET_EMPTY_KECCAK + 32);
+        assert_eq!(offset, FIXED_OFFSET_EMPTY_KECCAK + DIGEST_LEN);
 
         Ok(())
     }
@@ -130,6 +131,7 @@ impl RlcConfig {
     }
 
     #[inline]
+    #[allow(dead_code)]
     pub(crate) fn five_cell(&self, region_index: RegionIndex) -> Cell {
         Cell {
             region_index,
@@ -139,6 +141,7 @@ impl RlcConfig {
     }
 
     #[inline]
+    #[allow(dead_code)]
     pub(crate) fn nine_cell(&self, region_index: RegionIndex) -> Cell {
         Cell {
             region_index,
@@ -148,6 +151,7 @@ impl RlcConfig {
     }
 
     #[inline]
+    #[allow(dead_code)]
     pub(crate) fn thirteen_cell(&self, region_index: RegionIndex) -> Cell {
         Cell {
             region_index,
@@ -171,6 +175,7 @@ impl RlcConfig {
     }
 
     #[inline]
+    #[allow(dead_code)]
     pub(crate) fn thirty_two_cell(&self, region_index: RegionIndex) -> Cell {
         Cell {
             region_index,
@@ -180,6 +185,7 @@ impl RlcConfig {
     }
 
     #[inline]
+    #[allow(dead_code)]
     pub(crate) fn one_hundred_and_sixty_eight_cell(&self, region_index: RegionIndex) -> Cell {
         Cell {
             region_index,
@@ -189,6 +195,7 @@ impl RlcConfig {
     }
 
     #[inline]
+    #[allow(dead_code)]
     pub(crate) fn two_hundred_and_thirty_two_cell(&self, region_index: RegionIndex) -> Cell {
         Cell {
             region_index,
@@ -522,6 +529,7 @@ impl RlcConfig {
     // decompose a field element into log_size bits of boolean cells
     // require the input to be less than 2^log_size
     // require log_size < 254
+    #[allow(dead_code)]
     pub(crate) fn decomposition(
         &self,
         region: &mut Region<Fr>,
@@ -594,6 +602,7 @@ impl RlcConfig {
 
     // return a boolean if a is smaller than b
     // requires that both a and b are less than 32 bits
+    #[allow(dead_code)]
     pub(crate) fn is_smaller_than(
         &self,
         region: &mut Region<Fr>,
@@ -705,6 +714,36 @@ impl RlcConfig {
         let diff = self.sub(region, a, b, offset)?;
         self.is_zero(region, &diff, offset)
     }
+
+    // lookup the input and output rlcs from the lookup table
+    pub(crate) fn lookup_keccak_rlcs(
+        &self,
+        region: &mut Region<Fr>,
+        input_rlcs: &AssignedCell<Fr, Fr>,
+        output_rlcs: &AssignedCell<Fr, Fr>,
+        data_len: &AssignedCell<Fr, Fr>,
+        offset: &mut usize,
+    ) -> Result<(), Error> {
+        self.lookup_gate_selector.enable(region, *offset)?;
+        let _input_rlcs_copied =
+            input_rlcs.copy_advice(|| "lookup input rlc", region, self.phase_2_column, *offset)?;
+        let _output_rlcs_copied = output_rlcs.copy_advice(
+            || "lookup output rlc",
+            region,
+            self.phase_2_column,
+            *offset + 1,
+        )?;
+        let _data_len = data_len.copy_advice(
+            || "lookup data len",
+            region,
+            self.phase_2_column,
+            *offset + 2,
+        )?;
+
+        *offset += 3;
+
+        Ok(())
+    }
 }
 
 #[inline]
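`decomposition` and `is_smaller_than` are kept but marked `#[allow(dead_code)]` now that the keccak checks go through the lookup. For reference, here is a self-contained sketch of the witness-side logic: bit-decompose a value, check the recomposition identity the in-circuit gate enforces, and one standard way a comparison can be read off such a decomposition. This illustrates the general technique under my own naming, not necessarily the exact gate layout used here:

```rust
/// Decompose `value` into `log_size` little-endian bits and verify the
/// recomposition identity `value = sum_i bit_i * 2^i`; panics if it doesn't fit.
fn decompose(value: u64, log_size: usize) -> Vec<u64> {
    assert!(log_size < 64 && value < (1 << log_size));
    let bits: Vec<u64> = (0..log_size).map(|i| (value >> i) & 1).collect();
    let recomposed: u64 = bits.iter().enumerate().map(|(i, bit)| bit << i).sum();
    assert_eq!(recomposed, value);
    bits
}

fn main() {
    // For 32-bit a, b, a comparison can be read off the top bit of the
    // 33-bit decomposition of a + 2^32 - b: no carry out means a < b.
    let (a, b) = (1_000u64, 2_000u64);
    let bits = decompose(a + (1 << 32) - b, 33);
    let a_smaller = bits[32] == 0;
    assert!(a_smaller);
    println!("a < b: {a_smaller}");
}
```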
diff --git a/aggregator/src/batch.rs b/aggregator/src/batch.rs
index d865f25835..c1bf974e3d 100644
--- a/aggregator/src/batch.rs
+++ b/aggregator/src/batch.rs
@@ -7,23 +7,22 @@ use ethers_core::utils::keccak256;
 use crate::{
     blob::{BlobAssignments, BlobData},
     chunk::ChunkHash,
-    constants::MAX_AGG_SNARKS,
 };
 
 #[derive(Default, Debug, Clone)]
-/// A batch is a set of MAX_AGG_SNARKS num of continuous chunks
+/// A batch is a set of N_SNARKS num of continuous chunks
 /// - the first k chunks are from real traces
-/// - the last (#MAX_AGG_SNARKS-k) chunks are from empty traces
+/// - the last (#N_SNARKS-k) chunks are from empty traces
 /// A BatchHash consists of 2 hashes.
 /// - batch_pi_hash := keccak(chain_id || chunk_0.prev_state_root || chunk_k-1.post_state_root ||
 ///   chunk_k-1.withdraw_root || batch_data_hash)
 /// - batch_data_hash := keccak(chunk_0.data_hash || ... || chunk_k-1.data_hash)
-pub struct BatchHash {
+pub struct BatchHash<const N_SNARKS: usize> {
     /// Chain ID of the network.
     pub(crate) chain_id: u64,
     /// chunks with padding.
     /// - the first [0..number_of_valid_chunks) are real ones
-    /// - the last [number_of_valid_chunks, MAX_AGG_SNARKS) are padding
+    /// - the last [number_of_valid_chunks, N_SNARKS) are padding
     pub(crate) chunks_with_padding: Vec<ChunkHash>,
     /// The batch data hash:
     /// - keccak256([chunk.hash for chunk in batch])
@@ -40,14 +39,14 @@ pub struct BatchHash {
     pub(crate) versioned_hash: H256,
 }
 
-impl BatchHash {
-    /// Build Batch hash from an ordered list of #MAX_AGG_SNARKS of chunks.
+impl<const N_SNARKS: usize> BatchHash<N_SNARKS> {
+    /// Build Batch hash from an ordered list of #N_SNARKS of chunks.
     #[allow(dead_code)]
     pub fn construct(chunks_with_padding: &[ChunkHash]) -> Self {
         assert_eq!(
             chunks_with_padding.len(),
-            MAX_AGG_SNARKS,
-            "input chunk slice does not match MAX_AGG_SNARKS"
+            N_SNARKS,
+            "input chunk slice does not match N_SNARKS"
         );
 
         let number_of_valid_chunks = match chunks_with_padding
@@ -56,7 +55,7 @@ impl BatchHash {
             .find(|(_index, chunk)| chunk.is_padding)
         {
             Some((index, _)) => index,
-            None => MAX_AGG_SNARKS,
+            None => N_SNARKS,
         };
 
         assert_ne!(
@@ -74,7 +73,7 @@ impl BatchHash {
         // sanity checks
         // ========================
         // todo: return errors instead
-        for i in 0..MAX_AGG_SNARKS - 1 {
+        for i in 0..N_SNARKS - 1 {
             assert_eq!(
                 chunks_with_padding[i].chain_id,
                 chunks_with_padding[i + 1].chain_id,
@@ -118,7 +117,7 @@ impl BatchHash {
             .collect::<Vec<_>>();
         let batch_data_hash = keccak256(preimage);
 
-        let blob_data = BlobData::new(number_of_valid_chunks, chunks_with_padding);
+        let blob_data = BlobData::<N_SNARKS>::new(number_of_valid_chunks, chunks_with_padding);
         let blob_assignments = BlobAssignments::from(&blob_data);
         let versioned_hash = blob_data.get_versioned_hash();
 
@@ -136,12 +135,8 @@ impl BatchHash {
         let preimage = [
             chunks_with_padding[0].chain_id.to_be_bytes().as_ref(),
             chunks_with_padding[0].prev_state_root.as_bytes(),
-            chunks_with_padding[MAX_AGG_SNARKS - 1]
-                .post_state_root
-                .as_bytes(),
-            chunks_with_padding[MAX_AGG_SNARKS - 1]
-                .withdraw_root
-                .as_bytes(),
+            chunks_with_padding[N_SNARKS - 1].post_state_root.as_bytes(),
+            chunks_with_padding[N_SNARKS - 1].withdraw_root.as_bytes(),
             batch_data_hash.as_slice(),
             blob_assignments.challenge.to_be_bytes().as_ref(),
             blob_assignments.evaluation.to_be_bytes().as_ref(),
@@ -174,14 +169,14 @@ impl BatchHash {
     }
 
     /// Extract all the hash inputs that will ever be used.
-    /// There are MAX_AGG_SNARKS + 2 hashes.
+    /// There are N_SNARKS + 2 hashes.
     ///
     /// orders:
     /// - batch_public_input_hash
-    /// - chunk\[i\].piHash for i in \[0, MAX_AGG_SNARKS)
+    /// - chunk\[i\].piHash for i in \[0, N_SNARKS)
     /// - batch_data_hash_preimage
     /// - preimage for blob metadata
-    /// - chunk\[i\].flattened_l2_signed_data for i in \[0, MAX_AGG_SNARKS)
+    /// - chunk\[i\].flattened_l2_signed_data for i in \[0, N_SNARKS)
     /// - preimage for challenge digest
     pub(crate) fn extract_hash_preimages(&self) -> Vec<Vec<u8>> {
         let mut res = vec![];
@@ -200,10 +195,10 @@ impl BatchHash {
         let batch_public_input_hash_preimage = [
             self.chain_id.to_be_bytes().as_ref(),
             self.chunks_with_padding[0].prev_state_root.as_bytes(),
-            self.chunks_with_padding[MAX_AGG_SNARKS - 1]
+            self.chunks_with_padding[N_SNARKS - 1]
                 .post_state_root
                 .as_bytes(),
-            self.chunks_with_padding[MAX_AGG_SNARKS - 1]
+            self.chunks_with_padding[N_SNARKS - 1]
                 .withdraw_root
                 .as_bytes(),
             self.data_hash.as_bytes(),
@@ -214,7 +209,7 @@ impl BatchHash {
         .concat();
         res.push(batch_public_input_hash_preimage);
 
-        // compute piHash for each chunk for i in [0..MAX_AGG_SNARKS)
+        // compute piHash for each chunk for i in [0..N_SNARKS)
         // chunk[i].piHash =
         //     keccak(
         //        chain id ||
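The batch public-input hash preimage is a fixed concatenation of fields. Below is a standalone sketch of that layout using `ethers_core::utils::keccak256` as this file does. The dummy values are mine, and the 232-byte total assumes the versioned hash is the final field after the challenge `z` and evaluation `y` (consistent with `BATCH_VH_OFFSET` in constants.rs and the `two_hundred_and_thirty_two_cell` fixed constant in rlc/gates.rs):

```rust
use ethers_core::utils::keccak256;

fn main() {
    // Dummy stand-ins for the real roots/digests (all 32-byte values).
    let chain_id: u64 = 534_352;
    let prev_state_root = [1u8; 32];
    let post_state_root = [2u8; 32];
    let withdraw_root = [3u8; 32];
    let batch_data_hash = [4u8; 32];
    let z = [5u8; 32]; // challenge point, big-endian
    let y = [6u8; 32]; // evaluation, big-endian
    let versioned_hash = [7u8; 32];

    // preimage = chain_id || prev_state_root || post_state_root ||
    //            withdraw_root || batch_data_hash || z || y || versioned_hash
    let preimage = [
        chain_id.to_be_bytes().as_ref(),
        prev_state_root.as_ref(),
        post_state_root.as_ref(),
        withdraw_root.as_ref(),
        batch_data_hash.as_ref(),
        z.as_ref(),
        y.as_ref(),
        versioned_hash.as_ref(),
    ]
    .concat();
    assert_eq!(preimage.len(), 8 + 7 * 32); // 232 bytes
    println!("batch_pi_hash = {:x?}", keccak256(&preimage));
}
```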
diff --git a/aggregator/src/blob.rs b/aggregator/src/blob.rs
index 323dbe77c4..9db00542de 100644
--- a/aggregator/src/blob.rs
+++ b/aggregator/src/blob.rs
@@ -1,6 +1,6 @@
 use crate::{
     aggregation::{interpolate, BLS_MODULUS},
-    BatchHash, ChunkHash, MAX_AGG_SNARKS,
+    BatchHash, ChunkHash,
 };
 
 use eth_types::{ToBigEndian, H256, U256};
@@ -35,37 +35,11 @@ pub const N_DATA_BYTES_PER_COEFFICIENT: usize = 31;
 /// Data config. Since num_valid_chunks is u16, we use 2 bytes/rows.
 pub const N_ROWS_NUM_CHUNKS: usize = 2;
 
-/// The number of rows to encode the size of each chunk in a batch, in the Blob Data config.
-/// chunk_size is u32, we use 4 bytes/rows.
-pub const N_ROWS_CHUNK_SIZES: usize = MAX_AGG_SNARKS * 4;
-
 /// The number of bytes that we can fit in a blob. Note that each coefficient is represented in 32
 /// bytes, however, since those 32 bytes must represent a BLS12-381 scalar in its canonical form,
 /// we explicitly set the most-significant byte to 0, effectively utilising only 31 bytes.
 pub const N_BLOB_BYTES: usize = BLOB_WIDTH * N_DATA_BYTES_PER_COEFFICIENT;
 
-/// The number of rows in Blob Data config's layout to represent the "blob metadata" section.
-pub const N_ROWS_METADATA: usize = N_ROWS_NUM_CHUNKS + N_ROWS_CHUNK_SIZES;
-
-/// The number of rows in Blob Data config's layout to represent the "chunk data" section.
-pub const N_ROWS_DATA: usize = N_BLOB_BYTES - N_ROWS_METADATA;
-
-/// The number of rows in Blob Data config's layout to represent the "digest rlc" section.
-/// - metadata digest RLC (1 row)
-/// - chunk_digests RLC for each chunk (MAX_AGG_SNARKS rows)
-/// - blob versioned hash RLC (1 row)
-/// - challenge digest RLC (1 row)
-pub const N_ROWS_DIGEST_RLC: usize = 1 + MAX_AGG_SNARKS + 1 + 1;
-
-/// The number of rows in Blob Data config's layout to represent the "digest bytes" section.
-pub const N_ROWS_DIGEST_BYTES: usize = N_ROWS_DIGEST_RLC * N_BYTES_U256;
-
-/// The total number of rows in "digest rlc" and "digest bytes" sections.
-pub const N_ROWS_DIGEST: usize = N_ROWS_DIGEST_RLC + N_ROWS_DIGEST_BYTES;
-
-/// The total number of rows used in Blob Data config's layout.
-pub const N_ROWS_BLOB_DATA_CONFIG: usize = N_ROWS_METADATA + N_ROWS_DATA + N_ROWS_DIGEST;
-
 /// KZG trusted setup
 pub static KZG_TRUSTED_SETUP: Lazy<Arc<c_kzg::KzgSettings>> = Lazy::new(|| {
     Arc::new(
@@ -79,25 +53,25 @@ pub static KZG_TRUSTED_SETUP: Lazy<Arc<c_kzg::KzgSettings>> = Lazy::new(|| {
 
 /// Helper struct to generate witness for the Blob Data Config.
 #[derive(Clone, Debug)]
-pub struct BlobData {
+pub struct BlobData<const N_SNARKS: usize> {
     /// The number of valid chunks in the batch. This could be any number between:
-    /// [1, MAX_AGG_SNARKS]
+    /// [1, N_SNARKS]
     pub num_valid_chunks: u16,
     /// The size of each chunk. The chunk size can be zero if:
     /// - The chunk is a padded chunk (not a valid chunk).
     /// - The chunk has no L2 transactions, but only L1 msg txs.
-    pub chunk_sizes: [u32; MAX_AGG_SNARKS],
+    pub chunk_sizes: [u32; N_SNARKS],
     /// Flattened L2 signed transaction data, for each chunk.
     ///
     /// Note that in BlobData struct, only `num_valid_chunks` number of chunks' bytes are supposed
     /// to be read (for witness generation). For simplicity, the last valid chunk's bytes are
     /// copied over for the padded chunks. The `chunk_data_digest` for padded chunks is the
     /// `chunk_data_digest` of the last valid chunk (from Aggregation Circuit's perspective).
-    pub chunk_data: [Vec<u8>; MAX_AGG_SNARKS],
+    pub chunk_data: [Vec<u8>; N_SNARKS],
 }
 
-impl From<&BatchHash> for BlobData {
-    fn from(batch_hash: &BatchHash) -> Self {
+impl<const N_SNARKS: usize> From<&BatchHash<N_SNARKS>> for BlobData<N_SNARKS> {
+    fn from(batch_hash: &BatchHash<N_SNARKS>) -> Self {
         Self::new(
             batch_hash.number_of_valid_chunks,
             &batch_hash.chunks_with_padding,
@@ -107,41 +81,41 @@ impl From<&BatchHash> for BlobData {
 
 // If the chunk data is represented as a vector of u8's this implementation converts data from
 // dynamic number of chunks into BlobData.
-impl From<&Vec<Vec<u8>>> for BlobData {
+impl<const N_SNARKS: usize> From<&Vec<Vec<u8>>> for BlobData<N_SNARKS> {
     fn from(chunks: &Vec<Vec<u8>>) -> Self {
         let num_valid_chunks = chunks.len();
         assert!(num_valid_chunks > 0);
-        assert!(num_valid_chunks <= MAX_AGG_SNARKS);
+        assert!(num_valid_chunks <= N_SNARKS);
 
-        let chunk_sizes: [u32; MAX_AGG_SNARKS] = chunks
+        let chunk_sizes: [u32; N_SNARKS] = chunks
             .iter()
             .map(|chunk| chunk.len() as u32)
             .chain(repeat(0))
-            .take(MAX_AGG_SNARKS)
+            .take(N_SNARKS)
             .collect::<Vec<_>>()
             .try_into()
-            .expect("we have MAX_AGG_SNARKS chunks");
-        assert!(chunk_sizes.iter().sum::<u32>() <= N_ROWS_DATA as u32);
+            .expect("we have N_SNARKS chunks");
+        assert!(chunk_sizes.iter().sum::<u32>() <= Self::n_rows_data().try_into().unwrap());
 
         let last_chunk_data = chunks.last().expect("last chunk exists");
         let chunk_data = chunks
             .iter()
             .chain(repeat(last_chunk_data))
-            .take(MAX_AGG_SNARKS)
+            .take(N_SNARKS)
             .cloned()
             .collect::<Vec<_>>()
             .try_into()
-            .expect("we have MAX_AGG_SNARKS chunks");
+            .expect("we have N_SNARKS chunks");
 
         Self {
-            num_valid_chunks: num_valid_chunks as u16,
+            num_valid_chunks: num_valid_chunks.try_into().unwrap(),
             chunk_sizes,
             chunk_data,
         }
     }
 }
 
-impl Default for BlobData {
+impl<const N_SNARKS: usize> Default for BlobData<N_SNARKS> {
     fn default() -> Self {
         // default value corresponds to a batch with 1 chunk with no transactions
         Self::from(&vec![vec![]])
@@ -154,14 +128,54 @@ fn kzg_to_versioned_hash(commitment: &c_kzg::KzgCommitment) -> H256 {
     H256::from_slice(&res[..])
 }
 
-impl BlobData {
+impl<const N_SNARKS: usize> BlobData<N_SNARKS> {
+    /// The number of rows in Blob Data config's layout to represent the "digest rlc" section.
+    /// - metadata digest RLC (1 row)
+    /// - chunk_digests RLC for each chunk (N_SNARKS rows)
+    /// - blob versioned hash RLC (1 row)
+    /// - challenge digest RLC (1 row)
+    pub const fn n_rows_digest_rlc() -> usize {
+        1 + N_SNARKS + 1 + 1
+    }
+
+    /// The number of rows in Blob Data config's layout to represent the "digest bytes" section.
+    pub const fn n_rows_digest_bytes() -> usize {
+        Self::n_rows_digest_rlc() * N_BYTES_U256
+    }
+
+    /// The number of rows to encode the size of each chunk in a batch, in the Blob Data config.
+    /// chunk_size is u32, we use 4 bytes/rows.
+    const fn n_rows_chunk_sizes() -> usize {
+        N_SNARKS * 4
+    }
+
+    /// The total number of rows in "digest rlc" and "digest bytes" sections.
+    const fn n_rows_digest() -> usize {
+        Self::n_rows_digest_rlc() + Self::n_rows_digest_bytes()
+    }
+
+    /// The number of rows in Blob Data config's layout to represent the "blob metadata" section.
+    pub const fn n_rows_metadata() -> usize {
+        N_ROWS_NUM_CHUNKS + Self::n_rows_chunk_sizes()
+    }
+
+    /// The number of rows in Blob Data config's layout to represent the "chunk data" section.
+    pub const fn n_rows_data() -> usize {
+        N_BLOB_BYTES - Self::n_rows_metadata()
+    }
+
+    /// The total number of rows used in Blob Data config's layout.
+    pub const fn n_rows() -> usize {
+        N_BLOB_BYTES + Self::n_rows_digest()
+    }
+
     pub(crate) fn new(num_valid_chunks: usize, chunks_with_padding: &[ChunkHash]) -> Self {
         assert!(num_valid_chunks > 0);
-        assert!(num_valid_chunks <= MAX_AGG_SNARKS);
+        assert!(num_valid_chunks <= N_SNARKS);
 
         // padded chunk has 0 size, valid chunk's size is the number of bytes consumed by the
         // flattened data from signed L2 transactions.
-        let chunk_sizes: [u32; MAX_AGG_SNARKS] = chunks_with_padding
+        let chunk_sizes: [u32; N_SNARKS] = chunks_with_padding
             .iter()
             .map(|chunk| {
                 if chunk.is_padding {
@@ -173,7 +187,7 @@ impl BlobData {
             .collect::<Vec<_>>()
             .try_into()
             .unwrap();
-        assert!(chunk_sizes.iter().sum::<u32>() <= N_ROWS_DATA as u32);
+        assert!(chunk_sizes.iter().sum::<u32>() <= Self::n_rows_data() as u32);
 
         // chunk data of the "last valid chunk" is repeated over the padded chunks for simplicity
         // in calculating chunk_data_digest for those padded chunks. However, for the "chunk data"
@@ -191,9 +205,7 @@ impl BlobData {
             chunk_data,
         }
     }
-}
 
-impl BlobData {
     /// Get the versioned hash as per EIP-4844.
     pub(crate) fn get_versioned_hash(&self) -> H256 {
         let coefficients = self.get_coefficients();
@@ -219,7 +231,7 @@ impl BlobData {
         // preimage =
         //     metadata_digest ||
         //     chunk[0].chunk_data_digest || ...
-        //     chunk[MAX_AGG_SNARKS-1].chunk_data_digest ||
+        //     chunk[N_SNARKS-1].chunk_data_digest ||
         //     blob_versioned_hash
         //
         // where chunk_data_digest for a padded chunk is set equal to the "last valid chunk"'s
@@ -261,7 +273,7 @@ impl BlobData {
     /// eventually required to be checked for the consistency of blob's metadata, its chunks' bytes
     /// and the final blob preimage.
     pub fn preimages(&self) -> Vec<Vec<u8>> {
-        let mut preimages = Vec::with_capacity(2 + MAX_AGG_SNARKS);
+        let mut preimages = Vec::with_capacity(2 + N_SNARKS);
 
         // metadata
         preimages.push(self.to_metadata_bytes());
@@ -276,19 +288,17 @@ impl BlobData {
 
         preimages
     }
-}
 
-impl BlobData {
     /// Get the witness rows for assignment to the BlobDataConfig.
     pub(crate) fn to_rows(&self, challenge: Challenges<Value<Fr>>) -> Vec<BlobDataRow<Fr>> {
         let metadata_rows = self.to_metadata_rows(challenge);
-        assert_eq!(metadata_rows.len(), N_ROWS_METADATA);
+        assert_eq!(metadata_rows.len(), Self::n_rows_metadata());
 
         let data_rows = self.to_data_rows(challenge);
-        assert_eq!(data_rows.len(), N_ROWS_DATA);
+        assert_eq!(data_rows.len(), Self::n_rows_data());
 
         let digest_rows = self.to_digest_rows(challenge);
-        assert_eq!(digest_rows.len(), N_ROWS_DIGEST);
+        assert_eq!(digest_rows.len(), Self::n_rows_digest());
 
         metadata_rows
             .into_iter()
@@ -302,7 +312,7 @@ impl BlobData {
     /// metadata_bytes =
     ///     be_bytes(num_valid_chunks) ||
     ///     be_bytes(chunks[0].chunk_size) || ...
-    ///     be_bytes(chunks[MAX_AGG_SNARKS-1].chunk_size)
+    ///     be_bytes(chunks[N_SNARKS-1].chunk_size)
     ///
     /// where:
     /// - chunk_size of a padded chunk is 0
@@ -349,7 +359,7 @@ impl BlobData {
                 acc * challenge.evm_word() + Value::known(Fr::from(x as u64))
             });
             repeat(Value::known(Fr::zero()))
-                .take(N_ROWS_METADATA - 1)
+                .take(Self::n_rows_metadata() - 1)
                 .chain(once(digest_rlc))
         };
 
@@ -419,7 +429,7 @@ impl BlobData {
                 )
             })
             .chain(repeat(BlobDataRow::padding_row()))
-            .take(N_ROWS_DATA)
+            .take(Self::n_rows_data())
             .collect()
     }
 
@@ -506,7 +516,7 @@ impl BlobData {
             .chain(once(BlobDataRow {
                 preimage_rlc: challenge_digest_preimage_rlc,
                 digest_rlc: challenge_digest_rlc,
-                accumulator: 32 * (MAX_AGG_SNARKS + 1 + 1) as u64,
+                accumulator: 32 * (N_SNARKS + 1 + 1) as u64,
                 is_boundary: true,
                 ..Default::default()
             }))
@@ -565,8 +575,8 @@ impl Default for BlobAssignments {
     }
 }
 
-impl From<&BlobData> for BlobAssignments {
-    fn from(blob: &BlobData) -> Self {
+impl<const N_SNARKS: usize> From<&BlobData<N_SNARKS>> for BlobAssignments {
+    fn from(blob: &BlobData<N_SNARKS>) -> Self {
         // blob polynomial in evaluation form.
         //
         // also termed P(x)
@@ -626,10 +636,13 @@ impl BlobDataRow<Fr> {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use crate::MAX_AGG_SNARKS;
 
     #[test]
     #[ignore = "only required for logging challenge digest"]
     fn log_challenge() {
+        let n_rows_data = BlobData::<MAX_AGG_SNARKS>::n_rows_data();
+
         for (annotation, tcase) in [
             ("single empty chunk", vec![vec![]]),
             ("single non-empty chunk", vec![vec![1, 2, 3]]),
@@ -656,23 +669,23 @@ mod tests {
                     .map(|i| (10u8..11 + u8::try_from(i).unwrap()).collect())
                     .collect(),
             ),
-            ("single chunk blob full", vec![vec![123; N_ROWS_DATA]]),
+            ("single chunk blob full", vec![vec![123; n_rows_data]]),
             (
                 "multiple chunks blob full",
-                vec![vec![123; 1111], vec![231; N_ROWS_DATA - 1111]],
+                vec![vec![123; 1111], vec![231; n_rows_data - 1111]],
             ),
             (
                 "max number of chunks only last one non-empty not full blob",
                 repeat(vec![])
                     .take(MAX_AGG_SNARKS - 1)
-                    .chain(once(vec![132; N_ROWS_DATA - 1111]))
+                    .chain(once(vec![132; n_rows_data - 1111]))
                     .collect(),
             ),
             (
                 "max number of chunks only last one non-empty full blob",
                 repeat(vec![])
                     .take(MAX_AGG_SNARKS - 1)
-                    .chain(once(vec![132; N_ROWS_DATA]))
+                    .chain(once(vec![132; n_rows_data]))
                     .collect(),
             ),
             (
@@ -685,7 +698,7 @@ mod tests {
         ]
         .iter()
         {
-            let blob: BlobData = tcase.into();
+            let blob: BlobData<MAX_AGG_SNARKS> = tcase.into();
             let blob_assignments = BlobAssignments::from(&blob);
             println!(
                 "{:60}: challenge (z) = {:0>64x}, evaluation (y) = {:0>64x}",
@@ -696,12 +709,12 @@ mod tests {
 
     #[test]
     fn default_blob_data() {
-        let mut default_metadata = [0u8; 62];
+        let mut default_metadata = [0u8; BlobData::<MAX_AGG_SNARKS>::n_rows_metadata()];
         default_metadata[1] = 1;
         let default_metadata_digest = keccak256(default_metadata);
         let default_chunk_digests = [keccak256([]); MAX_AGG_SNARKS];
 
-        let default_blob = BlobData::default();
+        let default_blob = BlobData::<MAX_AGG_SNARKS>::default();
         let versioned_hash = default_blob.get_versioned_hash();
         assert_eq!(
             default_blob.get_challenge_digest(),
@@ -718,7 +731,7 @@ mod tests {
 
     #[test]
     fn coefficients_endianness() {
         // Check that the blob bytes are being packed into coefficients in big endian order.
-        let coefficients = BlobData::default().get_coefficients();
+        let coefficients = BlobData::<MAX_AGG_SNARKS>::default().get_coefficients();
 
         assert_eq!(coefficients[0], U256::one() << 232);
         assert_eq!(coefficients[1..], vec![U256::zero(); BLOB_WIDTH - 1]);
diff --git a/aggregator/src/constants.rs b/aggregator/src/constants.rs
index bdefc6709b..6513ecc697 100644
--- a/aggregator/src/constants.rs
+++ b/aggregator/src/constants.rs
@@ -8,8 +8,6 @@ pub(crate) const CHAIN_ID_LEN: usize = 8;
 /// Digest length
 pub(crate) const DIGEST_LEN: usize = 32;
 
-/// Input length per round
-pub(crate) const INPUT_LEN_PER_ROUND: usize = 136;
-
 // TODO(ZZ): update to the right degree
 pub(crate) const LOG_DEGREE: u32 = 19;
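Both `BlobData` (above) and the hash-cell assignment in core.rs (below) pad a dynamic list of chunks to exactly `N_SNARKS` entries by repeating the last valid entry. A standalone sketch of that padding rule (the helper name is mine):

```rust
use std::iter::repeat;

/// Pad `chunks` to exactly N entries by repeating the last valid chunk,
/// mirroring the convention used for `BlobData::chunk_data` and for the
/// padded chunk pi-hash preimages.
fn pad_chunks<const N: usize>(chunks: &[Vec<u8>]) -> [Vec<u8>; N] {
    assert!(!chunks.is_empty() && chunks.len() <= N);
    let last = chunks.last().unwrap();
    chunks
        .iter()
        .cloned()
        .chain(repeat(last.clone()))
        .take(N)
        .collect::<Vec<_>>()
        .try_into()
        .expect("we have N chunks")
}

fn main() {
    let padded = pad_chunks::<4>(&[vec![1, 2], vec![3]]);
    assert_eq!(padded, [vec![1, 2], vec![3], vec![3], vec![3]]);
    println!("{padded:?}");
}
```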
diff --git a/aggregator/src/core.rs b/aggregator/src/core.rs
index 340a66cd06..5002d83393 100644
--- a/aggregator/src/core.rs
+++ b/aggregator/src/core.rs
@@ -1,4 +1,7 @@
+use std::iter::repeat;
+
 use ark_std::{end_timer, start_timer};
+use ethers_core::utils::keccak256;
 use halo2_proofs::{
     circuit::{AssignedCell, Layouter, Region, Value},
     halo2curves::{
@@ -24,25 +27,17 @@ use snark_verifier_sdk::{
     Snark,
 };
 use zkevm_circuits::{
-    keccak_circuit::{
-        keccak_packed_multi::{self, multi_keccak},
-        KeccakCircuit, KeccakCircuitConfig,
-    },
-    table::{KeccakTable, LookupTable},
+    keccak_circuit::{keccak_packed_multi::multi_keccak, KeccakCircuit, KeccakCircuitConfig},
     util::Challenges,
 };
 
 use crate::{
     constants::{
-        BATCH_VH_OFFSET, BATCH_Y_OFFSET, BATCH_Z_OFFSET, CHAIN_ID_LEN, DIGEST_LEN,
-        INPUT_LEN_PER_ROUND, LOG_DEGREE, MAX_AGG_SNARKS,
+        BATCH_VH_OFFSET, BATCH_Y_OFFSET, BATCH_Z_OFFSET, CHAIN_ID_LEN, DIGEST_LEN, LOG_DEGREE,
     },
-    util::{
-        assert_conditional_equal, assert_equal, assert_exist, get_indices, get_max_keccak_updates,
-        parse_hash_digest_cells, parse_hash_preimage_cells, parse_pi_hash_rlc_cells,
-    },
-    AggregationConfig, RlcConfig, BITS, CHUNK_DATA_HASH_INDEX, CHUNK_TX_DATA_HASH_INDEX, LIMBS,
-    POST_STATE_ROOT_INDEX, PREV_STATE_ROOT_INDEX, WITHDRAW_ROOT_INDEX,
+    util::{assert_conditional_equal, assert_equal, parse_hash_preimage_cells},
+    RlcConfig, BITS, CHUNK_DATA_HASH_INDEX, CHUNK_TX_DATA_HASH_INDEX, LIMBS, POST_STATE_ROOT_INDEX,
+    PREV_STATE_ROOT_INDEX, WITHDRAW_ROOT_INDEX,
 };
 
 /// Subroutine for the witness generations.
@@ -149,13 +144,170 @@ pub fn extract_proof_and_instances_with_pairing_check(
     Ok((as_proof, acc_instances))
 }
 
-#[derive(Default)]
-pub(crate) struct ExtractedHashCells {
-    hash_input_cells: Vec<AssignedCell<Fr, Fr>>,
-    hash_output_cells: Vec<AssignedCell<Fr, Fr>>,
-    data_rlc_cells: Vec<AssignedCell<Fr, Fr>>,
-    hash_input_len_cells: Vec<AssignedCell<Fr, Fr>>,
-    is_final_cells: Vec<AssignedCell<Fr, Fr>>,
+/// Extracted hash cells. Including the padded ones so that the circuit is static.
+pub(crate) struct ExtractedHashCells<const N_SNARKS: usize> {
+    inputs: Vec<Vec<AssignedCell<Fr, Fr>>>,
+    input_rlcs: Vec<AssignedCell<Fr, Fr>>,
+    outputs: Vec<Vec<AssignedCell<Fr, Fr>>>,
+    output_rlcs: Vec<AssignedCell<Fr, Fr>>,
+    data_lens: Vec<AssignedCell<Fr, Fr>>,
+    num_valid_snarks: AssignedCell<Fr, Fr>,
+    chunks_are_padding: Vec<AssignedCell<Fr, Fr>>,
+}
+
+impl<const N_SNARKS: usize> ExtractedHashCells<N_SNARKS> {
+    /// Assign the cells for hash inputs/outputs and their RLCs.
+    /// Pads the number of hashes to N_SNARKS.
+    /// DOES NOT CONSTRAIN THE CORRECTNESS.
+    /// Call the `check_against_lookup_table` function to constrain that the hashes are correct.
+    #[allow(clippy::too_many_arguments)]
+    pub(crate) fn assign_hash_cells(
+        plonk_config: &RlcConfig,
+        region: &mut Region<Fr>,
+        offset: &mut usize,
+        keccak_input_challenge: &AssignedCell<Fr, Fr>,
+        evm_word_challenge: &AssignedCell<Fr, Fr>,
+        num_valid_chunks: usize,
+        preimages: &[Vec<u8>],
+        chunk_is_valid_cell32s: &[AssignedCell<Fr, Fr>],
+        num_valid_snarks: AssignedCell<Fr, Fr>,
+        chunks_are_padding: Vec<AssignedCell<Fr, Fr>>,
+    ) -> Result<Self, halo2_proofs::plonk::Error> {
+        let mut inputs = vec![];
+        let mut input_rlcs = vec![];
+        let mut outputs = vec![];
+        let mut output_rlcs = vec![];
+        let mut data_lens = vec![];
+
+        // preimages are padded as follows
+        // - the first hash is batch_public_input_hash
+        // - the next hashes are chunk\[i\].piHash, we padded it to N_SNARKS by repeating the last
+        //   chunk
+        // - the last hash is batch_data_hash, its input is padded to 32*N_SNARKS
+        log::trace!("preimage len: {}", preimages.len());
+        for preimage in preimages
+            .iter()
+            .take(num_valid_chunks + 1)
+            .chain(repeat(&preimages[num_valid_chunks]).take(N_SNARKS - num_valid_chunks))
+        {
+            {
+                let mut preimage_cells = vec![];
+                for input in preimage.iter() {
+                    let v = Fr::from(*input as u64);
+                    let cell = plonk_config.load_private(region, &v, offset)?;
+                    preimage_cells.push(cell);
+                }
+                let input_rlc =
+                    plonk_config.rlc(region, &preimage_cells, keccak_input_challenge, offset)?;
+                inputs.push(preimage_cells);
+                input_rlcs.push(input_rlc);
+            }
+            {
+                let mut digest_cells = vec![];
+                let digest = keccak256(preimage);
+                for output in digest.iter() {
+                    let v = Fr::from(*output as u64);
+                    let cell = plonk_config.load_private(region, &v, offset)?;
+                    digest_cells.push(cell);
+                }
+                let output_rlc =
+                    plonk_config.rlc(region, &digest_cells, evm_word_challenge, offset)?;
+                outputs.push(digest_cells);
+                output_rlcs.push(output_rlc)
+            }
+
+            data_lens.push(plonk_config.load_private(
+                region,
+                &Fr::from(preimage.len() as u64),
+                offset,
+            )?);
+        }
+
+        {
+            let batch_data_hash_preimage = &preimages[N_SNARKS + 1];
+            let batch_data_hash_digest = keccak256(batch_data_hash_preimage);
+            let batch_data_hash_padded_preimage = batch_data_hash_preimage
+                .iter()
+                .cloned()
+                .chain(repeat(0).take(N_SNARKS * 32 - batch_data_hash_preimage.len()));
+
+            {
+                let mut preimage_cells = vec![];
+                for input in batch_data_hash_padded_preimage {
+                    let v = Fr::from(input as u64);
+                    let cell = plonk_config.load_private(region, &v, offset)?;
+                    preimage_cells.push(cell);
+                }
+                let input_rlc = plonk_config.rlc_with_flag(
+                    region,
+                    &preimage_cells,
+                    keccak_input_challenge,
+                    chunk_is_valid_cell32s,
+                    offset,
+                )?;
+                inputs.push(preimage_cells);
+                input_rlcs.push(input_rlc);
+            }
+
+            {
+                let mut digest_cells = vec![];
+                for output in batch_data_hash_digest.iter() {
+                    let v = Fr::from(*output as u64);
+                    let cell = plonk_config.load_private(region, &v, offset)?;
+                    digest_cells.push(cell);
+                }
+                let output_rlc =
+                    plonk_config.rlc(region, &digest_cells, evm_word_challenge, offset)?;
+                outputs.push(digest_cells);
+                output_rlcs.push(output_rlc)
+            }
+
+            data_lens.push(plonk_config.load_private(
+                region,
+                &Fr::from(batch_data_hash_preimage.len() as u64),
+                offset,
+            )?);
+        }
+
+        Ok(Self {
+            inputs,
+            input_rlcs,
+            outputs,
+            output_rlcs,
+            data_lens,
+            num_valid_snarks,
+            chunks_are_padding,
+        })
+    }
+
+    pub(crate) fn check_against_lookup_table(
+        &self,
+        plonk_config: &RlcConfig,
+        region: &mut Region<Fr>,
+        offset: &mut usize,
+    ) -> Result<(), halo2_proofs::plonk::Error> {
+        for (input_rlcs, (output_rlcs, data_len)) in self
+            .input_rlcs
+            .iter()
+            .zip_eq(self.output_rlcs.iter().zip_eq(self.data_lens.iter()))
+        {
+            plonk_config.lookup_keccak_rlcs(region, input_rlcs, output_rlcs, data_len, offset)?;
+        }
+        for (i, (input_rlcs, output_rlcs)) in self
+            .input_rlcs
+            .iter()
+            .zip_eq(self.output_rlcs.iter())
+            .enumerate()
+        {
+            log::info!(
+                "{}-th rlc {:?} {:?}",
+                i,
+                input_rlcs.value(),
+                output_rlcs.value()
+            );
+        }
+        Ok(())
+    }
+}
 
 #[derive(Default)]
@@ -167,7 +319,7 @@ pub(crate) struct ExpectedBlobCells {
 }
 
 pub(crate) struct AssignedBatchHash {
-    pub(crate) hash_output: Vec<AssignedCell<Fr, Fr>>,
+    pub(crate) hash_output: Vec<Vec<AssignedCell<Fr, Fr>>>,
     pub(crate) blob: ExpectedBlobCells,
     pub(crate) num_valid_snarks: AssignedCell<Fr, Fr>,
     pub(crate) chunks_are_padding: Vec<AssignedCell<Fr, Fr>>,
@@ -183,8 +335,8 @@ pub(crate) struct AssignedBatchHash {
 // 1. batch_data_hash digest is reused for public input hash
 // 2. batch_pi_hash used same roots as chunk_pi_hash
 // 2.1. batch_pi_hash and chunk[0] use a same prev_state_root
-// 2.2. batch_pi_hash and chunk[MAX_AGG_SNARKS-1] use a same post_state_root
-// 2.3. batch_pi_hash and chunk[MAX_AGG_SNARKS-1] use a same withdraw_root
+// 2.2. batch_pi_hash and chunk[N_SNARKS-1] use a same post_state_root
+// 2.3. batch_pi_hash and chunk[N_SNARKS-1] use a same withdraw_root
 // 3. batch_data_hash and chunk[i].pi_hash use a same chunk[i].data_hash when chunk[i] is not padded
 // 4. chunks are continuous: they are linked via the state roots
 // 5. batch and all its chunks use a same chain id
@@ -192,29 +344,21 @@ pub(crate) struct AssignedBatchHash {
 //    padded
 // 7. the hash input length are correct
 //    - hashes[0] has 200 bytes
-//    - hashes[1..MAX_AGG_SNARKS+1] has 168 bytes input
+//    - hashes[1..N_SNARKS+1] has 168 bytes input
 //    - batch's data_hash length is 32 * number_of_valid_snarks
 // 8. batch data hash is correct w.r.t. its RLCs
 // 9. is_final_cells are set correctly
-pub(crate) fn assign_batch_hashes(
-    config: &AggregationConfig,
+pub(crate) fn assign_batch_hashes<const N_SNARKS: usize>(
+    keccak_config: &KeccakCircuitConfig<Fr>,
+    rlc_config: &RlcConfig,
     layouter: &mut impl Layouter<Fr>,
     challenges: Challenges<Value<Fr>>,
     chunks_are_valid: &[bool],
+    num_valid_chunks: usize,
     preimages: &[Vec<u8>],
 ) -> Result<AssignedBatchHash, Error> {
-    let extracted_hash_cells = extract_hash_cells(
-        &config.keccak_circuit_config,
-        layouter,
-        challenges,
-        preimages,
-    )?;
-    // 2. batch_pi_hash used same roots as chunk_pi_hash
-    // 2.1. batch_pi_hash and chunk[0] use a same prev_state_root
-    // 2.2. batch_pi_hash and chunk[MAX_AGG_SNARKS-1] use a same post_state_root
-    // 2.3. batch_pi_hash and chunk[MAX_AGG_SNARKS-1] use a same withdraw_root
-    // 5. batch and all its chunks use a same chain id
-    copy_constraints(layouter, &extracted_hash_cells.hash_input_cells)?;
+    // assign the hash table
+    assign_keccak_table(keccak_config, layouter, challenges, preimages)?;
 
     // 1. batch_data_hash digest is reused for public input hash
     // 3. batch_data_hash and chunk[i].pi_hash use a same chunk[i].data_hash when chunk[i] is not
     //    padded
     // 4. chunks are continuous: they are linked via the state roots
     // 6. chunk[i]'s chunk_pi_hash_rlc_cells == chunk[i-1].chunk_pi_hash_rlc_cells when chunk[i] is
     //    padded
-    // 7. the hash input length are correct
-    //    - first MAX_AGG_SNARKS + 1 hashes all have 136 bytes input
-    //    - batch's data_hash length is 32 * number_of_valid_snarks
-    // 8. batch data hash is correct w.r.t. its RLCs
-    // 9. is_final_cells are set correctly
-    let (num_valid_snarks, chunks_are_padding) = conditional_constraints(
-        &config.rlc_config,
+    // 7. batch data hash is correct w.r.t. its RLCs
+    let extracted_hash_cells = conditional_constraints::<N_SNARKS>(
+        rlc_config,
         layouter,
         challenges,
         chunks_are_valid,
-        &extracted_hash_cells,
+        num_valid_chunks,
+        preimages,
     )?;
 
+    // 2. batch_pi_hash used same roots as chunk_pi_hash
+    // 2.1. batch_pi_hash and chunk[0] use a same prev_state_root
+    // 2.2. batch_pi_hash and chunk[N_SNARKS-1] use a same post_state_root
+    // 2.3. batch_pi_hash and chunk[N_SNARKS-1] use a same withdraw_root
+    // 5. batch and all its chunks use a same chain id
+    copy_constraints::<N_SNARKS>(layouter, &extracted_hash_cells.inputs)?;
+
-    let batch_pi_input = &extracted_hash_cells.hash_input_cells[0..INPUT_LEN_PER_ROUND * 2];
+    let batch_pi_input = &extracted_hash_cells.inputs[0]; //[0..INPUT_LEN_PER_ROUND * 2];
     let expected_blob_cells = ExpectedBlobCells {
-        z: batch_pi_input[BATCH_Z_OFFSET..BATCH_Z_OFFSET + 32].to_vec(),
-        y: batch_pi_input[BATCH_Y_OFFSET..BATCH_Y_OFFSET + 32].to_vec(),
-        versioned_hash: batch_pi_input[BATCH_VH_OFFSET..BATCH_VH_OFFSET + 32].to_vec(),
-        chunk_tx_data_digests: (0..MAX_AGG_SNARKS)
+        z: batch_pi_input[BATCH_Z_OFFSET..BATCH_Z_OFFSET + DIGEST_LEN].to_vec(),
+        y: batch_pi_input[BATCH_Y_OFFSET..BATCH_Y_OFFSET + DIGEST_LEN].to_vec(),
+        versioned_hash: batch_pi_input[BATCH_VH_OFFSET..BATCH_VH_OFFSET + DIGEST_LEN].to_vec(),
+        chunk_tx_data_digests: (0..N_SNARKS)
             .map(|i| {
-                let chunk_pi_input = &extracted_hash_cells.hash_input_cells
-                    [INPUT_LEN_PER_ROUND * (2 + 2 * i)..INPUT_LEN_PER_ROUND * (2 + 2 * (i + 1))];
-                chunk_pi_input[CHUNK_TX_DATA_HASH_INDEX..CHUNK_TX_DATA_HASH_INDEX + 32].to_vec()
+                extracted_hash_cells.inputs[i + 1]
+                    [CHUNK_TX_DATA_HASH_INDEX..CHUNK_TX_DATA_HASH_INDEX + DIGEST_LEN]
+                    .to_vec()
             })
             .collect(),
     };
 
     Ok(AssignedBatchHash {
-        hash_output: extracted_hash_cells.hash_output_cells,
+        hash_output: extracted_hash_cells.outputs,
         blob: expected_blob_cells,
-        num_valid_snarks,
-        chunks_are_padding,
+        num_valid_snarks: extracted_hash_cells.num_valid_snarks,
+        chunks_are_padding: extracted_hash_cells.chunks_are_padding,
     })
 }
 
-pub(crate) fn extract_hash_cells(
-    keccak_config: &KeccakCircuitConfig<Fr>,
+/// assign hash table
+pub(crate) fn assign_keccak_table(
+    config: &KeccakCircuitConfig<Fr>,
     layouter: &mut impl Layouter<Fr>,
     challenges: Challenges<Value<Fr>>,
     preimages: &[Vec<u8>],
-) -> Result<ExtractedHashCells, Error> {
-    let mut is_first_time = true;
+) -> Result<(), Error> {
     let keccak_capacity = KeccakCircuit::<Fr>::capacity_for_row(1 << LOG_DEGREE);
-    let max_keccak_updates = get_max_keccak_updates(MAX_AGG_SNARKS);
-    let keccak_f_rows = keccak_packed_multi::get_num_rows_per_update();
 
     let timer = start_timer!(|| ("multi keccak").to_string());
     // preimages consists of the following parts
     // (1) batchPiHash preimage =
     //      (chain_id ||
     //      chunk[0].prev_state_root ||
     //      chunk[k-1].post_state_root ||
     //      chunk[k-1].withdraw_root ||
-    //      batch_data_hash ||
-    //      z ||
-    //      y)
+    //      batch_data_hash)
     // (2) chunk[i].piHash preimage =
     //      (chain id ||
     //      chunk[i].prevStateRoot || chunk[i].postStateRoot ||
-    //      chunk[i].withdrawRoot || chunk[i].datahash ||
-    //      chunk[i].tx_data_hash)
+    //      chunk[i].withdrawRoot || chunk[i].datahash)
     // (3) batchDataHash preimage =
     //      (chunk[0].dataHash || ... || chunk[k-1].dataHash)
     // each part of the preimage is mapped to image by Keccak256
|| chunk[k-1].dataHash) // each part of the preimage is mapped to image by Keccak256 @@ -290,111 +433,32 @@ pub(crate) fn extract_hash_cells( .map_err(|e| Error::AssertionFailure(format!("multi keccak assignment failed: {e:?}")))?; end_timer!(timer); - // extract the indices of the rows for which the preimage and the digest cells lie in - let (preimage_indices, digest_indices) = get_indices(preimages); - - let extracted_hash_cells = layouter + layouter .assign_region( || "assign keccak rows", - |mut region| -> Result { - if is_first_time { - is_first_time = false; - let offset = witness.len() - 1; - keccak_config.set_row(&mut region, offset, &witness[offset])?; - return Ok(ExtractedHashCells::default()); - } - - let mut preimage_indices_iter = preimage_indices.iter(); - let mut digest_indices_iter = digest_indices.iter(); - - let mut cur_preimage_index = preimage_indices_iter.next(); - let mut cur_digest_index = digest_indices_iter.next(); - - // ==================================================== - // Step 1. Extract the hash cells - // ==================================================== - let mut hash_input_cells = vec![]; - let mut hash_output_cells = vec![]; - let mut data_rlc_cells = vec![]; - let mut hash_input_len_cells = vec![]; - let mut is_final_cells = vec![]; - + |mut region| { let timer = start_timer!(|| "assign row"); log::trace!("witness length: {}", witness.len()); - let input_bytes_col_idx = - keccak_packed_multi::get_input_bytes_col_idx_in_cell_manager() - + >::columns(&keccak_config.keccak_table) - .len() - - 1; for (offset, keccak_row) in witness.iter().enumerate() { - let row = keccak_config.set_row(&mut region, offset, keccak_row)?; - - if cur_preimage_index.is_some() && *cur_preimage_index.unwrap() == offset { - hash_input_cells.push(row[input_bytes_col_idx].clone()); - cur_preimage_index = preimage_indices_iter.next(); - } - if cur_digest_index.is_some() && *cur_digest_index.unwrap() == offset { - // last column is Keccak output in Keccak circuit - hash_output_cells.push(row.last().unwrap().clone()); // sage unwrap - cur_digest_index = digest_indices_iter.next(); - } - if offset % keccak_f_rows == 0 && offset / keccak_f_rows <= max_keccak_updates { - // first column is is_final - is_final_cells.push(row[0].clone()); - // second column is data rlc - data_rlc_cells.push(row[1].clone()); - // third column is hash len - hash_input_len_cells.push(row[2].clone()); - } + let _row = config.set_row(&mut region, offset, keccak_row)?; } end_timer!(timer); - for (i, e) in is_final_cells.iter().enumerate() { - log::trace!("{}-th round is final {:?}", i, e.value()); - } - - // sanity - assert_eq!( - hash_input_cells.len(), - max_keccak_updates * INPUT_LEN_PER_ROUND - ); - let num_digests = - (MAX_AGG_SNARKS * DIGEST_LEN + INPUT_LEN_PER_ROUND - 1) / INPUT_LEN_PER_ROUND; - assert_eq!( - hash_output_cells.len(), - (MAX_AGG_SNARKS + 1 + num_digests) * DIGEST_LEN - ); - - keccak_config - .keccak_table - .annotate_columns_in_region(&mut region); - keccak_config.annotate_circuit(&mut region); - Ok(ExtractedHashCells { - hash_input_cells, - hash_output_cells, - data_rlc_cells, - hash_input_len_cells, - is_final_cells, - }) + Ok(()) }, ) .map_err(|e| Error::AssertionFailure(format!("assign keccak rows: {e}")))?; - - for (i, e) in extracted_hash_cells.hash_input_len_cells.iter().enumerate() { - log::trace!("{}'s round hash input len {:?}", i, e.value()) - } - - Ok(extracted_hash_cells) + Ok(()) } // Assert the following constraints // 2. 
batch_pi_hash used same roots as chunk_pi_hash // 2.1. batch_pi_hash and chunk[0] use a same prev_state_root -// 2.2. batch_pi_hash and chunk[MAX_AGG_SNARKS-1] use a same post_state_root -// 2.3. batch_pi_hash and chunk[MAX_AGG_SNARKS-1] use a same withdraw_root +// 2.2. batch_pi_hash and chunk[N_SNARKS-1] use a same post_state_root +// 2.3. batch_pi_hash and chunk[N_SNARKS-1] use a same withdraw_root // 5. batch and all its chunks use a same chain id -fn copy_constraints( +fn copy_constraints( layouter: &mut impl Layouter, - hash_input_cells: &[AssignedCell], + hash_input_cells: &[Vec>], ) -> Result<(), Error> { let mut is_first_time = true; @@ -416,7 +480,7 @@ fn copy_constraints( batch_pi_hash_preimage, chunk_pi_hash_preimages, _potential_batch_data_hash_preimage, - ) = parse_hash_preimage_cells(hash_input_cells); + ) = parse_hash_preimage_cells::(hash_input_cells); // ==================================================== // Constraint the relations between hash preimages @@ -470,35 +534,33 @@ fn copy_constraints( // sanity check assert_equal( &batch_pi_hash_preimage[i + POST_STATE_ROOT_INDEX], - &chunk_pi_hash_preimages[MAX_AGG_SNARKS - 1][i + POST_STATE_ROOT_INDEX], + &chunk_pi_hash_preimages[N_SNARKS - 1][i + POST_STATE_ROOT_INDEX], format!( "chunk and batch's post_state_root do not match: {:?} {:?}", &batch_pi_hash_preimage[i + POST_STATE_ROOT_INDEX].value(), - &chunk_pi_hash_preimages[MAX_AGG_SNARKS - 1][i + POST_STATE_ROOT_INDEX] + &chunk_pi_hash_preimages[N_SNARKS - 1][i + POST_STATE_ROOT_INDEX] .value(), ) .as_str(), )?; region.constrain_equal( batch_pi_hash_preimage[i + POST_STATE_ROOT_INDEX].cell(), - chunk_pi_hash_preimages[MAX_AGG_SNARKS - 1][i + POST_STATE_ROOT_INDEX] - .cell(), + chunk_pi_hash_preimages[N_SNARKS - 1][i + POST_STATE_ROOT_INDEX].cell(), )?; // 2.3 chunk[k-1].withdraw_root assert_equal( &batch_pi_hash_preimage[i + WITHDRAW_ROOT_INDEX], - &chunk_pi_hash_preimages[MAX_AGG_SNARKS - 1][i + WITHDRAW_ROOT_INDEX], + &chunk_pi_hash_preimages[N_SNARKS - 1][i + WITHDRAW_ROOT_INDEX], format!( "chunk and batch's withdraw_root do not match: {:?} {:?}", &batch_pi_hash_preimage[i + WITHDRAW_ROOT_INDEX].value(), - &chunk_pi_hash_preimages[MAX_AGG_SNARKS - 1][i + WITHDRAW_ROOT_INDEX] - .value(), + &chunk_pi_hash_preimages[N_SNARKS - 1][i + WITHDRAW_ROOT_INDEX].value(), ) .as_str(), )?; region.constrain_equal( batch_pi_hash_preimage[i + WITHDRAW_ROOT_INDEX].cell(), - chunk_pi_hash_preimages[MAX_AGG_SNARKS - 1][i + WITHDRAW_ROOT_INDEX].cell(), + chunk_pi_hash_preimages[N_SNARKS - 1][i + WITHDRAW_ROOT_INDEX].cell(), )?; } @@ -539,38 +601,34 @@ fn copy_constraints( // padded // 7. the hash input length are correct // - hashes[0] has 200 bytes -// - hashes[1..MAX_AGG_SNARKS+1] has 168 bytes input +// - hashes[1..N_SNARKS+1] has 168 bytes input // - batch's data_hash length is 32 * number_of_valid_snarks // 8. batch data hash is correct w.r.t. its RLCs // 9. 
is_final_cells are set correctly #[allow(clippy::type_complexity)] -pub(crate) fn conditional_constraints( +pub(crate) fn conditional_constraints( rlc_config: &RlcConfig, layouter: &mut impl Layouter, challenges: Challenges>, chunks_are_valid: &[bool], - extracted_hash_cells: &ExtractedHashCells, -) -> Result<(AssignedCell, Vec>), Error> { - let ExtractedHashCells { - hash_input_cells, - hash_output_cells, - hash_input_len_cells, - data_rlc_cells, - is_final_cells, - } = extracted_hash_cells; - + num_valid_chunks: usize, + preimages: &[Vec], +) -> Result, Error> { layouter .assign_region( || "rlc conditional constraints", - |mut region| -> Result< - (AssignedCell, Vec>), - halo2_proofs::plonk::Error, - > { - rlc_config.init(&mut region)?; + |mut region| -> Result, halo2_proofs::plonk::Error> { let mut offset = 0; + rlc_config.init(&mut region)?; // ==================================================== // build the flags to indicate the chunks are empty or not // ==================================================== + + let keccak_input_challenge = + rlc_config.read_challenge1(&mut region, challenges, &mut offset)?; + let evm_word_challenge = + rlc_config.read_challenge2(&mut region, challenges, &mut offset)?; + let chunk_is_valid_cells = chunks_are_valid .iter() .map(|chunk_is_valid| -> Result<_, halo2_proofs::plonk::Error> { @@ -581,191 +639,94 @@ pub(crate) fn conditional_constraints( ) }) .collect::, halo2_proofs::plonk::Error>>()?; + + let chunk_is_valid_cell32s = chunk_is_valid_cells + .iter() + .flat_map(|cell| vec![cell; 32]) + .cloned() + .collect::>(); + + let chunks_are_padding = chunk_is_valid_cells + .iter() + .map(|chunk_is_valid| rlc_config.not(&mut region, chunk_is_valid, &mut offset)) + .collect::, halo2_proofs::plonk::Error>>()?; + let num_valid_snarks = constrain_flags(rlc_config, &mut region, &chunk_is_valid_cells, &mut offset)?; log::trace!("number of valid chunks: {:?}", num_valid_snarks.value()); - // - // if the num_of_valid_snarks <= 4, which only needs 1 keccak-f round. Therefore - // the batch's data hash (input, len, data_rlc, output_rlc) are in the first 300 - // keccak rows; - // - // else if the num_of_valid_snarks <= 8, which needs - // 2 keccak-f rounds. Therefore the batch's data hash (input, len, data_rlc, - // output_rlc) are in the 2nd 300 keccak rows; - // - // else if the - // num_of_valid_snarks <= 12, which needs 3 keccak-f rounds. Therefore the batch's - // data hash (input, len, data_rlc, output_rlc) are in the 3rd 300 keccak rows; - // - // else if the - // num_of_valid_snarks <= 16, which needs 4 keccak-f rounds. 
Therefore the batch's - // data hash (input, len, data_rlc, output_rlc) are in the 4th 300 keccak rows; - // - // the following flag is build to indicate which row the final data_rlc exists - // - // #valid snarks | offset of data hash | flags - // 1,2,3,4 | 0 | 1, 0, 0, 0 - // 5,6,7,8 | 32 | 0, 1, 0, 0 - // 9,10,11,12 | 64 | 0, 0, 1, 0 - // 13,14,15,16 | 96 | 0, 0, 0, 1 - - let five = { - let five = rlc_config.load_private(&mut region, &Fr::from(5), &mut offset)?; - let five_cell = rlc_config.five_cell(five.cell().region_index); - region.constrain_equal(five_cell, five.cell())?; - five - }; - let nine = { - let nine = rlc_config.load_private(&mut region, &Fr::from(9), &mut offset)?; - let nine_cell = rlc_config.nine_cell(nine.cell().region_index); - region.constrain_equal(nine_cell, nine.cell())?; - nine - }; - let thirteen = { - let thirteen = - rlc_config.load_private(&mut region, &Fr::from(13), &mut offset)?; - let thirteen_cell = rlc_config.thirteen_cell(thirteen.cell().region_index); - region.constrain_equal(thirteen_cell, thirteen.cell())?; - thirteen - }; - let smaller_or_eq_4 = rlc_config.is_smaller_than( - &mut region, - &num_valid_snarks, - &five, - &mut offset, - )?; - let greater_than_4 = rlc_config.not(&mut region, &smaller_or_eq_4, &mut offset)?; - let smaller_or_eq_8 = rlc_config.is_smaller_than( + // ==================================================== + // extract the hash cells from the witnesses and check against the lookup table + // ==================================================== + let assigned_hash_cells = ExtractedHashCells::assign_hash_cells( + rlc_config, &mut region, - &num_valid_snarks, - &nine, &mut offset, + &keccak_input_challenge, + &evm_word_challenge, + num_valid_chunks, + preimages, + &chunk_is_valid_cell32s, + num_valid_snarks, + chunks_are_padding.clone(), )?; - let greater_than_8 = rlc_config.not(&mut region, &smaller_or_eq_8, &mut offset)?; - let smaller_or_eq_12 = rlc_config.is_smaller_than( + assigned_hash_cells.check_against_lookup_table( + rlc_config, &mut region, - &num_valid_snarks, - &thirteen, &mut offset, )?; - let greater_than_12 = - rlc_config.not(&mut region, &smaller_or_eq_12, &mut offset)?; - - let flag1 = smaller_or_eq_4; - let flag2 = - rlc_config.mul(&mut region, &greater_than_4, &smaller_or_eq_8, &mut offset)?; - let flag3 = - rlc_config.mul(&mut region, &greater_than_8, &smaller_or_eq_12, &mut offset)?; - let flag4 = greater_than_12; - - log::trace!( - "flags: {:?} {:?} {:?} {:?}", - flag1.value(), - flag2.value(), - flag3.value(), - flag4.value() - ); + // ==================================================== // parse the hashes // ==================================================== // preimages - let ( - batch_pi_hash_preimage, - chunk_pi_hash_preimages, - potential_batch_data_hash_preimage, - ) = parse_hash_preimage_cells(hash_input_cells); + let (batch_pi_hash_preimage, chunk_pi_hash_preimages, batch_data_hash_preimage) = + parse_hash_preimage_cells::(&assigned_hash_cells.inputs); - // digests - let ( - _batch_pi_hash_digest, - _chunk_pi_hash_digests, - potential_batch_data_hash_digest, - ) = parse_hash_digest_cells(hash_output_cells); // ==================================================== // start the actual statements // ==================================================== // - // 1 batch_data_hash digest is reused for public input hash + // ==================================================== + // 1. 
batch_data_hash digest is reused for public input hash
+                // ====================================================
                 //
-                // the following part of the code is hard coded for the case where
-                // MAX_AGG_SNARKS <= 10
-                // in theory it may support up to 12 SNARKS (not tested)
-                // more SNARKs beyond 12 will require a revamp of the circuit
                 //
                 // public input hash is build as
-                // keccak(
+                // public_input_hash = keccak(
                 //     chain_id ||
                 //     chunk[0].prev_state_root ||
                 //     chunk[k-1].post_state_root ||
                 //     chunk[k-1].withdraw_root ||
-                //     batch_data_hash ||
-                //     z ||
-                //     y
-                // )
+                //     batch_data_hash )
                 //
                 // batchDataHash = keccak(chunk[0].dataHash || ... || chunk[k-1].dataHash)
-                //
-                //
-                // #valid snarks | offset of data hash | flags
-                // 1,2,3,4       | 0                   | 1, 0, 0, 0
-                // 5,6,7,8       | 32                  | 0, 1, 0, 0
-                // 9,10,11,12    | 64                  | 0, 0, 1, 0
-                // 13,14,15,16   | 96                  | 0, 0, 0, 1
-                for i in 0..4 {
-                    for j in 0..8 {
-                        // sanity check
-                        assert_exist(
-                            &batch_pi_hash_preimage[i * 8 + j + CHUNK_DATA_HASH_INDEX],
-                            &[
-                                potential_batch_data_hash_digest[(3 - i) * 8 + j].clone(),
-                                potential_batch_data_hash_digest[(3 - i) * 8 + j + 32].clone(),
-                                potential_batch_data_hash_digest[(3 - i) * 8 + j + 64].clone(),
-                                potential_batch_data_hash_digest[(3 - i) * 8 + j + 96].clone(),
-                            ],
-                        )?;
-                        // assert
-                        // batch_pi_hash_preimage[i * 8 + j + CHUNK_DATA_HASH_INDEX]
-                        // = flag1 * potential_batch_data_hash_digest[(3 - i) * 8 + j]
-                        // + flag2 * potential_batch_data_hash_digest[(3 - i) * 8 + j + 32]
-                        // + flag3 * potential_batch_data_hash_digest[(3 - i) * 8 + j + 64]
-                        // + flag4 * potential_batch_data_hash_digest[(3 - i) * 8 + j + 96]
-
-                        let rhs = rlc_config.mul(
-                            &mut region,
-                            &flag1,
-                            &potential_batch_data_hash_digest[(3 - i) * 8 + j],
-                            &mut offset,
-                        )?;
-                        let rhs = rlc_config.mul_add(
-                            &mut region,
-                            &flag2,
-                            &potential_batch_data_hash_digest[(3 - i) * 8 + j + 32],
-                            &rhs,
-                            &mut offset,
-                        )?;
-                        let rhs = rlc_config.mul_add(
-                            &mut region,
-                            &flag3,
-                            &potential_batch_data_hash_digest[(3 - i) * 8 + j + 64],
-                            &rhs,
-                            &mut offset,
-                        )?;
-                        let rhs = rlc_config.mul_add(
-                            &mut region,
-                            &flag4,
-                            &potential_batch_data_hash_digest[(3 - i) * 8 + j + 96],
-                            &rhs,
-                            &mut offset,
-                        )?;
-                        region.constrain_equal(
-                            batch_pi_hash_preimage[i * 8 + j + CHUNK_DATA_HASH_INDEX].cell(),
-                            rhs.cell(),
-                        )?;
-                    }
-                }
+                // the strategy here is to generate the RLC of the batch_data_hash digest
+                // bytes inside batch_pi_hash_preimage and compare it with batchDataHash's
+                // output RLC
+                let batch_data_hash_rlc = rlc_config.rlc(
+                    &mut region,
+                    batch_pi_hash_preimage
+                        [CHUNK_DATA_HASH_INDEX..CHUNK_DATA_HASH_INDEX + DIGEST_LEN]
+                        .as_ref(),
+                    &evm_word_challenge,
+                    &mut offset,
+                )?;
+
+                log::info!(
+                    "batch data hash rlc recomputed: {:?}",
+                    batch_data_hash_rlc.value()
+                );
+                log::info!(
+                    "batch data hash rlc from table: {:?}",
+                    assigned_hash_cells.output_rlcs[N_SNARKS + 1].value()
+                );
+
+                region.constrain_equal(
+                    batch_data_hash_rlc.cell(),
+                    assigned_hash_cells.output_rlcs[N_SNARKS + 1].cell(),
+                )?;

                 // 3 batch_data_hash and chunk[i].pi_hash use a same chunk[i].data_hash when
                 // chunk[i] is not padded
@@ -781,32 +742,51 @@ pub(crate) fn conditional_constraints(
                 //        chunk[i].piHash =
                 //        keccak(
                 //             chain id ||
                 //             chunk[i].prevStateRoot || chunk[i].postStateRoot ||
                 //             chunk[i].withdrawRoot ||
                 //             chunk[i].datahash ||
                 //             chunk[i].tx_data_hash
                 //        )
-                for i in 0..MAX_AGG_SNARKS {
-                    for j in 0..DIGEST_LEN {
-                        assert_conditional_equal(
-                            &chunk_pi_hash_preimages[i][j + CHUNK_DATA_HASH_INDEX],
-                            &potential_batch_data_hash_preimage[i * DIGEST_LEN + j],
-                            &chunk_is_valid_cells[i],
-                            format!(
-                                "chunk_{i}'s data hash does not match batch's: {:?} {:?} {:?}",
-                                &chunk_pi_hash_preimages[i][j + CHUNK_DATA_HASH_INDEX].value(),
-                                &potential_batch_data_hash_preimage[i * DIGEST_LEN + j].value(),
-                                &chunk_is_valid_cells[i].value()
-                            )
-                            .as_str(),
-                        )?;
-                        rlc_config.conditional_enforce_equal(
-                            &mut region,
-                            &chunk_pi_hash_preimages[i][j + CHUNK_DATA_HASH_INDEX],
-                            &potential_batch_data_hash_preimage[i * DIGEST_LEN + j],
-                            &chunk_is_valid_cells[i],
-                            &mut offset,
-                        )?;
-                    }
-                }
+                // the strategy here is to generate the RLCs of the chunk[i].dataHash and
+                // compare them with batchDataHash's input RLC
+
+                let batch_data_hash_reconstructed_rlc = {
+                    let batch_data_hash_reconstructed = chunk_pi_hash_preimages
+                        .iter()
+                        .flat_map(|&chunk_pi_hash_preimage| {
+                            chunk_pi_hash_preimage
+                                [CHUNK_DATA_HASH_INDEX..CHUNK_DATA_HASH_INDEX + DIGEST_LEN]
+                                .iter()
+                        })
+                        .cloned()
+                        .collect::<Vec<_>>();
+                    rlc_config.rlc_with_flag(
+                        &mut region,
+                        &batch_data_hash_reconstructed,
+                        &keccak_input_challenge,
+                        &chunk_is_valid_cell32s,
+                        &mut offset,
+                    )?
+                };
+
+                region.constrain_equal(
+                    batch_data_hash_reconstructed_rlc.cell(),
+                    assigned_hash_cells.input_rlcs[N_SNARKS + 1].cell(),
+                )?;
+
+                log::info!(
+                    "batch data hash rlc reconstructed: {:?}",
+                    batch_data_hash_reconstructed_rlc.value()
+                );
+                log::info!(
+                    "batch data hash rlc from table: {:?}",
+                    assigned_hash_cells.input_rlcs[N_SNARKS + 1].value()
+                );

+                // ====================================================
                 // 4  __valid__ chunks are continuous: they are linked via the state roots
-                for i in 0..MAX_AGG_SNARKS - 1 {
+                // ====================================================
+                //        chunk[i].piHash =
+                //        keccak(
+                //             chain id ||
+                //             chunk[i].prevStateRoot || chunk[i].postStateRoot || chunk[i].withdrawRoot
+                //             || chunk[i].datahash)
+                for i in 0..N_SNARKS - 1 {
                     for j in 0..DIGEST_LEN {
                         // sanity check
                         assert_conditional_equal(
@@ -831,20 +811,18 @@ pub(crate) fn conditional_constraints(
                     }
                 }

+                // ====================================================
                 // 6. chunk[i]'s chunk_pi_hash_rlc_cells == chunk[i-1].chunk_pi_hash_rlc_cells when
                 // chunk[i] is padded
-                let chunks_are_padding = chunk_is_valid_cells
-                    .iter()
-                    .map(|chunk_is_valid| rlc_config.not(&mut region, chunk_is_valid, &mut offset))
-                    .collect::<Result<Vec<_>, halo2_proofs::plonk::Error>>()?;
+                // ====================================================

-                let chunk_pi_hash_rlc_cells = parse_pi_hash_rlc_cells(data_rlc_cells);
+                let chunk_pi_hash_rlc_cells = &assigned_hash_cells.input_rlcs[1..N_SNARKS + 1];

-                for i in 1..MAX_AGG_SNARKS {
+                for i in 1..N_SNARKS {
                     rlc_config.conditional_enforce_equal(
                         &mut region,
-                        chunk_pi_hash_rlc_cells[i - 1],
-                        chunk_pi_hash_rlc_cells[i],
+                        &chunk_pi_hash_rlc_cells[i - 1],
+                        &chunk_pi_hash_rlc_cells[i],
                         &chunks_are_padding[i],
                         &mut offset,
                     )?;
@@ -859,249 +837,25 @@ pub(crate) fn conditional_constraints(
                     log::trace!("{i}-th chunk is valid: {:?}", f.value());
                 }

-                // 7. the hash input length are correct
-                // - hashes[0] has 232 bytes (preimage of batch pi hash)
-                // - hashes[1..MAX_AGG_SNARKS+1] has 168 bytes input (preimage of chunk pi hash)
-                // - batch's data_hash length is 32 * number_of_valid_snarks
-
-                // note: hash_input_len_cells[0] is from dummy rows of keccak circuit.
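The constraints above replace the old byte-by-byte, flag-multiplexed equality checks with single RLC comparisons against the keccak table, masking out padded chunks. A minimal out-of-circuit sketch of the flag-gated RLC semantics assumed here (the model function is illustrative of what `rlc_with_flag` is expected to compute; it is not part of the codebase):

use halo2_proofs::halo2curves::bn256::Fr;

// A byte only enters the accumulator when its flag is set, so the 32-byte
// data hashes of padded chunks drop out of the comparison entirely.
fn rlc_with_flag_model(bytes: &[u8], flags: &[bool], challenge: Fr) -> Fr {
    assert_eq!(bytes.len(), flags.len());
    bytes
        .iter()
        .zip(flags.iter())
        .fold(Fr::from(0u64), |acc, (&byte, &flag)| {
            if flag {
                acc * challenge + Fr::from(byte as u64)
            } else {
                acc
            }
        })
}

Under this reading, `chunk_is_valid_cell32s` is each chunk's validity bit repeated 32 times, so the mask lines up with the 32 bytes of that chunk's data hash inside the batch data hash preimage.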
- let batch_pi_hash_input_cell = hash_input_len_cells[2].cell(); - region.constrain_equal( - batch_pi_hash_input_cell, - rlc_config - .two_hundred_and_thirty_two_cell(batch_pi_hash_input_cell.region_index), - )?; - - // - hashes[1..MAX_AGG_SNARKS+1] has 168 bytes input - hash_input_len_cells - .iter() - .skip(3) // dummy (1) and batch pi hash (2) - .take(MAX_AGG_SNARKS * 2) - .chunks(2) - .into_iter() - .try_for_each(|chunk| { - let cur_hash_len = chunk.last().unwrap(); // safe unwrap - region.constrain_equal( - cur_hash_len.cell(), - rlc_config - .one_hundred_and_sixty_eight_cell(cur_hash_len.cell().region_index), - ) - })?; - - // - batch's data_hash length is 32 * number_of_valid_snarks - let const32 = rlc_config.load_private(&mut region, &Fr::from(32), &mut offset)?; - let const32_cell = rlc_config.thirty_two_cell(const32.cell().region_index); - region.constrain_equal(const32.cell(), const32_cell)?; - let data_hash_inputs_len = - rlc_config.mul(&mut region, &num_valid_snarks, &const32, &mut offset)?; - - // sanity check - assert_exist( - &data_hash_inputs_len, - &[ - hash_input_len_cells[MAX_AGG_SNARKS * 2 + 3].clone(), - hash_input_len_cells[MAX_AGG_SNARKS * 2 + 4].clone(), - hash_input_len_cells[MAX_AGG_SNARKS * 2 + 5].clone(), - hash_input_len_cells[MAX_AGG_SNARKS * 2 + 6].clone(), - ], - )?; - - log::trace!("data_hash_inputs: {:?}", data_hash_inputs_len.value()); - log::trace!( - "candidate 1: {:?}", - hash_input_len_cells[MAX_AGG_SNARKS * 2 + 3].value() - ); - log::trace!( - "candidate 2: {:?}", - hash_input_len_cells[MAX_AGG_SNARKS * 2 + 4].value() - ); - log::trace!( - "candidate 3: {:?}", - hash_input_len_cells[MAX_AGG_SNARKS * 2 + 5].value() - ); - log::trace!( - "candidate 4: {:?}", - hash_input_len_cells[MAX_AGG_SNARKS * 2 + 6].value() - ); - - let mut data_hash_inputs_len_rec = rlc_config.mul( - &mut region, - &hash_input_len_cells[MAX_AGG_SNARKS * 2 + 3], - &flag1, - &mut offset, - )?; - data_hash_inputs_len_rec = rlc_config.mul_add( - &mut region, - &hash_input_len_cells[MAX_AGG_SNARKS * 2 + 4], - &flag2, - &data_hash_inputs_len_rec, - &mut offset, - )?; - data_hash_inputs_len_rec = rlc_config.mul_add( - &mut region, - &hash_input_len_cells[MAX_AGG_SNARKS * 2 + 5], - &flag3, - &data_hash_inputs_len_rec, - &mut offset, - )?; - data_hash_inputs_len_rec = rlc_config.mul_add( - &mut region, - &hash_input_len_cells[MAX_AGG_SNARKS * 2 + 6], - &flag4, - &data_hash_inputs_len_rec, - &mut offset, - )?; - - // sanity check - assert_equal( - &data_hash_inputs_len, - &data_hash_inputs_len_rec, - format!( - "data_hash_input_len do not match: {:?} {:?}", - &data_hash_inputs_len.value(), - &data_hash_inputs_len_rec.value(), - ) - .as_str(), - )?; - region.constrain_equal( - data_hash_inputs_len.cell(), - data_hash_inputs_len_rec.cell(), - )?; - - // 8. batch data hash is correct w.r.t. its RLCs + // ==================================================== + // 7. batch data hash is correct w.r.t. its RLCs + // ==================================================== // batchDataHash = keccak(chunk[0].dataHash || ... 
|| chunk[k-1].dataHash) - let challenge_cell = - rlc_config.read_challenge1(&mut region, challenges, &mut offset)?; - - let flags = chunk_is_valid_cells - .iter() - .flat_map(|cell| vec![cell; 32]) - .cloned() - .collect::>(); - let rlc_cell = rlc_config.rlc_with_flag( &mut region, - potential_batch_data_hash_preimage[..DIGEST_LEN * MAX_AGG_SNARKS].as_ref(), - &challenge_cell, - &flags, + batch_data_hash_preimage, + &keccak_input_challenge, + &chunk_is_valid_cell32s, &mut offset, )?; - assert_exist( - &rlc_cell, - &[ - data_rlc_cells[MAX_AGG_SNARKS * 2 + 3].clone(), - data_rlc_cells[MAX_AGG_SNARKS * 2 + 4].clone(), - data_rlc_cells[MAX_AGG_SNARKS * 2 + 5].clone(), - data_rlc_cells[MAX_AGG_SNARKS * 2 + 6].clone(), - ], - )?; - log::trace!("rlc from chip {:?}", rlc_cell.value()); - log::trace!( - "rlc from table {:?}", - data_rlc_cells[MAX_AGG_SNARKS * 2 + 3].value() - ); - log::trace!( - "rlc from table {:?}", - data_rlc_cells[MAX_AGG_SNARKS * 2 + 4].value() - ); - log::trace!( - "rlc from table {:?}", - data_rlc_cells[MAX_AGG_SNARKS * 2 + 5].value() - ); - - // assertion - let t1 = rlc_config.sub( - &mut region, - &rlc_cell, - &data_rlc_cells[MAX_AGG_SNARKS * 2 + 3], - &mut offset, - )?; - let t2 = rlc_config.sub( - &mut region, - &rlc_cell, - &data_rlc_cells[MAX_AGG_SNARKS * 2 + 4], - &mut offset, - )?; - let t3 = rlc_config.sub( - &mut region, - &rlc_cell, - &data_rlc_cells[MAX_AGG_SNARKS * 2 + 5], - &mut offset, - )?; - let t4 = rlc_config.sub( - &mut region, - &rlc_cell, - &data_rlc_cells[MAX_AGG_SNARKS * 2 + 6], - &mut offset, + region.constrain_equal( + rlc_cell.cell(), + assigned_hash_cells.input_rlcs[N_SNARKS + 1].cell(), )?; - let t1t2 = rlc_config.mul(&mut region, &t1, &t2, &mut offset)?; - let t1t2t3 = rlc_config.mul(&mut region, &t1t2, &t3, &mut offset)?; - let t1t2t3t4 = rlc_config.mul(&mut region, &t1t2t3, &t4, &mut offset)?; - rlc_config.enforce_zero(&mut region, &t1t2t3t4)?; - - // 9. is_final_cells are set correctly - // the is_final_cells are set as - // index | value | comments - // --------------------------|-------|------------ - // 0 | 0 | 0-th row is prefix pad - // 1 | 0 | first keccak: - // 2 | 1 | batch_pi_hash use 2 rounds - // 3 | 0 | second keccak: - // 4 | 1 | chunk[0].pi_hash use 2 rounds - // 5 | 0 | third keccak: - // 6 | 1 | chunk[1].pi_hash use 2 rounds - // ... 
- // 2*(MAX_AGG_SNARKS) + 1 | 0 | MAX_AGG_SNARKS+1's keccak - // 2*(MAX_AGG_SNARKS) + 2 | 1 | chunk[MAX_AGG_SNARKS].pi_hash use 2 rounds - // 2*(MAX_AGG_SNARKS) + 3 | a | MAX_AGG_SNARKS+2's keccak - // 2*(MAX_AGG_SNARKS) + 4 | b | batch_data_hash may use 1, 2, 3 - // 2*(MAX_AGG_SNARKS) + 5 | c | or 4 rounds - // 2*(MAX_AGG_SNARKS) + 6 | d | - // - // so a,b,c are constrained as follows - // - // #valid snarks | flags | a | b | c | d - // 1,2,3,4 | 1, 0, 0, 0 | 1 | - | - | - - // 5,6,7,8 | 0, 1, 0, 0 | 0 | 1 | - | - - // 9,10,11,12 | 0, 0, 1, 0 | 0 | 0 | 1 | - - // 13,14,15,16 | 0, 0, 0, 1 | 0 | 0 | 0 | 1 - - // first MAX_AGG_SNARKS + 1 keccak - for mut chunk in is_final_cells - .iter() - .skip(1) - .take((MAX_AGG_SNARKS + 1) * 2) - .chunks(2) - .into_iter() - { - // first round - let first_round_cell = chunk.next().unwrap(); - let second_round_cell = chunk.next().unwrap(); - region.constrain_equal( - first_round_cell.cell(), - rlc_config.zero_cell(first_round_cell.cell().region_index), - )?; - region.constrain_equal( - second_round_cell.cell(), - rlc_config.one_cell(second_round_cell.cell().region_index), - )?; - } - // last keccak - // we constrain a * flag1 + b * flag2 + c * flag3 + d * flag4 == 1 - let a = &is_final_cells[2 * (MAX_AGG_SNARKS) + 3]; - let b = &is_final_cells[2 * (MAX_AGG_SNARKS) + 4]; - let c = &is_final_cells[2 * (MAX_AGG_SNARKS) + 5]; - let d = &is_final_cells[2 * (MAX_AGG_SNARKS) + 6]; - let mut left = rlc_config.mul(&mut region, a, &flag1, &mut offset)?; - left = rlc_config.mul_add(&mut region, b, &flag2, &left, &mut offset)?; - left = rlc_config.mul_add(&mut region, c, &flag3, &left, &mut offset)?; - left = rlc_config.mul_add(&mut region, d, &flag4, &left, &mut offset)?; - region - .constrain_equal(left.cell(), rlc_config.one_cell(left.cell().region_index))?; log::trace!("rlc chip uses {} rows", offset); - Ok((num_valid_snarks, chunks_are_padding)) + Ok(assigned_hash_cells) }, ) .map_err(|e| Error::AssertionFailure(format!("aggregation: {e}"))) diff --git a/aggregator/src/param.rs b/aggregator/src/param.rs index 23102d9db5..65c6f3b5d9 100644 --- a/aggregator/src/param.rs +++ b/aggregator/src/param.rs @@ -20,8 +20,8 @@ impl ConfigParams { Self { strategy: FpStrategy::Simple, degree: 19, - num_advice: vec![75], - num_lookup_advice: vec![10], + num_advice: vec![100], + num_lookup_advice: vec![13], num_fixed: 2, lookup_bits: 18, limb_bits: BITS, diff --git a/aggregator/src/tests/aggregation.rs b/aggregator/src/tests/aggregation.rs index 77e6fc59a6..5ea2781eac 100644 --- a/aggregator/src/tests/aggregation.rs +++ b/aggregator/src/tests/aggregation.rs @@ -12,13 +12,33 @@ use crate::{ }; #[test] -fn test_aggregation_circuit() { +fn test_max_agg_snarks_aggregation_circuit() { env_logger::init(); let k = 20; // This set up requires one round of keccak for chunk's data hash - let circuit = build_new_aggregation_circuit(2); + let circuit: AggregationCircuit = build_new_aggregation_circuit(2); + let instance = circuit.instances(); + let mock_prover = MockProver::::run(k, &circuit, instance).unwrap(); + mock_prover.assert_satisfied_par(); +} + +#[test] +fn test_2_snark_aggregation_circuit() { + let k = 20; + + let circuit: AggregationCircuit<2> = build_new_aggregation_circuit(1); + let instance = circuit.instances(); + let mock_prover = MockProver::::run(k, &circuit, instance).unwrap(); + mock_prover.assert_satisfied_par(); +} + +#[test] +fn test_14_snark_aggregation_circuit() { + let k = 20; + + let circuit: AggregationCircuit<14> = build_new_aggregation_circuit(12); let 
instance = circuit.instances();
+    let mock_prover = MockProver::<Fr>::run(k, &circuit, instance).unwrap();
+    mock_prover.assert_satisfied_par();
+}

@@ -34,7 +54,7 @@ fn test_aggregation_circuit_all_possible_num_snarks() {
     for i in 1..=MAX_AGG_SNARKS {
         println!("{i} real chunks and {} padded chunks", MAX_AGG_SNARKS - i);
         // This set up requires one round of keccak for chunk's data hash
-        let circuit = build_new_aggregation_circuit(i);
+        let circuit: AggregationCircuit<MAX_AGG_SNARKS> = build_new_aggregation_circuit(i);
         let instance = circuit.instances();
         let mock_prover = MockProver::<Fr>::run(k, &circuit, instance).unwrap();
         mock_prover.assert_satisfied_par();
@@ -54,7 +74,7 @@ fn test_aggregation_circuit_full() {
     fs::create_dir(path).unwrap();

     // This set up requires one round of keccak for chunk's data hash
-    let circuit = build_new_aggregation_circuit(2);
+    let circuit: AggregationCircuit<MAX_AGG_SNARKS> = build_new_aggregation_circuit(2);
     let instance = circuit.instances();
     let mock_prover = MockProver::<Fr>::run(25, &circuit, instance).unwrap();
     mock_prover.assert_satisfied_par();
@@ -70,7 +90,7 @@ fn test_aggregation_circuit_full() {
     let snark = gen_snark_shplonk(&param, &pk, circuit.clone(), &mut rng, None::<String>);
     log::trace!("finished snark generation for circuit");

-    assert!(verify_snark_shplonk::<AggregationCircuit>(
+    assert!(verify_snark_shplonk::<AggregationCircuit<MAX_AGG_SNARKS>>(
         &param,
         snark,
         pk.get_vk()
@@ -78,11 +98,11 @@ fn test_aggregation_circuit_full() {
     log::trace!("finished verification for circuit");

     // This set up requires two rounds of keccak for chunk's data hash
-    let circuit = build_new_aggregation_circuit(5);
+    let circuit: AggregationCircuit<MAX_AGG_SNARKS> = build_new_aggregation_circuit(5);
     let snark = gen_snark_shplonk(&param, &pk, circuit, &mut rng, None::<String>);
     log::trace!("finished snark generation for circuit");

-    assert!(verify_snark_shplonk::<AggregationCircuit>(
+    assert!(verify_snark_shplonk::<AggregationCircuit<MAX_AGG_SNARKS>>(
         &param,
         snark,
         pk.get_vk()
@@ -90,7 +110,9 @@ fn test_aggregation_circuit_full() {
     log::trace!("finished verification for circuit");
 }

-fn build_new_aggregation_circuit(num_real_chunks: usize) -> AggregationCircuit {
+fn build_new_aggregation_circuit<const N_SNARKS: usize>(
+    num_real_chunks: usize,
+) -> AggregationCircuit<N_SNARKS> {
     // inner circuit: Mock circuit
     let k0 = 8;

@@ -107,7 +129,7 @@
         ChunkHash::mock_padded_chunk_hash_for_testing(&chunks_without_padding[num_real_chunks - 1]);
     let chunks_with_padding = [
         chunks_without_padding,
-        vec![padded_chunk; MAX_AGG_SNARKS - num_real_chunks],
+        vec![padded_chunk; N_SNARKS - num_real_chunks],
     ]
     .concat();

@@ -132,8 +154,7 @@
     // ==========================
     // padded chunks
     // ==========================
-    let padded_snarks =
-        { vec![real_snarks.last().unwrap().clone(); MAX_AGG_SNARKS - num_real_chunks] };
+    let padded_snarks = { vec![real_snarks.last().unwrap().clone(); N_SNARKS - num_real_chunks] };

     // ==========================
     // batch
diff --git a/aggregator/src/tests/blob.rs b/aggregator/src/tests/blob.rs
index f13ff8ab74..feb15bcabd 100644
--- a/aggregator/src/tests/blob.rs
+++ b/aggregator/src/tests/blob.rs
@@ -2,10 +2,7 @@ use crate::{
     aggregation::{
         AssignedBarycentricEvaluationConfig, BarycentricEvaluationConfig, BlobDataConfig, RlcConfig,
     },
-    blob::{
-        BlobAssignments, BlobData, N_BYTES_U256, N_ROWS_BLOB_DATA_CONFIG, N_ROWS_DATA,
-        N_ROWS_METADATA,
-    },
+    blob::{BlobAssignments, BlobData, N_BYTES_U256},
     param::ConfigParams,
     MAX_AGG_SNARKS,
 };
@@ -26,7 +23,7 @@ use zkevm_circuits::{
 #[derive(Default)]
 struct BlobCircuit {
-    data: BlobData,
+    data: BlobData<MAX_AGG_SNARKS>,

     overwrite_num_valid_chunks: bool,
     overwrite_challenge_digest: Option<usize>,
@@ -46,7 +43,7 @@ struct BlobConfig {
     keccak_table: KeccakTable,

     rlc: RlcConfig,
-    blob_data: BlobDataConfig,
+    blob_data: BlobDataConfig<MAX_AGG_SNARKS>,
     barycentric: BarycentricEvaluationConfig,
 }

@@ -63,7 +60,7 @@ impl Circuit<Fr> for BlobCircuit {
         let challenges = Challenges::construct(meta);
         let keccak_table = KeccakTable::construct(meta);

-        let rlc = RlcConfig::configure(meta, challenges);
+        let rlc = RlcConfig::configure(meta, &keccak_table, challenges);

         let parameters = ConfigParams::aggregation_param();
         let range = RangeConfig::<Fr>::configure(
@@ -201,7 +198,8 @@ impl Circuit<Fr> for BlobCircuit {
                     if let Some(i) = self.overwrite_challenge_digest {
                         increment_cell(
                             &mut region,
-                            &assigned_rows[N_ROWS_BLOB_DATA_CONFIG - N_BYTES_U256 + i].byte,
+                            &assigned_rows[BlobData::<MAX_AGG_SNARKS>::n_rows() - N_BYTES_U256 + i]
+                                .byte,
                         )?;
                     }
                     if let Some((i, j)) = self.overwrite_chunk_data_digests {
@@ -229,7 +227,7 @@ fn increment_cell(
     )
 }

-fn check_data(data: BlobData) -> Result<(), Vec<VerifyFailure>> {
+fn check_data(data: BlobData<MAX_AGG_SNARKS>) -> Result<(), Vec<VerifyFailure>> {
     let circuit = BlobCircuit {
         data,
         ..Default::default()
@@ -246,7 +244,7 @@ fn check_circuit(circuit: &BlobCircuit) -> Result<(), Vec<VerifyFailure>> {
 #[test]
 fn blob_circuit_completeness() {
     // single chunk in batch, but the chunk has a size of N_ROWS_DATA
-    let full_blob = vec![vec![123; N_ROWS_DATA]];
+    let full_blob = vec![vec![123; BlobData::<MAX_AGG_SNARKS>::n_rows_data()]];
     let all_empty_chunks: Vec<Vec<u8>> = vec![vec![]; MAX_AGG_SNARKS];
     let one_chunk = vec![vec![2, 3, 4, 100, 1]];
     let two_chunks = vec![vec![100; 1000], vec![2, 3, 4, 100, 1]];
@@ -282,7 +280,7 @@ fn blob_circuit_completeness() {
     }
 }

-fn generic_blob_data() -> BlobData {
+fn generic_blob_data() -> BlobData<MAX_AGG_SNARKS> {
     BlobData::from(&vec![
         vec![3, 100, 24, 30],
         vec![],
@@ -364,10 +362,10 @@
 const OVERWRITE_ROWS: [usize; 6] = [
     0,
     10,
-    N_ROWS_METADATA - 1,
-    N_ROWS_METADATA,
-    N_ROWS_METADATA + 100,
-    N_ROWS_METADATA + N_ROWS_DATA - 1,
+    BlobData::<MAX_AGG_SNARKS>::n_rows_metadata() - 1,
+    BlobData::<MAX_AGG_SNARKS>::n_rows_metadata(),
+    BlobData::<MAX_AGG_SNARKS>::n_rows_metadata() + 100,
+    BlobData::<MAX_AGG_SNARKS>::n_rows_metadata() + BlobData::<MAX_AGG_SNARKS>::n_rows_data() - 1,
 ];

 #[test]
diff --git a/aggregator/src/tests/mock_chunk.rs b/aggregator/src/tests/mock_chunk.rs
index 338a374adb..74177bd847 100644
--- a/aggregator/src/tests/mock_chunk.rs
+++ b/aggregator/src/tests/mock_chunk.rs
@@ -9,7 +9,7 @@ use halo2_proofs::{
 };
 use snark_verifier::loader::halo2::halo2_ecc::halo2_base::SKIP_FIRST_PASS;
 use snark_verifier_sdk::CircuitExt;
-use zkevm_circuits::util::Challenges;
+use zkevm_circuits::{table::KeccakTable, util::Challenges};

 use crate::{
     constants::{ACC_LEN, DIGEST_LEN},
@@ -83,10 +83,14 @@ impl Circuit<Fr> for MockChunkCircuit {
         meta.set_minimum_degree(4);

         let challenges = Challenges::construct(meta);
-        let rlc_config = RlcConfig::configure(meta, challenges);
+        let keccak_table = KeccakTable::construct(meta);
+        let rlc_config = RlcConfig::configure(meta, &keccak_table, challenges);

         let instance = meta.instance_column();
         meta.enable_equality(instance);

+        let cs = meta.clone().chunk_lookups();
+        assert!(cs.degree() <= 5);
+
         MockConfig {
             rlc_config,
             instance,
diff --git a/aggregator/src/tests/rlc/dynamic_hashes.rs b/aggregator/src/tests/rlc/dynamic_hashes.rs
index e7d0b46c74..dca39bd749 100644
--- a/aggregator/src/tests/rlc/dynamic_hashes.rs
+++ b/aggregator/src/tests/rlc/dynamic_hashes.rs
@@ -42,9 +42,6 @@ impl Circuit<Fr> for DynamicHashCircuit {
     fn configure(meta: &mut ConstraintSystem<Fr>) -> Self::Config {
let challenges = Challenges::construct(meta); - // RLC configuration - let rlc_config = RlcConfig::configure(meta, challenges); - // hash config // hash configuration for aggregation circuit let keccak_circuit_config = { @@ -58,6 +55,11 @@ impl Circuit for DynamicHashCircuit { KeccakCircuitConfig::new(meta, keccak_circuit_config_args) }; + + // RLC configuration + let rlc_config = + RlcConfig::configure(meta, &keccak_circuit_config.keccak_table, challenges); + // enable equality for the data RLC column meta.enable_equality(keccak_circuit_config.keccak_table.input_rlc); diff --git a/aggregator/src/tests/rlc/gates.rs b/aggregator/src/tests/rlc/gates.rs index 32d7da61b1..4ffcb055ac 100644 --- a/aggregator/src/tests/rlc/gates.rs +++ b/aggregator/src/tests/rlc/gates.rs @@ -8,7 +8,7 @@ use halo2_proofs::{ plonk::{Circuit, ConstraintSystem, Error}, }; use rand::RngCore; -use zkevm_circuits::util::Challenges; +use zkevm_circuits::{table::KeccakTable, util::Challenges}; use crate::{aggregation::RlcConfig, util::rlc}; @@ -33,7 +33,8 @@ impl Circuit for ArithTestCircuit { fn configure(meta: &mut ConstraintSystem) -> Self::Config { let challenges = Challenges::construct(meta); - RlcConfig::configure(meta, challenges) + let keccak_table = KeccakTable::construct(meta); + RlcConfig::configure(meta, &keccak_table, challenges) } fn synthesize( diff --git a/aggregator/src/util.rs b/aggregator/src/util.rs index f50c000120..88f952a412 100644 --- a/aggregator/src/util.rs +++ b/aggregator/src/util.rs @@ -1,128 +1,5 @@ -use crate::constants::{DIGEST_LEN, INPUT_LEN_PER_ROUND, MAX_AGG_SNARKS}; use eth_types::Field; use halo2_proofs::{circuit::AssignedCell, halo2curves::bn256::Fr, plonk::Error}; -use itertools::Itertools; -use zkevm_circuits::keccak_circuit::keccak_packed_multi::{ - get_num_rows_per_round, get_num_rows_per_update, -}; - -// Calculates the maximum keccak updates (1 absorb, or 1 f-box invoke) -// needed for the number of snarks -pub(crate) fn get_max_keccak_updates(max_snarks: usize) -> usize { - // The public input hash for the batch is derived from hashing - // [ - // chain_id || - // chunk_0's prev_state || - // chunk_k-1's post_state || - // chunk_k-1's withdraw_root || - // batch_data_hash || - // z || - // y - // ] - // - // In total there are 200 bytes. Therefore 2 keccak-f rounds are required, - // as in a single round we can absorb 136 bytes (INPUT_LEN_PER_ROUND). - let pi_rounds = 2; - // Hash for each chunk is derived from hashing the chunk's - // [ - // chain_id || - // prev_state || - // post_state || - // withdraw_root || - // chunk_data_hash || - // tx_data_hash - // ] - // - // In total there are 168 bytes, therefore each chunk hash - // also requires 2 keccak rounds for 168 bytes. 
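Both removed helpers hinge on the same absorption rule, which is easy to state in isolation; a minimal sketch restating the arithmetic from the comments above (the helper name is illustrative, not part of the codebase):

// keccak-f absorbs INPUT_LEN_PER_ROUND = 136 bytes per permutation, and
// pad10*1 always appends at least one byte, so an input whose length is an
// exact multiple of 136 still costs one extra, padding-only permutation.
fn keccak_f_rounds(n_bytes: usize) -> usize {
    const INPUT_LEN_PER_ROUND: usize = 136;
    n_bytes / INPUT_LEN_PER_ROUND + 1
}

// e.g. keccak_f_rounds(200) == 2 (batch PI preimage),
//      keccak_f_rounds(168) == 2 (chunk PI preimage),
//      keccak_f_rounds(32 * 10) == 3 (data hash over 10 chunk digests).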
- let chunk_hash_rounds = 2 * max_snarks; - let data_hash_rounds = get_data_hash_keccak_updates(max_snarks); - - pi_rounds + chunk_hash_rounds + data_hash_rounds -} -pub(crate) fn get_data_hash_keccak_updates(max_snarks: usize) -> usize { - let data_hash_rounds = (32 * max_snarks) / INPUT_LEN_PER_ROUND; - // when `32 * max_snarks` happens to match a multiple of 136, a padding round will still be - // added - let padding_round = if data_hash_rounds * INPUT_LEN_PER_ROUND <= 32 * max_snarks { - 1 - } else { - 0 - }; - - data_hash_rounds + padding_round -} - -/// Return -/// - the indices of the rows that contain the input preimages -/// - the indices of the rows that contain the output digest -pub(crate) fn get_indices(preimages: &[Vec]) -> (Vec, Vec) { - let mut preimage_indices = vec![]; - let mut digest_indices = vec![]; - let mut round_ctr = 0; - - let keccak_f_rows = get_num_rows_per_update(); - let inner_round_rows = get_num_rows_per_round(); - - for preimage in preimages.iter().take(MAX_AGG_SNARKS + 1) { - // 136 = 17 * 8 is the size in bytes of each - // input chunk that can be processed by Keccak circuit using absorb - - // For example, if num_rows_per_inner_round for Keccak is 12, then - // each chunk of size 136 needs 300 Keccak circuit rows to prove - // which consists of 12 Keccak rows for each of 24 + 1 Keccak circuit rounds - // digest only happens at the end of the last input chunk with - // 4 Keccak circuit rounds, so 48 Keccak rows, and 300 - 48 = 252 - let num_rounds = 1 + preimage.len() / INPUT_LEN_PER_ROUND; - let mut preimage_padded = preimage.clone(); - preimage_padded.resize(INPUT_LEN_PER_ROUND * num_rounds, 0); - for (i, round) in preimage_padded.chunks(INPUT_LEN_PER_ROUND).enumerate() { - let f_round_offset = round_ctr * keccak_f_rows; - // indices for preimages - for (j, _chunk) in round.chunks(8).enumerate() { - let inner_offset = f_round_offset + (j + 1) * inner_round_rows; - for k in 0..8 { - preimage_indices.push(inner_offset + k); - } - } - // indices for digests - if i == num_rounds - 1 { - for j in 0..4 { - let inner_offset = f_round_offset - + j * inner_round_rows - + (keccak_f_rows - inner_round_rows * (DIGEST_LEN / 8)); - for k in 0..8 { - digest_indices.push(inner_offset + k); - } - } - } - round_ctr += 1; - } - } - // last hash is for data_hash and has various length, so we output all the possible cells - for _i in 0..get_data_hash_keccak_updates(MAX_AGG_SNARKS) { - for (j, _) in (0..INPUT_LEN_PER_ROUND).chunks(8).into_iter().enumerate() { - let inner_offset = round_ctr * keccak_f_rows + (j + 1) * inner_round_rows; - for k in 0..8 { - preimage_indices.push(inner_offset + k); - } - } - for j in 0..4 { - let inner_offset = round_ctr * keccak_f_rows - + j * inner_round_rows - + (keccak_f_rows - inner_round_rows * (DIGEST_LEN / 8)); - for k in 0..8 { - digest_indices.push(inner_offset + k); - } - } - round_ctr += 1; - } - - debug_assert!(is_ascending(&preimage_indices)); - debug_assert!(is_ascending(&digest_indices)); - - (preimage_indices, digest_indices) -} #[inline] // assert two cells have same value @@ -159,100 +36,54 @@ pub(crate) fn assert_conditional_equal( }) } -#[inline] -// assert a \in [b1, b2, b3...] 
if both a and bi are known -// (NOT constraining equality in circuit) -pub(crate) fn assert_exist( - a: &AssignedCell, - bi_s: &[AssignedCell], -) -> Result<(), Error> { - let mut res = false; - - let a_value = a.value(); - let bi_values = bi_s.iter().map(|x| x.value()).collect::>(); - - for &bi_value in bi_values.iter() { - a_value.zip(bi_value).assert_if_known(|(a, bi)| { - res = res || (a == bi); - true - }) - } - a_value.zip(bi_values[0]).error_if_known_and(|_| !res) -} - -#[inline] -// assert that the slice is ascending -fn is_ascending(a: &[usize]) -> bool { - a.windows(2).all(|w| w[0] <= w[1]) -} - #[inline] #[allow(clippy::type_complexity)] -pub(crate) fn parse_hash_preimage_cells( - hash_input_cells: &[AssignedCell], +pub(crate) fn parse_hash_preimage_cells( + hash_input_cells: &[Vec>], ) -> ( &[AssignedCell], - Vec<&[AssignedCell]>, + Vec<&Vec>>, &[AssignedCell], ) { + // each pi hash has INPUT_LEN_PER_ROUND bytes as input + // keccak will pad the input with another INPUT_LEN_PER_ROUND bytes // we extract all those bytes - // batch_pi_hash's input has 200 bytes which means - // its keccak takes two rounds of keccak-f permutation - let batch_pi_hash_preimage = &hash_input_cells[0..INPUT_LEN_PER_ROUND * 2]; + let batch_pi_hash_preimage = &hash_input_cells[0]; let mut chunk_pi_hash_preimages = vec![]; - for i in 0..MAX_AGG_SNARKS { - // each chunk_pi_hash's input has 168 bytes which means - // each chunk takes two rounds of keccak-f permutation - chunk_pi_hash_preimages.push( - &hash_input_cells[INPUT_LEN_PER_ROUND * 2 * (i + 1)..INPUT_LEN_PER_ROUND * 2 * (i + 2)], - ); + for i in 0..N_SNARKS { + chunk_pi_hash_preimages.push(&hash_input_cells[i + 1]); } - let potential_batch_data_hash_preimage = - &hash_input_cells[INPUT_LEN_PER_ROUND * 2 * (MAX_AGG_SNARKS + 1)..]; + let batch_data_hash_preimage = hash_input_cells.last().unwrap(); ( batch_pi_hash_preimage, chunk_pi_hash_preimages, - potential_batch_data_hash_preimage, + batch_data_hash_preimage, ) } #[inline] #[allow(clippy::type_complexity)] -pub(crate) fn parse_hash_digest_cells( - hash_output_cells: &[AssignedCell], +pub(crate) fn parse_hash_digest_cells( + hash_output_cells: &[Vec>], ) -> ( &[AssignedCell], - Vec<&[AssignedCell]>, + Vec<&Vec>>, &[AssignedCell], ) { - let batch_pi_hash_digest = &hash_output_cells[0..DIGEST_LEN]; + let batch_pi_hash_digest = &hash_output_cells[0]; let mut chunk_pi_hash_digests = vec![]; - for i in 0..MAX_AGG_SNARKS { - chunk_pi_hash_digests.push(&hash_output_cells[DIGEST_LEN * (i + 1)..DIGEST_LEN * (i + 2)]); + for i in 0..N_SNARKS { + chunk_pi_hash_digests.push(&hash_output_cells[i + 1]); } - let potential_batch_data_hash_digest = &hash_output_cells[DIGEST_LEN * (MAX_AGG_SNARKS + 1)..]; + let batch_data_hash_digest = &hash_output_cells[N_SNARKS + 1]; ( batch_pi_hash_digest, chunk_pi_hash_digests, - potential_batch_data_hash_digest, + batch_data_hash_digest, ) } -#[inline] -pub(crate) fn parse_pi_hash_rlc_cells( - data_rlc_cells: &[AssignedCell], -) -> Vec<&AssignedCell> { - data_rlc_cells - .iter() - .skip(3) // the first 3 rlc cells are pad (1) + batch pi hash (2) - .take(MAX_AGG_SNARKS * 2) // each chunk hash takes 2 rounds - .chunks(2) - .into_iter() - .map(|t| t.last().unwrap()) - .collect() -} - #[cfg(test)] pub(crate) fn rlc(inputs: &[Fr], randomness: &Fr) -> Fr { assert!(!inputs.is_empty()); diff --git a/prover/src/common/prover/aggregation.rs b/prover/src/common/prover/aggregation.rs index 678204103e..bfd1c11342 100644 --- a/prover/src/common/prover/aggregation.rs +++ 
b/prover/src/common/prover/aggregation.rs
@@ -4,7 +4,7 @@ use crate::{
     io::{load_snark, write_snark},
     utils::gen_rng,
 };
-use aggregator::{AggregationCircuit, BatchHash, ChunkHash};
+use aggregator::{AggregationCircuit, BatchHash, ChunkHash, MAX_AGG_SNARKS};
 use anyhow::{anyhow, Result};
 use rand::Rng;
 use snark_verifier_sdk::Snark;
@@ -23,7 +23,7 @@ impl Prover {
         let batch_hash = BatchHash::construct(chunk_hashes);

-        let circuit =
+        let circuit: AggregationCircuit<MAX_AGG_SNARKS> =
             AggregationCircuit::new(self.params(degree), previous_snarks, &mut rng, batch_hash)
                 .map_err(|err| anyhow!("Failed to construct aggregation circuit: {err:?}"))?;
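With this last hunk the snark count is pinned at the type level all the way up to the prover; a minimal usage sketch for downstream code (the type alias is hypothetical; only `AggregationCircuit` and `MAX_AGG_SNARKS` come from the crate):

use aggregator::{AggregationCircuit, MAX_AGG_SNARKS};

// Production instantiates the circuit at MAX_AGG_SNARKS, preserving the old
// behaviour, while smaller shapes such as AggregationCircuit::<2> remain
// available for the cheaper tests in aggregator/src/tests/aggregation.rs.
type ProverAggregationCircuit = AggregationCircuit<MAX_AGG_SNARKS>;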