From 689b1102f809d75c10046b83509f8b4fc33fb9b6 Mon Sep 17 00:00:00 2001 From: Nick Dimitriou Date: Thu, 25 Jul 2024 21:38:06 +0300 Subject: [PATCH 1/4] Updated from Sonobe --- README.md | 13 +- examples/circom_full_flow.rs | 46 +- examples/external_inputs.rs | 40 +- examples/full_flow.rs | 43 +- examples/multi_inputs.rs | 26 +- examples/multiple_inputs/bench_utils.rs | 3 +- examples/multiple_inputs/hypernova.rs | 129 ++ examples/multiple_inputs/mova.rs | 59 +- examples/multiple_inputs/mova_prove_times.csv | 3 + examples/multiple_inputs/nova.rs | 40 +- examples/multiple_inputs/nova_prove_times.csv | 3 + examples/noname_full_flow.rs | 158 +++ examples/sha256.rs | 28 +- folding-schemes/Cargo.toml | 39 +- folding-schemes/src/arith/ccs.rs | 135 +++ folding-schemes/src/arith/mod.rs | 15 + folding-schemes/src/arith/r1cs.rs | 205 ++++ folding-schemes/src/commitment/ipa.rs | 45 +- folding-schemes/src/commitment/kzg.rs | 22 +- folding-schemes/src/commitment/mod.rs | 23 +- folding-schemes/src/commitment/pedersen.rs | 27 +- .../src/folding/circuits/cyclefold.rs | 686 +++++++++++ folding-schemes/src/folding/circuits/mod.rs | 1 + .../src/folding/circuits/nonnative/affine.rs | 74 +- .../src/folding/circuits/nonnative/uint.rs | 53 +- .../src/folding/circuits/sum_check.rs | 148 ++- folding-schemes/src/folding/hypernova/cccs.rs | 91 +- .../src/folding/hypernova/circuits.rs | 1046 +++++++++++++++-- .../src/folding/hypernova/lcccs.rs | 117 +- folding-schemes/src/folding/hypernova/mod.rs | 940 +++++++++++++++ .../src/folding/hypernova/nimfs.rs | 305 ++--- .../src/folding/hypernova/utils.rs | 18 +- .../src/folding/mova/homogenization.rs | 143 +-- folding-schemes/src/folding/mova/mod.rs | 112 +- folding-schemes/src/folding/mova/nifs.rs | 50 +- folding-schemes/src/folding/mova/traits.rs | 3 +- folding-schemes/src/folding/nova/circuits.rs | 212 ++-- .../src/folding/nova/decider_eth.rs | 111 +- .../src/folding/nova/decider_eth_circuit.rs | 193 ++- folding-schemes/src/folding/nova/mod.rs | 528 +++++---- folding-schemes/src/folding/nova/nifs.rs | 57 +- folding-schemes/src/folding/nova/serialize.rs | 96 +- folding-schemes/src/folding/nova/traits.rs | 4 +- .../src/folding/protogalaxy/circuits.rs | 179 +++ .../src/folding/protogalaxy/folding.rs | 212 ++-- .../src/folding/protogalaxy/mod.rs | 45 +- .../src/folding/protogalaxy/traits.rs | 57 +- .../src/folding/protogalaxy/utils.rs | 119 +- folding-schemes/src/frontend/circom/mod.rs | 3 +- folding-schemes/src/frontend/circom/utils.rs | 1 - folding-schemes/src/frontend/mod.rs | 5 +- folding-schemes/src/frontend/noname/mod.rs | 201 ++++ folding-schemes/src/frontend/noname/utils.rs | 58 + folding-schemes/src/lib.rs | 86 +- folding-schemes/src/transcript/mod.rs | 106 +- folding-schemes/src/transcript/poseidon.rs | 305 ++--- .../src/utils/espresso/sum_check/mod.rs | 121 +- .../src/utils/espresso/sum_check/prover.rs | 34 +- .../src/utils/espresso/sum_check/structs.rs | 15 +- .../src/utils/espresso/sum_check/verifier.rs | 28 +- .../src/utils/espresso/virtual_polynomial.rs | 2 - folding-schemes/src/utils/lagrange_poly.rs | 2 +- folding-schemes/src/utils/mle.rs | 2 +- folding-schemes/src/utils/mod.rs | 79 ++ solidity-verifiers/Cargo.toml | 5 + solidity-verifiers/src/verifiers/g16.rs | 12 +- solidity-verifiers/src/verifiers/kzg.rs | 20 +- solidity-verifiers/src/verifiers/mod.rs | 4 +- .../src/verifiers/nova_cyclefold.rs | 224 ++-- .../nova_cyclefold_decider.askama.sol | 31 +- 70 files changed, 6138 insertions(+), 1908 deletions(-) create mode 100644 
examples/multiple_inputs/hypernova.rs create mode 100644 examples/multiple_inputs/mova_prove_times.csv create mode 100644 examples/multiple_inputs/nova_prove_times.csv create mode 100644 examples/noname_full_flow.rs create mode 100644 folding-schemes/src/arith/ccs.rs create mode 100644 folding-schemes/src/arith/mod.rs create mode 100644 folding-schemes/src/arith/r1cs.rs create mode 100644 folding-schemes/src/folding/circuits/cyclefold.rs create mode 100644 folding-schemes/src/folding/protogalaxy/circuits.rs create mode 100644 folding-schemes/src/frontend/noname/mod.rs create mode 100644 folding-schemes/src/frontend/noname/utils.rs diff --git a/README.md b/README.md index 5a24cabb..0e592dd8 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,5 @@ # sonobe -# Benchmarks - -To run the benchmarks use - -`cargo run --release --example ` - -For Mova you need to specify the homogenization. Either `point` or `sumcheck` - -# Sonobe ReadMe - Experimental folding schemes library implemented jointly by [0xPARC](https://0xparc.org/) and [PSE](https://pse.dev). @@ -31,10 +21,10 @@ Folding schemes implemented: - [Nova: Recursive Zero-Knowledge Arguments from Folding Schemes](https://eprint.iacr.org/2021/370.pdf), Abhiram Kothapalli, Srinath Setty, Ioanna Tzialla. 2021 - [CycleFold: Folding-scheme-based recursive arguments over a cycle of elliptic curves](https://eprint.iacr.org/2023/1192.pdf), Abhiram Kothapalli, Srinath Setty. 2023 +- [HyperNova: Recursive arguments for customizable constraint systems](https://eprint.iacr.org/2023/573.pdf), Abhiram Kothapalli, Srinath Setty. 2023 Work in progress: -- [HyperNova: Recursive arguments for customizable constraint systems](https://eprint.iacr.org/2023/573.pdf), Abhiram Kothapalli, Srinath Setty. 2023 - [ProtoGalaxy: Efficient ProtoStar-style folding of multiple instances](https://eprint.iacr.org/2023/1106.pdf), Liam Eagen, Ariel Gabizon. 
2023 ## Available frontends @@ -43,6 +33,7 @@ Available frontends to define the folded circuit: - [arkworks](https://github.com/arkworks-rs), arkworks contributors - [Circom](https://github.com/iden3/circom), iden3, 0Kims Association +- [Noname](https://github.com/zksecurity/noname), zkSecurity ## Usage diff --git a/examples/circom_full_flow.rs b/examples/circom_full_flow.rs index 71d0ed90..35a3f491 100644 --- a/examples/circom_full_flow.rs +++ b/examples/circom_full_flow.rs @@ -21,9 +21,10 @@ use folding_schemes::{ commitment::{kzg::KZG, pedersen::Pedersen}, folding::nova::{ decider_eth::{prepare_calldata, Decider as DeciderEth}, - Nova, + Nova, PreprocessorParam, }, frontend::{circom::CircomFCircuit, FCircuit}, + transcript::poseidon::poseidon_canonical_config, Decider, FoldingScheme, }; use solidity_verifiers::{ @@ -33,9 +34,6 @@ use solidity_verifiers::{ NovaCycleFoldVerifierKey, }; -mod utils; -use utils::init_ivc_and_decider_params; - fn main() { // set the initial state let z_0 = vec![Fr::from(3_u32)]; @@ -66,12 +64,8 @@ fn main() { let f_circuit_params = (r1cs_path, wasm_path, 1, 2); let f_circuit = CircomFCircuit::::new(f_circuit_params).unwrap(); - let (fs_prover_params, kzg_vk, g16_pk, g16_vk) = - init_ivc_and_decider_params::>(f_circuit.clone()); - - pub type NOVA = - Nova, KZG<'static, Bn254>, Pedersen>; - pub type DECIDERETH_FCircuit = DeciderEth< + pub type N = Nova, KZG<'static, Bn254>, Pedersen>; + pub type D = DeciderEth< G1, GVar, G2, @@ -80,30 +74,36 @@ fn main() { KZG<'static, Bn254>, Pedersen, Groth16, - NOVA, + N, >; + let poseidon_config = poseidon_canonical_config::(); + let mut rng = rand::rngs::OsRng; + + // prepare the Nova prover & verifier params + let nova_preprocess_params = PreprocessorParam::new(poseidon_config, f_circuit.clone()); + let nova_params = N::preprocess(&mut rng, &nova_preprocess_params).unwrap(); + // initialize the folding scheme engine, in our case we use Nova - let mut nova = NOVA::init(&fs_prover_params, f_circuit.clone(), z_0).unwrap(); + let mut nova = N::init(&nova_params, f_circuit.clone(), z_0).unwrap(); + + // prepare the Decider prover & verifier params + let (decider_pp, decider_vp) = D::preprocess(&mut rng, &nova_params, nova.clone()).unwrap(); + // run n steps of the folding iteration for (i, external_inputs_at_step) in external_inputs.iter().enumerate() { let start = Instant::now(); - nova.prove_step(external_inputs_at_step.clone()).unwrap(); + nova.prove_step(rng, external_inputs_at_step.clone(), None) + .unwrap(); println!("Nova::prove_step {}: {:?}", i, start.elapsed()); } - let rng = rand::rngs::OsRng; let start = Instant::now(); - let proof = DECIDERETH_FCircuit::prove( - (g16_pk, fs_prover_params.cs_params.clone()), - rng, - nova.clone(), - ) - .unwrap(); + let proof = D::prove(rng, decider_pp, nova.clone()).unwrap(); println!("generated Decider proof: {:?}", start.elapsed()); - let verified = DECIDERETH_FCircuit::verify( - (g16_vk.clone(), kzg_vk.clone()), + let verified = D::verify( + decider_vp.clone(), nova.i, nova.z_0.clone(), nova.z_i.clone(), @@ -131,7 +131,7 @@ fn main() { .unwrap(); // prepare the setup params for the solidity verifier - let nova_cyclefold_vk = NovaCycleFoldVerifierKey::from((g16_vk, kzg_vk, f_circuit.state_len())); + let nova_cyclefold_vk = NovaCycleFoldVerifierKey::from((decider_vp, f_circuit.state_len())); // generate the solidity code let decider_solidity_code = get_decider_template_for_cyclefold_decider(nova_cyclefold_vk); diff --git a/examples/external_inputs.rs b/examples/external_inputs.rs 
index a923bf1d..4939b32b 100644 --- a/examples/external_inputs.rs +++ b/examples/external_inputs.rs @@ -21,12 +21,10 @@ use core::marker::PhantomData; use std::time::Instant; use folding_schemes::commitment::{kzg::KZG, pedersen::Pedersen}; -use folding_schemes::folding::nova::Nova; +use folding_schemes::folding::nova::{Nova, PreprocessorParam}; use folding_schemes::frontend::FCircuit; -use folding_schemes::{Error, FoldingScheme}; -mod utils; use folding_schemes::transcript::poseidon::poseidon_canonical_config; -use utils::init_nova_ivc_params; +use folding_schemes::{Error, FoldingScheme}; /// This is the circuit that we want to fold, it implements the FCircuit trait. The parameter z_i /// denotes the current state which contains 1 element, and z_{i+1} denotes the next state which we @@ -65,14 +63,14 @@ use utils::init_nova_ivc_params; /// The last state z_i is used together with the external input w_i as inputs to compute the new /// state z_{i+1}. #[derive(Clone, Debug)] -pub struct ExternalInputsCircuits +pub struct ExternalInputsCircuit where F: Absorb, { _f: PhantomData, poseidon_config: PoseidonConfig, } -impl FCircuit for ExternalInputsCircuits +impl FCircuit for ExternalInputsCircuit where F: Absorb, { @@ -128,14 +126,14 @@ pub mod tests { use ark_r1cs_std::R1CSVar; use ark_relations::r1cs::ConstraintSystem; - // test to check that the ExternalInputsCircuits computes the same values inside and outside the circuit + // test to check that the ExternalInputsCircuit computes the same values inside and outside the circuit #[test] fn test_f_circuit() { let poseidon_config = poseidon_canonical_config::(); let cs = ConstraintSystem::::new_ref(); - let circuit = ExternalInputsCircuits::::new(poseidon_config).unwrap(); + let circuit = ExternalInputsCircuit::::new(poseidon_config).unwrap(); let z_i = vec![Fr::from(1_u32)]; let external_inputs = vec![Fr::from(3_u32)]; @@ -170,33 +168,35 @@ fn main() { assert_eq!(external_inputs.len(), num_steps); let poseidon_config = poseidon_canonical_config::(); - let F_circuit = ExternalInputsCircuits::::new(poseidon_config).unwrap(); - - println!("Prepare Nova ProverParams & VerifierParams"); - let (prover_params, verifier_params, _) = - init_nova_ivc_params::>(F_circuit.clone()); + let F_circuit = ExternalInputsCircuit::::new(poseidon_config.clone()).unwrap(); /// The idea here is that eventually we could replace the next line chunk that defines the - /// `type NOVA = Nova<...>` by using another folding scheme that fulfills the `FoldingScheme` + /// `type N = Nova<...>` by using another folding scheme that fulfills the `FoldingScheme` /// trait, and the rest of our code would be working without needing to be updated. 
- type NOVA = Nova< + type N = Nova< Projective, GVar, Projective2, GVar2, - ExternalInputsCircuits, + ExternalInputsCircuit, KZG<'static, Bn254>, Pedersen, >; + let mut rng = rand::rngs::OsRng; + + println!("Prepare Nova's ProverParams & VerifierParams"); + let nova_preprocess_params = PreprocessorParam::new(poseidon_config, F_circuit.clone()); + let nova_params = N::preprocess(&mut rng, &nova_preprocess_params).unwrap(); + println!("Initialize FoldingScheme"); - let mut folding_scheme = NOVA::init(&prover_params, F_circuit, initial_state.clone()).unwrap(); + let mut folding_scheme = N::init(&nova_params, F_circuit, initial_state.clone()).unwrap(); // compute a step of the IVC for (i, external_inputs_at_step) in external_inputs.iter().enumerate() { let start = Instant::now(); folding_scheme - .prove_step(external_inputs_at_step.clone()) + .prove_step(rng, external_inputs_at_step.clone(), None) .unwrap(); println!("Nova::prove_step {}: {:?}", i, start.elapsed()); } @@ -209,8 +209,8 @@ fn main() { let (running_instance, incoming_instance, cyclefold_instance) = folding_scheme.instances(); println!("Run the Nova's IVC verifier"); - NOVA::verify( - verifier_params, + N::verify( + nova_params.1, initial_state.clone(), folding_scheme.state(), // latest state Fr::from(num_steps as u32), diff --git a/examples/full_flow.rs b/examples/full_flow.rs index 6cf154fc..6db67332 100644 --- a/examples/full_flow.rs +++ b/examples/full_flow.rs @@ -19,16 +19,14 @@ use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError}; use std::marker::PhantomData; use std::time::Instant; -mod utils; -use utils::init_ivc_and_decider_params; - use folding_schemes::{ commitment::{kzg::KZG, pedersen::Pedersen}, folding::nova::{ decider_eth::{prepare_calldata, Decider as DeciderEth}, - Nova, + Nova, PreprocessorParam, }, frontend::FCircuit, + transcript::poseidon::poseidon_canonical_config, Decider, Error, FoldingScheme, }; use solidity_verifiers::{ @@ -82,11 +80,9 @@ fn main() { let z_0 = vec![Fr::from(3_u32)]; let f_circuit = CubicFCircuit::::new(()).unwrap(); - let (fs_prover_params, kzg_vk, g16_pk, g16_vk) = - init_ivc_and_decider_params::>(f_circuit); - pub type NOVA = Nova, KZG<'static, Bn254>, Pedersen>; - pub type DECIDERETH_FCircuit = DeciderEth< + pub type N = Nova, KZG<'static, Bn254>, Pedersen>; + pub type D = DeciderEth< G1, GVar, G2, @@ -95,30 +91,35 @@ fn main() { KZG<'static, Bn254>, Pedersen, Groth16, - NOVA, + N, >; + let poseidon_config = poseidon_canonical_config::(); + let mut rng = rand::rngs::OsRng; + + // prepare the Nova prover & verifier params + let nova_preprocess_params = PreprocessorParam::new(poseidon_config.clone(), f_circuit); + let nova_params = N::preprocess(&mut rng, &nova_preprocess_params).unwrap(); + // initialize the folding scheme engine, in our case we use Nova - let mut nova = NOVA::init(&fs_prover_params, f_circuit, z_0).unwrap(); + let mut nova = N::init(&nova_params, f_circuit, z_0).unwrap(); + + // prepare the Decider prover & verifier params + let (decider_pp, decider_vp) = D::preprocess(&mut rng, &nova_params, nova.clone()).unwrap(); + // run n steps of the folding iteration for i in 0..n_steps { let start = Instant::now(); - nova.prove_step(vec![]).unwrap(); + nova.prove_step(rng, vec![], None).unwrap(); println!("Nova::prove_step {}: {:?}", i, start.elapsed()); } - let rng = rand::rngs::OsRng; let start = Instant::now(); - let proof = DECIDERETH_FCircuit::prove( - (g16_pk, fs_prover_params.cs_params.clone()), - rng, - nova.clone(), - ) - .unwrap(); + let proof = 
D::prove(rng, decider_pp, nova.clone()).unwrap(); println!("generated Decider proof: {:?}", start.elapsed()); - let verified = DECIDERETH_FCircuit::verify( - (g16_vk.clone(), kzg_vk.clone()), + let verified = D::verify( + decider_vp.clone(), nova.i, nova.z_0.clone(), nova.z_i.clone(), @@ -146,7 +147,7 @@ fn main() { .unwrap(); // prepare the setup params for the solidity verifier - let nova_cyclefold_vk = NovaCycleFoldVerifierKey::from((g16_vk, kzg_vk, f_circuit.state_len())); + let nova_cyclefold_vk = NovaCycleFoldVerifierKey::from((decider_vp, f_circuit.state_len())); // generate the solidity code let decider_solidity_code = get_decider_template_for_cyclefold_decider(nova_cyclefold_vk); diff --git a/examples/multi_inputs.rs b/examples/multi_inputs.rs index bb150829..5560e743 100644 --- a/examples/multi_inputs.rs +++ b/examples/multi_inputs.rs @@ -14,11 +14,10 @@ use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective}; use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; use folding_schemes::commitment::{kzg::KZG, pedersen::Pedersen}; -use folding_schemes::folding::nova::Nova; +use folding_schemes::folding::nova::{Nova, PreprocessorParam}; use folding_schemes::frontend::FCircuit; +use folding_schemes::transcript::poseidon::poseidon_canonical_config; use folding_schemes::{Error, FoldingScheme}; -mod utils; -use utils::init_nova_ivc_params; /// This is the circuit that we want to fold, it implements the FCircuit trait. The parameter z_i /// denotes the current state which contains 5 elements, and z_{i+1} denotes the next state which @@ -124,14 +123,13 @@ fn main() { let F_circuit = MultiInputsFCircuit::::new(()).unwrap(); - println!("Prepare Nova ProverParams & VerifierParams"); - let (prover_params, verifier_params, _) = - init_nova_ivc_params::>(F_circuit); + let poseidon_config = poseidon_canonical_config::(); + let mut rng = rand::rngs::OsRng; /// The idea here is that eventually we could replace the next line chunk that defines the - /// `type NOVA = Nova<...>` by using another folding scheme that fulfills the `FoldingScheme` + /// `type N = Nova<...>` by using another folding scheme that fulfills the `FoldingScheme` /// trait, and the rest of our code would be working without needing to be updated. 
- type NOVA = Nova< + type N = Nova< Projective, GVar, Projective2, @@ -141,21 +139,25 @@ fn main() { Pedersen, >; + println!("Prepare Nova ProverParams & VerifierParams"); + let nova_preprocess_params = PreprocessorParam::new(poseidon_config, F_circuit); + let nova_params = N::preprocess(&mut rng, &nova_preprocess_params).unwrap(); + println!("Initialize FoldingScheme"); - let mut folding_scheme = NOVA::init(&prover_params, F_circuit, initial_state.clone()).unwrap(); + let mut folding_scheme = N::init(&nova_params, F_circuit, initial_state.clone()).unwrap(); // compute a step of the IVC for i in 0..num_steps { let start = Instant::now(); - folding_scheme.prove_step(vec![]).unwrap(); + folding_scheme.prove_step(rng, vec![], None).unwrap(); println!("Nova::prove_step {}: {:?}", i, start.elapsed()); } let (running_instance, incoming_instance, cyclefold_instance) = folding_scheme.instances(); println!("Run the Nova's IVC verifier"); - NOVA::verify( - verifier_params, + N::verify( + nova_params.1, initial_state.clone(), folding_scheme.state(), // latest state Fr::from(num_steps as u32), diff --git a/examples/multiple_inputs/bench_utils.rs b/examples/multiple_inputs/bench_utils.rs index 87951574..368f100d 100644 --- a/examples/multiple_inputs/bench_utils.rs +++ b/examples/multiple_inputs/bench_utils.rs @@ -3,10 +3,11 @@ use std::error::Error; use std::time::Duration; use ark_ff::{ PrimeField}; use csv::Writer; -use folding_schemes::ccs::r1cs::R1CS; +use folding_schemes::arith::r1cs; use folding_schemes::utils::vec::{dense_matrix_to_sparse, SparseMatrix}; use num_bigint::BigUint; use rand::Rng; +use folding_schemes::arith::r1cs::R1CS; fn create_large_diagonal_matrix(power: usize) -> SparseMatrix { let size = 1 << power; diff --git a/examples/multiple_inputs/hypernova.rs b/examples/multiple_inputs/hypernova.rs new file mode 100644 index 00000000..f0f7f86d --- /dev/null +++ b/examples/multiple_inputs/hypernova.rs @@ -0,0 +1,129 @@ +use crate::bench_utils::{get_test_r1cs, get_test_z, write_to_csv}; +use ark_ff::{ BigInteger, Field, PrimeField}; +use ark_pallas::{Fr, Projective}; +use ark_std::{log2, UniformRand}; +use folding_schemes::commitment::pedersen::Pedersen; +use folding_schemes::commitment::CommitmentScheme; +use folding_schemes::transcript::poseidon::{poseidon_canonical_config}; +use folding_schemes::transcript::Transcript; +use folding_schemes::utils::sum_check::{ SumCheck}; +use rand::Rng; +use std::mem::size_of_val; +use std::time::{Duration, Instant}; + +use std::error::Error; +use ark_crypto_primitives::sponge::CryptographicSponge; +use ark_crypto_primitives::sponge::poseidon::PoseidonSponge; +use folding_schemes::arith::ccs::CCS; +use folding_schemes::arith::r1cs::R1CS; +use folding_schemes::folding::hypernova::nimfs::NIMFS; +use folding_schemes::utils::vec::{dense_matrix_to_sparse, SparseMatrix}; + +mod bench_utils; + +fn hypernova_benchmarks(power: usize, prove_times: &mut Vec) { + let size = 1 << power; + + // let r1cs = get_test_r1cs_2(); + let r1cs: R1CS = get_test_r1cs(power); + let mut rng = ark_std::test_rng(); + let ccs = CCS::::from_r1cs(r1cs); + let (pedersen_params, _) = + Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); + // Generate a satisfying witness + let z_1 = get_test_z(power); + // let z_1 = get_test_z_2(3); + // Generate another satisfying witness + let z_2 = get_test_z(power); + // let z_2 = get_test_z_2(4); + + + let (running_instance, w1) = ccs + .to_lcccs::<_, _, Pedersen>(&mut rng, &pedersen_params, &z_1) + .unwrap(); + + let (new_instance, w2) = 
ccs + .to_cccs::<_, _, Pedersen>(&mut rng, &pedersen_params, &z_2) + .unwrap(); + + let poseidon_config = poseidon_canonical_config::(); + + let mut transcript_p: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); + transcript_p.absorb(&Fr::from_le_bytes_mod_order(b"init init")); + + let start = Instant::now(); + + + let (proof, folded_lcccs, folded_witness, _) = + NIMFS::>::prove( + &mut transcript_p, + &ccs, + &[running_instance.clone()], + &[new_instance.clone()], + &[w1], + &[w2], + ) + .unwrap(); + + + let prove_time = start.elapsed(); + prove_times.push(prove_time); + println!( + "Mova prove time {:?}", + prove_time + ); + + + // let mut transcript_v: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); + // transcript_v.absorb(&Fr::from_le_bytes_mod_order(b"init init")); + // + // // Run the verifier side of the multifolding + // let folded_lcccs_v = NIMFS::>::verify( + // &mut transcript_v, + // &ccs, + // &[running_instance.clone()], + // &[new_instance.clone()], + // proof, + // ) + // .unwrap(); + // assert_eq!(folded_lcccs, folded_lcccs_v); + // + // // Check that the folded LCCCS instance is a valid instance with respect to the folded witness + // folded_lcccs.check_relation(&ccs, &folded_witness).unwrap(); + +} + + +fn main() { + println!("starting"); + + let pows: Vec = vec![16, 20]; + println!("{:?}", pows); + + let mut prove_times: Vec = Vec::with_capacity(pows.len()); + + for pow in &pows { + println!("{}", pow); + hypernova_benchmarks(*pow, &mut prove_times); + } + + println!("Powers {:?}", pows); + + println!("Prove times {:?}", prove_times); + + println!( + "| {0: <10} | {1: <10} |", + "2^pow", "prove time" + ); + println!("| {0: <10} | {1: <10} |", "2^pow", "prove time"); + for (pow, prove_time) in pows.iter().zip(prove_times.iter()) { + println!("| {0: <10} | {1:?} |", pow, prove_time); + } + + if let Err(e) = write_to_csv(&pows, &prove_times, String::from("hypernova_prove_times.csv")) { + eprintln!("Failed to write to CSV: {}", e); + } else { + println!("CSV file has been successfully written."); + } + +} \ No newline at end of file diff --git a/examples/multiple_inputs/mova.rs b/examples/multiple_inputs/mova.rs index 68231ca0..87ccc33c 100644 --- a/examples/multiple_inputs/mova.rs +++ b/examples/multiple_inputs/mova.rs @@ -1,15 +1,13 @@ use std::env; -use ark_ff::PrimeField; use ark_pallas::{Fr, Projective}; use ark_std::log2; use ark_std::UniformRand; -use folding_schemes::ccs::r1cs::R1CS; use folding_schemes::commitment::pedersen::Pedersen; use folding_schemes::commitment::CommitmentScheme; -use folding_schemes::folding::mova::homogenization::{Homogenization, PointVsLineHomogenization, SumCheckHomogenization}; +use folding_schemes::folding::mova::homogenization::{Homogenization, PointVsLineHomogenization}; use folding_schemes::folding::mova::nifs::NIFS; use folding_schemes::folding::mova::Witness; -use folding_schemes::transcript::poseidon::{poseidon_canonical_config, PoseidonTranscript}; +use folding_schemes::transcript::poseidon::{poseidon_canonical_config}; use folding_schemes::transcript::Transcript; use num_bigint::{ BigUint, RandBigInt}; use rand::Rng; @@ -19,14 +17,15 @@ use std::time::{Duration, Instant}; use crate::bench_utils::{get_test_r1cs, get_test_z, write_to_csv}; use ark_ff::BigInteger; use folding_schemes::folding::mova::traits::MovaR1CS; -use num_traits::{One, Zero}; use std::error::Error; -use csv::Writer; +use ark_crypto_primitives::sponge::CryptographicSponge; +use ark_crypto_primitives::sponge::poseidon::PoseidonSponge; +use 
folding_schemes::arith::r1cs::R1CS; mod bench_utils; -fn mova_benchmark>>(power: usize, prove_times: &mut Vec) { +fn mova_benchmark(power: usize, prove_times: &mut Vec) { let size = 1 << power; @@ -35,7 +34,7 @@ fn mova_benchmark>> let mut rng = ark_std::test_rng(); let (pedersen_params, _) = Pedersen::::setup(&mut rng, r1cs.A.n_cols).unwrap(); let poseidon_config = poseidon_canonical_config::(); - let mut transcript_p = PoseidonTranscript::::new(&poseidon_config); + let mut transcript_p: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); // INSTANCE 1 let z_1: Vec = get_test_z(power); @@ -78,8 +77,8 @@ fn mova_benchmark>> let result = NIFS::< Projective, Pedersen, - PoseidonTranscript, - H + PoseidonSponge, + PointVsLineHomogenization> >::prove( &pedersen_params, &r1cs, @@ -100,17 +99,17 @@ fn mova_benchmark>> println!("Mova bytes used {:?}", size_of_val(&result)); //NIFS.V - let poseidon_config = poseidon_canonical_config::(); - let mut transcript_p = PoseidonTranscript::::new(&poseidon_config); + let mut transcript_v: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); + let (proof, instance_witness) = result; let folded_committed_instance = NIFS::< Projective, Pedersen, - PoseidonTranscript, - H + PoseidonSponge, + PointVsLineHomogenization> >::verify( - &mut transcript_p, + &mut transcript_v, &committed_instance_1, &committed_instance_2, &proof, @@ -126,34 +125,20 @@ fn mova_benchmark>> fn main() { - let args: Vec = env::args().collect(); - - let homogenization = &args[1]; - match homogenization.as_str() { - "sumcheck" | "point" => (), - _ => { - eprintln!("Expected 'sumcheck' or 'point' as an input argument"); - std::process::exit(1); - } - } + println!("starting"); - let pows: Vec = (10..24).collect(); + // let pows: Vec = (10..24).collect(); + let pows: Vec = vec![16, 20]; + println!("{:?}", pows); let mut prove_times: Vec = Vec::with_capacity(pows.len()); - if homogenization.as_str() == "sumcheck" { - for pow in &pows { - println!("{}", pow); - mova_benchmark::>>(*pow, &mut prove_times); - } - } else { - for pow in &pows { - println!("{}", pow); - mova_benchmark::>>(*pow, &mut prove_times); - } + for pow in &pows { + println!("{}", pow); + mova_benchmark(*pow, &mut prove_times); } println!("Powers {:?}", pows); @@ -164,7 +149,7 @@ fn main() { println!("| {0: <10} | {1:?} |", pow, prove_time); } - if let Err(e) = write_to_csv(&pows, &prove_times, format!("mova_{}_prove_times.csv", homogenization)) { + if let Err(e) = write_to_csv(&pows, &prove_times, format!("mova_prove_times.csv")) { eprintln!("Failed to write to CSV: {}", e); } else { println!("CSV file has been successfully written."); diff --git a/examples/multiple_inputs/mova_prove_times.csv b/examples/multiple_inputs/mova_prove_times.csv new file mode 100644 index 00000000..35480cfd --- /dev/null +++ b/examples/multiple_inputs/mova_prove_times.csv @@ -0,0 +1,3 @@ +pow,prove_time +16,47384 +20,806294 diff --git a/examples/multiple_inputs/nova.rs b/examples/multiple_inputs/nova.rs index 210a1dd3..630d5688 100644 --- a/examples/multiple_inputs/nova.rs +++ b/examples/multiple_inputs/nova.rs @@ -7,16 +7,17 @@ use folding_schemes::commitment::CommitmentScheme; use folding_schemes::folding::nova::nifs::NIFS; use folding_schemes::folding::nova::traits::NovaR1CS; use folding_schemes::folding::nova::Witness; -use folding_schemes::transcript::poseidon::{poseidon_canonical_config, PoseidonTranscript}; +use folding_schemes::transcript::poseidon::{poseidon_canonical_config}; use folding_schemes::transcript::Transcript; use 
folding_schemes::utils::sum_check::{ SumCheck}; -use num_traits::{One, Zero}; use rand::Rng; use std::mem::size_of_val; use std::time::{Duration, Instant}; use std::error::Error; -use csv::Writer; +use ark_crypto_primitives::sponge::CryptographicSponge; +use ark_crypto_primitives::sponge::poseidon::PoseidonSponge; +use ark_ec::CurveGroup; mod bench_utils; @@ -64,19 +65,20 @@ fn nova_benchmark(power: usize, prove_times: &mut Vec) { &witness_2, &incoming_committed_instance, ) - .unwrap(); - - match transcript_p.absorb_point(&cmT) { - Ok(_) => { - // - } - Err(e) => { - println!("Absorbed failed: {:?}", e); - } - } + .unwrap(); + + let elapsed = start.elapsed(); + println!("Time before Randomness generation {:?}", elapsed); + transcript_p.absorb_nonnative(&cmT); let r = transcript_p.get_challenge(); - let result = NIFS::>::fold_instances( + let elapsed = start.elapsed(); + println!("Time aftre Randomness generation {:?}", elapsed); + + let elapsed = start.elapsed(); + println!("Time before starting folding {:?}", elapsed); + + let result = NIFS::>::fold_instances( r, &witness_1, &running_committed_instance, @@ -85,7 +87,10 @@ fn nova_benchmark(power: usize, prove_times: &mut Vec) { &T, cmT, ) - .unwrap(); + .unwrap(); + let elapsed = start.elapsed(); + + println!("Time after folding {:?}", elapsed); let prove_time = start.elapsed(); prove_times.push(prove_time); @@ -111,7 +116,8 @@ fn nova_benchmark(power: usize, prove_times: &mut Vec) { fn main() { println!("starting"); - let pows: Vec = (10..24).collect(); + // let pows: Vec = (10..24).collect(); + let pows: Vec = vec![16, 20]; println!("{:?}", pows); let mut prove_times: Vec = Vec::with_capacity(pows.len()); @@ -139,5 +145,5 @@ fn main() { } else { println!("CSV file has been successfully written."); } - + } diff --git a/examples/multiple_inputs/nova_prove_times.csv b/examples/multiple_inputs/nova_prove_times.csv new file mode 100644 index 00000000..7864a568 --- /dev/null +++ b/examples/multiple_inputs/nova_prove_times.csv @@ -0,0 +1,3 @@ +pow,prove_time +16,408332 +20,5748832 diff --git a/examples/noname_full_flow.rs b/examples/noname_full_flow.rs new file mode 100644 index 00000000..a0954751 --- /dev/null +++ b/examples/noname_full_flow.rs @@ -0,0 +1,158 @@ +#![allow(non_snake_case)] +#![allow(non_camel_case_types)] +#![allow(clippy::upper_case_acronyms)] +/// +/// This example performs the full flow: +/// - define the circuit to be folded +/// - fold the circuit with Nova+CycleFold's IVC +/// - generate a DeciderEthCircuit final proof +/// - generate the Solidity contract that verifies the proof +/// - verify the proof in the EVM +/// +use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as G1}; +use noname::backends::r1cs::R1csBn254Field; + +use ark_groth16::Groth16; +use ark_grumpkin::{constraints::GVar as GVar2, Projective as G2}; + +use folding_schemes::{ + commitment::{kzg::KZG, pedersen::Pedersen}, + folding::nova::{ + decider_eth::{prepare_calldata, Decider as DeciderEth}, + Nova, PreprocessorParam, + }, + frontend::{noname::NonameFCircuit, FCircuit}, + transcript::poseidon::poseidon_canonical_config, + Decider, FoldingScheme, +}; +use std::time::Instant; + +use solidity_verifiers::{ + evm::{compile_solidity, Evm}, + utils::get_function_selector_for_nova_cyclefold_verifier, + verifiers::nova_cyclefold::get_decider_template_for_cyclefold_decider, + NovaCycleFoldVerifierKey, +}; + +fn main() { + const NONAME_CIRCUIT_EXTERNAL_INPUTS: &str = + "fn main(pub ivc_inputs: [Field; 2], external_inputs: [Field; 2]) -> [Field; 2] { + 
let xx = external_inputs[0] + ivc_inputs[0]; + let yy = external_inputs[1] * ivc_inputs[1]; + assert_eq(yy, xx); + return [xx, yy]; +}"; + + // set the initial state + let z_0 = vec![Fr::from(2), Fr::from(5)]; + + // set the external inputs to be used at each step of the IVC, it has length of 10 since this + // is the number of steps that we will do + let external_inputs = vec![ + vec![Fr::from(8u32), Fr::from(2u32)], + vec![Fr::from(40), Fr::from(5)], + ]; + + // initialize the noname circuit + let f_circuit_params = (NONAME_CIRCUIT_EXTERNAL_INPUTS.to_owned(), 2, 2); + let f_circuit = NonameFCircuit::::new(f_circuit_params).unwrap(); + + pub type N = Nova< + G1, + GVar, + G2, + GVar2, + NonameFCircuit, + KZG<'static, Bn254>, + Pedersen, + >; + pub type D = DeciderEth< + G1, + GVar, + G2, + GVar2, + NonameFCircuit, + KZG<'static, Bn254>, + Pedersen, + Groth16, + N, + >; + + let poseidon_config = poseidon_canonical_config::(); + let mut rng = rand::rngs::OsRng; + + // prepare the Nova prover & verifier params + let nova_preprocess_params = PreprocessorParam::new(poseidon_config, f_circuit.clone()); + let nova_params = N::preprocess(&mut rng, &nova_preprocess_params).unwrap(); + + // initialize the folding scheme engine, in our case we use Nova + let mut nova = N::init(&nova_params, f_circuit.clone(), z_0).unwrap(); + + // prepare the Decider prover & verifier params + let (decider_pp, decider_vp) = D::preprocess(&mut rng, &nova_params, nova.clone()).unwrap(); + + // run n steps of the folding iteration + for (i, external_inputs_at_step) in external_inputs.iter().enumerate() { + let start = Instant::now(); + nova.prove_step(rng, external_inputs_at_step.clone(), None) + .unwrap(); + println!("Nova::prove_step {}: {:?}", i, start.elapsed()); + } + + let start = Instant::now(); + let proof = D::prove(rng, decider_pp, nova.clone()).unwrap(); + println!("generated Decider proof: {:?}", start.elapsed()); + + let verified = D::verify( + decider_vp.clone(), + nova.i, + nova.z_0.clone(), + nova.z_i.clone(), + &nova.U_i, + &nova.u_i, + &proof, + ) + .unwrap(); + assert!(verified); + println!("Decider proof verification: {}", verified); + + // Now, let's generate the Solidity code that verifies this Decider final proof + let function_selector = + get_function_selector_for_nova_cyclefold_verifier(nova.z_0.len() * 2 + 1); + + let calldata: Vec = prepare_calldata( + function_selector, + nova.i, + nova.z_0, + nova.z_i, + &nova.U_i, + &nova.u_i, + proof, + ) + .unwrap(); + + // prepare the setup params for the solidity verifier + let nova_cyclefold_vk = NovaCycleFoldVerifierKey::from((decider_vp, f_circuit.state_len())); + + // generate the solidity code + let decider_solidity_code = get_decider_template_for_cyclefold_decider(nova_cyclefold_vk); + + // verify the proof against the solidity code in the EVM + let nova_cyclefold_verifier_bytecode = compile_solidity(&decider_solidity_code, "NovaDecider"); + let mut evm = Evm::default(); + let verifier_address = evm.create(nova_cyclefold_verifier_bytecode); + let (_, output) = evm.call(verifier_address, calldata.clone()); + assert_eq!(*output.last().unwrap(), 1); + + // save smart contract and the calldata + println!("storing nova-verifier.sol and the calldata into files"); + use std::fs; + fs::write( + "./examples/nova-verifier.sol", + decider_solidity_code.clone(), + ) + .unwrap(); + fs::write("./examples/solidity-calldata.calldata", calldata.clone()).unwrap(); + let s = solidity_verifiers::utils::get_formatted_calldata(calldata.clone()); + 
fs::write("./examples/solidity-calldata.inputs", s.join(",\n")).expect(""); +} diff --git a/examples/sha256.rs b/examples/sha256.rs index 714eb01e..eea18875 100644 --- a/examples/sha256.rs +++ b/examples/sha256.rs @@ -20,11 +20,10 @@ use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective}; use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; use folding_schemes::commitment::{kzg::KZG, pedersen::Pedersen}; -use folding_schemes::folding::nova::Nova; +use folding_schemes::folding::nova::{Nova, PreprocessorParam}; use folding_schemes::frontend::FCircuit; +use folding_schemes::transcript::poseidon::poseidon_canonical_config; use folding_schemes::{Error, FoldingScheme}; -mod utils; -use utils::init_nova_ivc_params; /// This is the circuit that we want to fold, it implements the FCircuit trait. /// The parameter z_i denotes the current state, and z_{i+1} denotes the next state which we get by @@ -109,13 +108,10 @@ fn main() { let F_circuit = Sha256FCircuit::::new(()).unwrap(); - println!("Prepare Nova ProverParams & VerifierParams"); - let (prover_params, verifier_params, _) = init_nova_ivc_params::>(F_circuit); - /// The idea here is that eventually we could replace the next line chunk that defines the - /// `type NOVA = Nova<...>` by using another folding scheme that fulfills the `FoldingScheme` + /// `type N = Nova<...>` by using another folding scheme that fulfills the `FoldingScheme` /// trait, and the rest of our code would be working without needing to be updated. - type NOVA = Nova< + type N = Nova< Projective, GVar, Projective2, @@ -125,21 +121,27 @@ fn main() { Pedersen, >; - println!("Initialize FoldingScheme"); - let mut folding_scheme = NOVA::init(&prover_params, F_circuit, initial_state.clone()).unwrap(); + let poseidon_config = poseidon_canonical_config::(); + let mut rng = rand::rngs::OsRng; + println!("Prepare Nova ProverParams & VerifierParams"); + let nova_preprocess_params = PreprocessorParam::new(poseidon_config, F_circuit); + let nova_params = N::preprocess(&mut rng, &nova_preprocess_params).unwrap(); + + println!("Initialize FoldingScheme"); + let mut folding_scheme = N::init(&nova_params, F_circuit, initial_state.clone()).unwrap(); // compute a step of the IVC for i in 0..num_steps { let start = Instant::now(); - folding_scheme.prove_step(vec![]).unwrap(); + folding_scheme.prove_step(rng, vec![], None).unwrap(); println!("Nova::prove_step {}: {:?}", i, start.elapsed()); } let (running_instance, incoming_instance, cyclefold_instance) = folding_scheme.instances(); println!("Run the Nova's IVC verifier"); - NOVA::verify( - verifier_params, + N::verify( + nova_params.1, initial_state, folding_scheme.state(), // latest state Fr::from(num_steps as u32), diff --git a/folding-schemes/Cargo.toml b/folding-schemes/Cargo.toml index 77d65c45..3e9ca003 100644 --- a/folding-schemes/Cargo.toml +++ b/folding-schemes/Cargo.toml @@ -23,11 +23,15 @@ num-integer = "0.1" color-eyre = "=0.6.2" ark-bn254 = {version="0.4.0"} ark-groth16 = { version = "^0.4.0" } -csv = "1.1" +sha3 = "0.10" +ark-noname = { git = "https://github.com/dmpierre/ark-noname", branch="feat/sonobe-integration" } +noname = { git = "https://github.com/dmpierre/noname" } +serde_json = "1.0.85" # to (de)serialize JSON +serde = "1.0.203" -# tmp imports for espresso's sumcheck +# tmp import for espresso's sumcheck espresso_subroutines = {git="https://github.com/EspressoSystems/hyperplonk", package="subroutines"} -num-traits = "0.2.15" +csv = "1.3.0" [dev-dependencies] ark-pallas = 
{version="0.4.0", features=["r1cs"]} @@ -38,19 +42,18 @@ rand = "0.8.5" tracing = { version = "0.1", default-features = false, features = [ "attributes" ] } tracing-subscriber = { version = "0.2" } - [features] default = ["parallel"] light-test = [] -parallel = [ - "ark-std/parallel", - "ark-ff/parallel", - "ark-ec/parallel", - "ark-poly/parallel", - "ark-crypto-primitives/parallel", - "ark-r1cs-std/parallel", -] +parallel = [ + "ark-std/parallel", + "ark-ff/parallel", + "ark-ec/parallel", + "ark-poly/parallel", + "ark-crypto-primitives/parallel", + "ark-r1cs-std/parallel", + ] [[example]] @@ -66,17 +69,13 @@ name = "external_inputs" path = "../examples/external_inputs.rs" [[example]] -name = "mova" -path = "../examples/mova.rs" +name = "hp" +path = "../examples/multiple_inputs/hypernova.rs" [[example]] -name = "nova" -path = "../examples/nova.rs" +name = "nova-multi-inputs" +path = "../examples/multiple_inputs/nova.rs" [[example]] name = "mova-multi-inputs" path = "../examples/multiple_inputs/mova.rs" - -[[example]] -name = "nova-multi-inputs" -path = "../examples/multiple_inputs/nova.rs" diff --git a/folding-schemes/src/arith/ccs.rs b/folding-schemes/src/arith/ccs.rs new file mode 100644 index 00000000..3dd6b877 --- /dev/null +++ b/folding-schemes/src/arith/ccs.rs @@ -0,0 +1,135 @@ +use ark_ff::PrimeField; +use ark_std::log2; + +use crate::utils::vec::{hadamard, mat_vec_mul, vec_add, vec_scalar_mul, SparseMatrix}; +use crate::Error; + +use super::{r1cs::R1CS, Arith}; + +/// CCS represents the Customizable Constraint Systems structure defined in +/// the [CCS paper](https://eprint.iacr.org/2023/552) +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct CCS { + /// m: number of rows in M_i (such that M_i \in F^{m, n}) + pub m: usize, + /// n = |z|, number of cols in M_i + pub n: usize, + /// l = |io|, size of public input/output + pub l: usize, + /// t = |M|, number of matrices + pub t: usize, + /// q = |c| = |S|, number of multisets + pub q: usize, + /// d: max degree in each variable + pub d: usize, + /// s = log(m), dimension of x + pub s: usize, + /// s_prime = log(n), dimension of y + pub s_prime: usize, + + /// vector of matrices + pub M: Vec>, + /// vector of multisets + pub S: Vec>, + /// vector of coefficients + pub c: Vec, +} + +impl Arith for CCS { + /// check that a CCS structure is satisfied by a z vector. Only for testing. 
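+ /// Concretely, it checks that `Σ_{i in [q]} c_i · (∘_{j in S_i} M_j·z) = 0`, where `∘` denotes
+ /// the Hadamard (entry-wise) product; for a CCS built via `from_r1cs` this reduces to
+ /// `(A·z) ∘ (B·z) - (C·z) = 0`.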
+ fn check_relation(&self, z: &[F]) -> Result<(), Error> { + let mut result = vec![F::zero(); self.m]; + + for i in 0..self.q { + // extract the needed M_j matrices out of S_i + let vec_M_j: Vec<&SparseMatrix> = self.S[i].iter().map(|j| &self.M[*j]).collect(); + + // complete the hadamard chain + let mut hadamard_result = vec![F::one(); self.m]; + for M_j in vec_M_j.into_iter() { + hadamard_result = hadamard(&hadamard_result, &mat_vec_mul(M_j, z)?)?; + } + + // multiply by the coefficient of this step + let c_M_j_z = vec_scalar_mul(&hadamard_result, &self.c[i]); + + // add it to the final vector + result = vec_add(&result, &c_M_j_z)?; + } + + // make sure the final vector is all zeroes + for e in result { + if !e.is_zero() { + return Err(Error::NotSatisfied); + } + } + + Ok(()) + } + + fn params_to_le_bytes(&self) -> Vec { + [ + self.l.to_le_bytes(), + self.m.to_le_bytes(), + self.n.to_le_bytes(), + self.t.to_le_bytes(), + self.q.to_le_bytes(), + self.d.to_le_bytes(), + ] + .concat() + } +} + +impl CCS { + pub fn from_r1cs(r1cs: R1CS) -> Self { + let m = r1cs.A.n_rows; + let n = r1cs.A.n_cols; + CCS { + m, + n, + l: r1cs.l, + s: log2(m) as usize, + s_prime: log2(n) as usize, + t: 3, + q: 2, + d: 2, + + S: vec![vec![0, 1], vec![2]], + c: vec![F::one(), F::one().neg()], + M: vec![r1cs.A, r1cs.B, r1cs.C], + } + } + + pub fn to_r1cs(self) -> R1CS { + R1CS:: { + l: self.l, + A: self.M[0].clone(), + B: self.M[1].clone(), + C: self.M[2].clone(), + } + } +} + +#[cfg(test)] +pub mod tests { + use super::*; + use crate::arith::r1cs::tests::{get_test_r1cs, get_test_z as r1cs_get_test_z}; + use ark_pallas::Fr; + + pub fn get_test_ccs() -> CCS { + let r1cs = get_test_r1cs::(); + CCS::::from_r1cs(r1cs) + } + pub fn get_test_z(input: usize) -> Vec { + r1cs_get_test_z(input) + } + + /// Test that a basic CCS relation can be satisfied + #[test] + fn test_ccs_relation() { + let ccs = get_test_ccs::(); + let z = get_test_z(3); + + ccs.check_relation(&z).unwrap(); + } +} diff --git a/folding-schemes/src/arith/mod.rs b/folding-schemes/src/arith/mod.rs new file mode 100644 index 00000000..e09746d3 --- /dev/null +++ b/folding-schemes/src/arith/mod.rs @@ -0,0 +1,15 @@ +use ark_ff::PrimeField; + +use crate::Error; + +pub mod ccs; +pub mod r1cs; + +pub trait Arith { + /// Checks that the given Arith structure is satisfied by a z vector. Used only for testing. + fn check_relation(&self, z: &[F]) -> Result<(), Error>; + + /// Returns the bytes that represent the parameters, that is, the matrices sizes, the amount of + /// public inputs, etc, without the matrices/polynomials values. + fn params_to_le_bytes(&self) -> Vec; +} diff --git a/folding-schemes/src/arith/r1cs.rs b/folding-schemes/src/arith/r1cs.rs new file mode 100644 index 00000000..f510b67d --- /dev/null +++ b/folding-schemes/src/arith/r1cs.rs @@ -0,0 +1,205 @@ +use ark_ff::PrimeField; +use ark_relations::r1cs::ConstraintSystem; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ark_std::rand::Rng; + +use super::Arith; +use crate::utils::vec::{hadamard, mat_vec_mul, vec_add, vec_scalar_mul, SparseMatrix}; +use crate::Error; + +#[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] +pub struct R1CS { + pub l: usize, // io len + pub A: SparseMatrix, + pub B: SparseMatrix, + pub C: SparseMatrix, +} + +impl Arith for R1CS { + /// check that a R1CS structure is satisfied by a z vector. Only for testing. 
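+ /// Concretely, it checks that `(A·z) ∘ (B·z) = C·z` (Hadamard product), with `z = (1, x, w)`.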
+ fn check_relation(&self, z: &[F]) -> Result<(), Error> { + let Az = mat_vec_mul(&self.A, z)?; + let Bz = mat_vec_mul(&self.B, z)?; + let Cz = mat_vec_mul(&self.C, z)?; + let AzBz = hadamard(&Az, &Bz)?; + if AzBz != Cz { + return Err(Error::NotSatisfied); + } + Ok(()) + } + + fn params_to_le_bytes(&self) -> Vec { + [ + self.l.to_le_bytes(), + self.A.n_rows.to_le_bytes(), + self.A.n_cols.to_le_bytes(), + ] + .concat() + } +} + +impl R1CS { + pub fn rand(rng: &mut R, n_rows: usize, n_cols: usize) -> Self { + Self { + l: 1, + A: SparseMatrix::rand(rng, n_rows, n_cols), + B: SparseMatrix::rand(rng, n_rows, n_cols), + C: SparseMatrix::rand(rng, n_rows, n_cols), + } + } + + /// returns a tuple containing (w, x) (witness and public inputs respectively) + pub fn split_z(&self, z: &[F]) -> (Vec, Vec) { + (z[self.l + 1..].to_vec(), z[1..self.l + 1].to_vec()) + } + + /// converts the R1CS instance into a RelaxedR1CS as described in + /// [Nova](https://eprint.iacr.org/2021/370.pdf) section 4.1. + pub fn relax(self) -> RelaxedR1CS { + RelaxedR1CS:: { + l: self.l, + E: vec![F::zero(); self.A.n_rows], + A: self.A, + B: self.B, + C: self.C, + u: F::one(), + } + } +} + +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct RelaxedR1CS { + pub l: usize, // io len + pub A: SparseMatrix, + pub B: SparseMatrix, + pub C: SparseMatrix, + pub u: F, + pub E: Vec, +} + +impl RelaxedR1CS { + /// check that a RelaxedR1CS structure is satisfied by a z vector. Only for testing. + pub fn check_relation(&self, z: &[F]) -> Result<(), Error> { + let Az = mat_vec_mul(&self.A, z)?; + let Bz = mat_vec_mul(&self.B, z)?; + let Cz = mat_vec_mul(&self.C, z)?; + let uCz = vec_scalar_mul(&Cz, &self.u); + let uCzE = vec_add(&uCz, &self.E)?; + let AzBz = hadamard(&Az, &Bz)?; + if AzBz != uCzE { + return Err(Error::NotSatisfied); + } + + Ok(()) + } +} + +/// extracts arkworks ConstraintSystem matrices into crate::utils::vec::SparseMatrix format as R1CS +/// struct. +pub fn extract_r1cs(cs: &ConstraintSystem) -> R1CS { + let m = cs.to_matrices().unwrap(); + + let n_rows = cs.num_constraints; + let n_cols = cs.num_instance_variables + cs.num_witness_variables; // cs.num_instance_variables already counts the 1 + + let A = SparseMatrix:: { + n_rows, + n_cols, + coeffs: m.a, + }; + let B = SparseMatrix:: { + n_rows, + n_cols, + coeffs: m.b, + }; + let C = SparseMatrix:: { + n_rows, + n_cols, + coeffs: m.c, + }; + + R1CS:: { + l: cs.num_instance_variables - 1, // -1 to subtract the first '1' + A, + B, + C, + } +} + +/// extracts the witness and the public inputs from arkworks ConstraintSystem. 
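+/// Returns `(w, x)`, where `x` excludes the leading constant `1` of the instance assignment.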
+pub fn extract_w_x(cs: &ConstraintSystem) -> (Vec, Vec) { + ( + cs.witness_assignment.clone(), + // skip the first element which is '1' + cs.instance_assignment[1..].to_vec(), + ) +} + +#[cfg(test)] +pub mod tests { + use super::*; + use crate::utils::vec::tests::{to_F_matrix, to_F_vec}; + + use ark_pallas::Fr; + + pub fn get_test_r1cs() -> R1CS { + // R1CS for: x^3 + x + 5 = y (example from article + // https://www.vitalik.ca/general/2016/12/10/qap.html ) + let A = to_F_matrix::(vec![ + vec![0, 1, 0, 0, 0, 0], + vec![0, 0, 0, 1, 0, 0], + vec![0, 1, 0, 0, 1, 0], + vec![5, 0, 0, 0, 0, 1], + ]); + let B = to_F_matrix::(vec![ + vec![0, 1, 0, 0, 0, 0], + vec![0, 1, 0, 0, 0, 0], + vec![1, 0, 0, 0, 0, 0], + vec![1, 0, 0, 0, 0, 0], + ]); + let C = to_F_matrix::(vec![ + vec![0, 0, 0, 1, 0, 0], + vec![0, 0, 0, 0, 1, 0], + vec![0, 0, 0, 0, 0, 1], + vec![0, 0, 1, 0, 0, 0], + ]); + + R1CS:: { l: 1, A, B, C } + } + + pub fn get_test_z(input: usize) -> Vec { + // z = (1, io, w) + to_F_vec(vec![ + 1, + input, // io + input * input * input + input + 5, // x^3 + x + 5 + input * input, // x^2 + input * input * input, // x^2 * x + input * input * input + input, // x^3 + x + ]) + } + + pub fn get_test_z_split(input: usize) -> (F, Vec, Vec) { + // z = (1, io, w) + ( + F::one(), + to_F_vec(vec![ + input, // io + ]), + to_F_vec(vec![ + input * input * input + input + 5, // x^3 + x + 5 + input * input, // x^2 + input * input * input, // x^2 * x + input * input * input + input, // x^3 + x + ]), + ) + } + + #[test] + fn test_check_relation() { + let r1cs = get_test_r1cs::(); + let z = get_test_z(5); + r1cs.check_relation(&z).unwrap(); + r1cs.relax().check_relation(&z).unwrap(); + } +} diff --git a/folding-schemes/src/commitment/ipa.rs b/folding-schemes/src/commitment/ipa.rs index 18060ff6..46078347 100644 --- a/folding-schemes/src/commitment/ipa.rs +++ b/folding-schemes/src/commitment/ipa.rs @@ -54,6 +54,13 @@ impl CommitmentScheme for IPA { type ProverChallenge = (C::ScalarField, C, Vec); type Challenge = (C::ScalarField, C, Vec); + fn is_hiding() -> bool { + if H { + return true; + } + false + } + fn setup( mut rng: impl RngCore, len: usize, @@ -90,7 +97,7 @@ impl CommitmentScheme for IPA { fn prove( params: &Self::ProverParams, - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, P: &C, // commitment a: &[C::ScalarField], // vector blind: &C::ScalarField, @@ -124,7 +131,7 @@ impl CommitmentScheme for IPA { r = vec![]; } - transcript.absorb_point(P)?; + transcript.absorb_nonnative(P); let x = transcript.get_challenge(); // challenge value at which we evaluate let s = transcript.get_challenge(); let U = C::generator().mul(s); @@ -155,8 +162,8 @@ impl CommitmentScheme for IPA { R[j] = C::msm_unchecked(&G[..m], &a[m..]) + U.mul(inner_prod(&a[m..], &b[..m])?); } // get challenge for the j-th round - transcript.absorb_point(&L[j])?; - transcript.absorb_point(&R[j])?; + transcript.absorb_nonnative(&L[j]); + transcript.absorb_nonnative(&R[j]); u[j] = transcript.get_challenge(); let uj = u[j]; @@ -218,21 +225,21 @@ impl CommitmentScheme for IPA { fn verify( params: &Self::VerifierParams, - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, P: &C, // commitment proof: &Self::Proof, ) -> Result<(), Error> { let (p, _r) = (proof.0.clone(), proof.1); let k = p.L.len(); - transcript.absorb_point(P)?; + transcript.absorb_nonnative(P); let x = transcript.get_challenge(); // challenge value at which we evaluate let s = transcript.get_challenge(); let U = C::generator().mul(s); let mut u: 
Vec = vec![C::ScalarField::zero(); k]; for i in (0..k).rev() { - transcript.absorb_point(&p.L[i])?; - transcript.absorb_point(&p.R[i])?; + transcript.absorb_nonnative(&p.L[i]); + transcript.absorb_nonnative(&p.R[i]); u[i] = transcript.get_challenge(); } let challenge = (x, U, u); @@ -559,15 +566,15 @@ where #[cfg(test)] mod tests { + use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, CryptographicSponge}; use ark_ec::Group; use ark_pallas::{constraints::GVar, Fq, Fr, Projective}; - use ark_r1cs_std::{alloc::AllocVar, bits::boolean::Boolean, eq::EqGadget}; + use ark_r1cs_std::eq::EqGadget; use ark_relations::r1cs::ConstraintSystem; - use ark_std::UniformRand; use std::ops::Mul; use super::*; - use crate::transcript::poseidon::{poseidon_canonical_config, PoseidonTranscript}; + use crate::transcript::poseidon::poseidon_canonical_config; #[test] fn test_ipa() { @@ -585,9 +592,9 @@ mod tests { let poseidon_config = poseidon_canonical_config::(); // init Prover's transcript - let mut transcript_p = PoseidonTranscript::::new(&poseidon_config); + let mut transcript_p = PoseidonSponge::::new(&poseidon_config); // init Verifier's transcript - let mut transcript_v = PoseidonTranscript::::new(&poseidon_config); + let mut transcript_v = PoseidonSponge::::new(&poseidon_config); // a is the vector that we're committing let a: Vec = std::iter::repeat_with(|| Fr::rand(&mut rng)) @@ -629,9 +636,9 @@ mod tests { let poseidon_config = poseidon_canonical_config::(); // init Prover's transcript - let mut transcript_p = PoseidonTranscript::::new(&poseidon_config); + let mut transcript_p = PoseidonSponge::::new(&poseidon_config); // init Verifier's transcript - let mut transcript_v = PoseidonTranscript::::new(&poseidon_config); + let mut transcript_v = PoseidonSponge::::new(&poseidon_config); let mut a: Vec = std::iter::repeat_with(|| Fr::rand(&mut rng)) .take(d / 2) @@ -659,15 +666,15 @@ mod tests { // circuit let cs = ConstraintSystem::::new_ref(); - let mut transcript_v = PoseidonTranscript::::new(&poseidon_config); - transcript_v.absorb_point(&cm).unwrap(); + let mut transcript_v = PoseidonSponge::::new(&poseidon_config); + transcript_v.absorb_nonnative(&cm); let challenge = transcript_v.get_challenge(); // challenge value at which we evaluate let s = transcript_v.get_challenge(); let U = Projective::generator().mul(s); let mut u: Vec = vec![Fr::zero(); k]; for i in (0..k).rev() { - transcript_v.absorb_point(&proof.0.L[i]).unwrap(); - transcript_v.absorb_point(&proof.0.R[i]).unwrap(); + transcript_v.absorb_nonnative(&proof.0.L[i]); + transcript_v.absorb_nonnative(&proof.0.R[i]); u[i] = transcript_v.get_challenge(); } diff --git a/folding-schemes/src/commitment/kzg.rs b/folding-schemes/src/commitment/kzg.rs index b8254b2f..d0344286 100644 --- a/folding-schemes/src/commitment/kzg.rs +++ b/folding-schemes/src/commitment/kzg.rs @@ -93,6 +93,13 @@ where type ProverChallenge = E::ScalarField; type Challenge = E::ScalarField; + fn is_hiding() -> bool { + if H { + return true; + } + false + } + /// setup returns the tuple (ProverKey, VerifierKey). For real world deployments the setup must /// be computed in the most trustless way possible, usually through a MPC ceremony. fn setup( @@ -149,13 +156,13 @@ where /// the Pairing trait. 
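+ /// The evaluation point is not passed in as an argument: it is derived via Fiat-Shamir by
+ /// absorbing the commitment into the transcript and using the resulting challenge as the
+ /// opening point.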
fn prove( params: &Self::ProverParams, - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, cm: &E::G1, v: &[E::ScalarField], _blind: &E::ScalarField, _rng: Option<&mut dyn RngCore>, ) -> Result { - transcript.absorb_point(cm)?; + transcript.absorb_nonnative(cm); let challenge = transcript.get_challenge(); Self::prove_with_challenge(params, challenge, v, _blind, _rng) } @@ -207,11 +214,11 @@ where fn verify( params: &Self::VerifierParams, - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, cm: &E::G1, proof: &Self::Proof, ) -> Result<(), Error> { - transcript.absorb_point(cm)?; + transcript.absorb_nonnative(cm); let challenge = transcript.get_challenge(); Self::verify_with_challenge(params, challenge, cm, proof) } @@ -279,17 +286,18 @@ fn convert_to_bigints(p: &[F]) -> Vec { #[cfg(test)] mod tests { use ark_bn254::{Bn254, Fr, G1Projective as G1}; + use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, CryptographicSponge}; use ark_std::{test_rng, UniformRand}; use super::*; - use crate::transcript::poseidon::{poseidon_canonical_config, PoseidonTranscript}; + use crate::transcript::poseidon::poseidon_canonical_config; #[test] fn test_kzg_commitment_scheme() { let mut rng = &mut test_rng(); let poseidon_config = poseidon_canonical_config::(); - let transcript_p = &mut PoseidonTranscript::::new(&poseidon_config); - let transcript_v = &mut PoseidonTranscript::::new(&poseidon_config); + let transcript_p = &mut PoseidonSponge::::new(&poseidon_config); + let transcript_v = &mut PoseidonSponge::::new(&poseidon_config); let n = 10; let (pk, vk): (ProverKey, VerifierKey) = diff --git a/folding-schemes/src/commitment/mod.rs b/folding-schemes/src/commitment/mod.rs index 7b1add79..58ecadd9 100644 --- a/folding-schemes/src/commitment/mod.rs +++ b/folding-schemes/src/commitment/mod.rs @@ -1,4 +1,5 @@ use ark_ec::CurveGroup; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::fmt::Debug; use ark_std::rand::RngCore; @@ -13,11 +14,13 @@ pub mod pedersen; /// commitment in hiding mode or not. 
pub trait CommitmentScheme: Clone + Debug { type ProverParams: Clone + Debug; - type VerifierParams: Clone + Debug; + type VerifierParams: Clone + Debug + CanonicalSerialize + CanonicalDeserialize; type Proof: Clone + Debug; type ProverChallenge: Clone + Debug; type Challenge: Clone + Debug; + fn is_hiding() -> bool; + fn setup( rng: impl RngCore, len: usize, @@ -31,7 +34,7 @@ pub trait CommitmentScheme: Clone + Debug fn prove( params: &Self::ProverParams, - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, cm: &C, v: &[C::ScalarField], blind: &C::ScalarField, @@ -50,7 +53,7 @@ pub trait CommitmentScheme: Clone + Debug fn verify( params: &Self::VerifierParams, - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, cm: &C, proof: &Self::Proof, ) -> Result<(), Error>; @@ -69,7 +72,10 @@ pub trait CommitmentScheme: Clone + Debug mod tests { use super::*; use ark_bn254::{Bn254, Fr, G1Projective as G1}; - use ark_crypto_primitives::sponge::{poseidon::PoseidonConfig, Absorb}; + use ark_crypto_primitives::sponge::{ + poseidon::{PoseidonConfig, PoseidonSponge}, + Absorb, CryptographicSponge, + }; use ark_poly_commit::kzg10::VerifierKey; use ark_std::Zero; use ark_std::{test_rng, UniformRand}; @@ -77,10 +83,7 @@ mod tests { use super::ipa::IPA; use super::kzg::{ProverKey, KZG}; use super::pedersen::Pedersen; - use crate::transcript::{ - poseidon::{poseidon_canonical_config, PoseidonTranscript}, - Transcript, - }; + use crate::transcript::poseidon::poseidon_canonical_config; #[test] fn test_homomorphic_property_using_Commitment_trait() { @@ -150,7 +153,7 @@ mod tests { let v_3: Vec = v_1.iter().zip(v_2).map(|(a, b)| *a + (r * b)).collect(); // compute the proof of the cm_3 - let transcript_p = &mut PoseidonTranscript::::new(poseidon_config); + let transcript_p = &mut PoseidonSponge::::new(poseidon_config); let proof = CS::prove( prover_params, transcript_p, @@ -162,7 +165,7 @@ mod tests { .unwrap(); // verify the opening proof - let transcript_v = &mut PoseidonTranscript::::new(poseidon_config); + let transcript_v = &mut PoseidonSponge::::new(poseidon_config); CS::verify(verifier_params, transcript_v, &cm_3, &proof).unwrap(); } } diff --git a/folding-schemes/src/commitment/pedersen.rs b/folding-schemes/src/commitment/pedersen.rs index 82d07a07..4d225ce9 100644 --- a/folding-schemes/src/commitment/pedersen.rs +++ b/folding-schemes/src/commitment/pedersen.rs @@ -38,6 +38,13 @@ impl CommitmentScheme for Pedersen { type ProverChallenge = (C::ScalarField, Vec, C, C::ScalarField); type Challenge = C::ScalarField; + fn is_hiding() -> bool { + if H { + return true; + } + false + } + fn setup( mut rng: impl RngCore, len: usize, @@ -74,13 +81,13 @@ impl CommitmentScheme for Pedersen { fn prove( params: &Self::ProverParams, - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, cm: &C, v: &[C::ScalarField], r: &C::ScalarField, // blinding factor _rng: Option<&mut dyn RngCore>, ) -> Result { - transcript.absorb_point(cm)?; + transcript.absorb_nonnative(cm); let r1 = transcript.get_challenge(); let d = transcript.get_challenges(v.len()); @@ -91,7 +98,7 @@ impl CommitmentScheme for Pedersen { R += params.h.mul(r1); } - transcript.absorb_point(&R)?; + transcript.absorb_nonnative(&R); let e = transcript.get_challenge(); let challenge = (r1, d, R, e); @@ -126,14 +133,14 @@ impl CommitmentScheme for Pedersen { fn verify( params: &Self::VerifierParams, - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, cm: &C, proof: &Proof, ) -> Result<(), 
Error> { - transcript.absorb_point(cm)?; + transcript.absorb_nonnative(cm); transcript.get_challenge(); // r_1 transcript.get_challenges(proof.u.len()); // d - transcript.absorb_point(&proof.R)?; + transcript.absorb_nonnative(&proof.R); let e = transcript.get_challenge(); Self::verify_with_challenge(params, e, cm, proof) } @@ -210,14 +217,14 @@ where #[cfg(test)] mod tests { + use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, CryptographicSponge}; use ark_ff::{BigInteger, PrimeField}; use ark_pallas::{constraints::GVar, Fq, Fr, Projective}; use ark_r1cs_std::{alloc::AllocVar, eq::EqGadget}; use ark_relations::r1cs::ConstraintSystem; - use ark_std::UniformRand; use super::*; - use crate::transcript::poseidon::{poseidon_canonical_config, PoseidonTranscript}; + use crate::transcript::poseidon::poseidon_canonical_config; #[test] fn test_pedersen() { @@ -233,9 +240,9 @@ mod tests { let poseidon_config = poseidon_canonical_config::(); // init Prover's transcript - let mut transcript_p = PoseidonTranscript::::new(&poseidon_config); + let mut transcript_p = PoseidonSponge::::new(&poseidon_config); // init Verifier's transcript - let mut transcript_v = PoseidonTranscript::::new(&poseidon_config); + let mut transcript_v = PoseidonSponge::::new(&poseidon_config); let v: Vec = std::iter::repeat_with(|| Fr::rand(&mut rng)) .take(n) diff --git a/folding-schemes/src/folding/circuits/cyclefold.rs b/folding-schemes/src/folding/circuits/cyclefold.rs new file mode 100644 index 00000000..4a22eb28 --- /dev/null +++ b/folding-schemes/src/folding/circuits/cyclefold.rs @@ -0,0 +1,686 @@ +/// Contains [CycleFold](https://eprint.iacr.org/2023/1192.pdf) related circuits and functions that +/// are shared across the different folding schemes +use ark_crypto_primitives::sponge::{Absorb, CryptographicSponge}; +use ark_ec::{CurveGroup, Group}; +use ark_ff::{BigInteger, PrimeField}; +use ark_r1cs_std::{ + alloc::{AllocVar, AllocationMode}, + boolean::Boolean, + eq::EqGadget, + fields::fp::FpVar, + groups::GroupOpsBounds, + prelude::CurveVar, + ToConstraintFieldGadget, +}; +use ark_relations::r1cs::{ + ConstraintSynthesizer, ConstraintSystem, ConstraintSystemRef, Namespace, SynthesisError, +}; +use ark_std::fmt::Debug; +use ark_std::Zero; +use core::{borrow::Borrow, marker::PhantomData}; + +use super::{nonnative::uint::NonNativeUintVar, CF2}; +use crate::arith::r1cs::{extract_w_x, R1CS}; +use crate::commitment::CommitmentScheme; +use crate::constants::N_BITS_RO; +use crate::folding::nova::{nifs::NIFS, CommittedInstance, Witness}; +use crate::frontend::FCircuit; +use crate::transcript::{AbsorbNonNativeGadget, Transcript, TranscriptVar}; +use crate::Error; + +/// Public inputs length for the CycleFoldCircuit: +/// For Nova this is: |[r, p1.x,y, p2.x,y, p3.x,y]| +/// In general, |[r * (n_points-1), (p_i.x,y)*n_points, p_folded.x,y]|, thus, io len is: +/// (n_points-1) + 2*n_points + 2 +pub fn cf_io_len(n_points: usize) -> usize { + (n_points - 1) + 2 * n_points + 2 +} + +/// CycleFoldCommittedInstanceVar is the CycleFold CommittedInstance representation in the Nova +/// circuit. 
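For concreteness, the `cf_io_len` formula above instantiated for Nova's two-point fold gives the `7 = cf_io_len` value that appears in this file's tests:
// n_points = 2: one r value, two input points and the folded point, 2 coordinates each
// (2 - 1) + 2 * 2 + 2 = 7   // [r, p1.x, p1.y, p2.x, p2.y, p3.x, p3.y]
assert_eq!(cf_io_len(2), 7);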
+#[derive(Debug, Clone)] +pub struct CycleFoldCommittedInstanceVar>> +where + for<'a> &'a GC: GroupOpsBounds<'a, C, GC>, +{ + pub cmE: GC, + pub u: NonNativeUintVar>, + pub cmW: GC, + pub x: Vec>>, +} +impl AllocVar, CF2> for CycleFoldCommittedInstanceVar +where + C: CurveGroup, + GC: CurveVar>, + ::BaseField: ark_ff::PrimeField, + for<'a> &'a GC: GroupOpsBounds<'a, C, GC>, +{ + fn new_variable>>( + cs: impl Into>>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + f().and_then(|val| { + let cs = cs.into(); + + let cmE = GC::new_variable(cs.clone(), || Ok(val.borrow().cmE), mode)?; + let cmW = GC::new_variable(cs.clone(), || Ok(val.borrow().cmW), mode)?; + let u = NonNativeUintVar::new_variable(cs.clone(), || Ok(val.borrow().u), mode)?; + let x = Vec::new_variable(cs.clone(), || Ok(val.borrow().x.clone()), mode)?; + + Ok(Self { cmE, u, cmW, x }) + }) + } +} + +impl AbsorbNonNativeGadget for CycleFoldCommittedInstanceVar +where + C: CurveGroup, + GC: CurveVar> + ToConstraintFieldGadget>, + ::BaseField: ark_ff::PrimeField + Absorb, + for<'a> &'a GC: GroupOpsBounds<'a, C, GC>, +{ + /// Extracts the underlying field elements from `CycleFoldCommittedInstanceVar`, in the order + /// of `u`, `x`, `cmE.x`, `cmE.y`, `cmW.x`, `cmW.y`, `cmE.is_inf || cmW.is_inf` (|| is for + /// concat). + fn to_native_sponge_field_elements(&self) -> Result>>, SynthesisError> { + let mut cmE_elems = self.cmE.to_constraint_field()?; + let mut cmW_elems = self.cmW.to_constraint_field()?; + + // See `transcript/poseidon.rs: TranscriptVar::absorb_point` for details + // why the last element is unnecessary. + cmE_elems.pop(); + cmW_elems.pop(); + + Ok([ + self.u.to_native_sponge_field_elements()?, + self.x + .iter() + .map(|i| i.to_native_sponge_field_elements()) + .collect::, _>>()? + .concat(), + cmE_elems, + cmW_elems, + ] + .concat()) + } +} + +impl CycleFoldCommittedInstanceVar +where + C: CurveGroup, + GC: CurveVar> + ToConstraintFieldGadget>, + ::BaseField: ark_ff::PrimeField + Absorb, + for<'a> &'a GC: GroupOpsBounds<'a, C, GC>, +{ + /// hash implements the committed instance hash compatible with the native implementation from + /// CommittedInstance.hash_cyclefold. Returns `H(U_i)`, where `U` is the `CommittedInstance` + /// for CycleFold. Additionally it returns the vector of the field elements from the self + /// parameters, so they can be reused in other gadgets avoiding recalculating (reconstraining) + /// them. + #[allow(clippy::type_complexity)] + pub fn hash, S>>( + self, + sponge: &T, + pp_hash: FpVar>, // public params hash + ) -> Result<(FpVar>, Vec>>), SynthesisError> { + let mut sponge = sponge.clone(); + let U_vec = self.to_native_sponge_field_elements()?; + sponge.absorb(&pp_hash)?; + sponge.absorb(&U_vec)?; + Ok((sponge.squeeze_field_elements(1)?.pop().unwrap(), U_vec)) + } +} + +/// CommittedInstanceInCycleFoldVar represents the Nova CommittedInstance in the CycleFold circuit, +/// where the commitments to E and W (cmW and cmW) from the CommittedInstance on the E2, +/// represented as native points, which are folded on the auxiliary curve constraints field (E2::Fr +/// = E1::Fq). 
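// Spelled out, the CycleFold instance hash computed above (and by the native
// `CommittedInstance.hash_cyclefold`) is, as a sketch:
//   H(U) = Poseidon(pp_hash ‖ limbs(U.u) ‖ limbs(U.x) ‖ U.cmE.x ‖ U.cmE.y ‖ U.cmW.x ‖ U.cmW.y)
// where limbs(·) are the nonnative-uint limbs, and the last element returned by
// `to_constraint_field` for each commitment is dropped by the `pop()` calls in
// `to_native_sponge_field_elements`.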
+#[derive(Debug, Clone)] +pub struct CommittedInstanceInCycleFoldVar>> +where + for<'a> &'a GC: GroupOpsBounds<'a, C, GC>, +{ + _c: PhantomData, + pub cmE: GC, + pub cmW: GC, +} + +impl AllocVar, CF2> for CommittedInstanceInCycleFoldVar +where + C: CurveGroup, + GC: CurveVar>, + for<'a> &'a GC: GroupOpsBounds<'a, C, GC>, +{ + fn new_variable>>( + cs: impl Into>>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + f().and_then(|val| { + let cs = cs.into(); + + let cmE = GC::new_variable(cs.clone(), || Ok(val.borrow().cmE), mode)?; + let cmW = GC::new_variable(cs.clone(), || Ok(val.borrow().cmW), mode)?; + + Ok(Self { + _c: PhantomData, + cmE, + cmW, + }) + }) + } +} + +/// This is the gadget used in the AugmentedFCircuit to verify the CycleFold instances folding, +/// which checks the correct RLC of u,x,cmE,cmW (hence the name containing 'Full', since it checks +/// all the RLC values, not only the native ones). It assumes that ci2.cmE=0, ci2.u=1. +pub struct NIFSFullGadget>> { + _c: PhantomData, + _gc: PhantomData, +} + +impl>> NIFSFullGadget +where + C: CurveGroup, + GC: CurveVar>, + ::BaseField: ark_ff::PrimeField, + for<'a> &'a GC: GroupOpsBounds<'a, C, GC>, +{ + pub fn fold_committed_instance( + // assumes that r_bits is equal to r_nonnat just that in a different format + r_bits: Vec>>, + r_nonnat: NonNativeUintVar>, + cmT: GC, + ci1: CycleFoldCommittedInstanceVar, + // ci2 is assumed to be always with cmE=0, u=1 (checks done previous to this method) + ci2: CycleFoldCommittedInstanceVar, + ) -> Result, SynthesisError> { + Ok(CycleFoldCommittedInstanceVar { + cmE: cmT.scalar_mul_le(r_bits.iter())? + ci1.cmE, + cmW: ci1.cmW + ci2.cmW.scalar_mul_le(r_bits.iter())?, + u: ci1.u.add_no_align(&r_nonnat).modulo::()?, + x: ci1 + .x + .iter() + .zip(ci2.x) + .map(|(a, b)| { + a.add_no_align(&r_nonnat.mul_no_align(&b)?) + .modulo::() + }) + .collect::, _>>()?, + }) + } + + pub fn verify( + // assumes that r_bits is equal to r_nonnat just that in a different format + r_bits: Vec>>, + r_nonnat: NonNativeUintVar>, + cmT: GC, + ci1: CycleFoldCommittedInstanceVar, + // ci2 is assumed to be always with cmE=0, u=1 (checks done previous to this method) + ci2: CycleFoldCommittedInstanceVar, + ci3: CycleFoldCommittedInstanceVar, + ) -> Result<(), SynthesisError> { + let ci = Self::fold_committed_instance(r_bits, r_nonnat, cmT, ci1, ci2)?; + + ci.cmE.enforce_equal(&ci3.cmE)?; + ci.u.enforce_equal_unaligned(&ci3.u)?; + ci.cmW.enforce_equal(&ci3.cmW)?; + for (x, y) in ci.x.iter().zip(ci3.x.iter()) { + x.enforce_equal_unaligned(y)?; + } + + Ok(()) + } +} + +/// CycleFoldChallengeGadget computes the RO challenge used for the CycleFold instances NIFS, it contains a +/// rust-native and a in-circuit compatible versions. 
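// In equation form, the fold enforced by `NIFSFullGadget::fold_committed_instance` above
// (recall that ci2.cmE = 0 and ci2.u = 1, checked before calling it) is:
//   U.cmE = ci1.cmE + r * cmT
//   U.cmW = ci1.cmW + r * ci2.cmW
//   U.u   = ci1.u + r
//   U.x_j = ci1.x_j + r * ci2.x_j    (component-wise, reduced modulo the nonnative field)
// with r provided both as bits (for the scalar multiplications) and as a nonnative integer
// (for the field arithmetic on u and x).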
+pub struct CycleFoldChallengeGadget>> { + _c: PhantomData, // Nova's Curve2, the one used for the CycleFold circuit + _gc: PhantomData, +} +impl CycleFoldChallengeGadget +where + C: CurveGroup, + GC: CurveVar> + ToConstraintFieldGadget>, + ::BaseField: PrimeField, + ::BaseField: Absorb, + for<'a> &'a GC: GroupOpsBounds<'a, C, GC>, +{ + pub fn get_challenge_native>( + transcript: &mut T, + pp_hash: C::BaseField, // public params hash + U_i: CommittedInstance, + u_i: CommittedInstance, + cmT: C, + ) -> Vec { + transcript.absorb(&pp_hash); + transcript.absorb_nonnative(&U_i); + transcript.absorb_nonnative(&u_i); + transcript.absorb_point(&cmT); + transcript.squeeze_bits(N_BITS_RO) + } + + // compatible with the native get_challenge_native + pub fn get_challenge_gadget>( + transcript: &mut T, + pp_hash: FpVar, // public params hash + U_i_vec: Vec>, + u_i: CycleFoldCommittedInstanceVar, + cmT: GC, + ) -> Result>, SynthesisError> { + transcript.absorb(&pp_hash)?; + transcript.absorb(&U_i_vec)?; + transcript.absorb_nonnative(&u_i)?; + transcript.absorb_point(&cmT)?; + transcript.squeeze_bits(N_BITS_RO) + } +} + +/// CycleFoldCircuit contains the constraints that check the correct fold of the committed +/// instances from Curve1. Namely, it checks the random linear combinations of the elliptic curve +/// (Curve1) points of u_i, U_i leading to U_{i+1} +#[derive(Debug, Clone)] +pub struct CycleFoldCircuit>> { + pub _gc: PhantomData, + /// number of points being folded + pub n_points: usize, + /// r_bits is a vector containing the r_bits, one for each point except for the first one. They + /// are used for the scalar multiplication of the points. The r_bits are the bit + /// representation of each power of r (in Fr, while the CycleFoldCircuit is in Fq). + pub r_bits: Option>>, + /// points to be folded in the CycleFoldCircuit + pub points: Option>, + pub x: Option>>, // public inputs (cf_u_{i+1}.x) +} +impl>> CycleFoldCircuit { + /// n_points indicates the number of points being folded in the CycleFoldCircuit + pub fn empty(n_points: usize) -> Self { + Self { + _gc: PhantomData, + n_points, + r_bits: None, + points: None, + x: None, + } + } +} +impl ConstraintSynthesizer> for CycleFoldCircuit +where + C: CurveGroup, + GC: CurveVar> + ToConstraintFieldGadget>, + ::BaseField: ark_ff::PrimeField, + for<'a> &'a GC: GroupOpsBounds<'a, C, GC>, +{ + fn generate_constraints(self, cs: ConstraintSystemRef>) -> Result<(), SynthesisError> { + let r_bits: Vec>>> = self + .r_bits + // n_points-1, bcs is one for each point except for the first one + .unwrap_or(vec![vec![false; N_BITS_RO]; self.n_points - 1]) + .iter() + .map(|r_bits_i| { + Vec::>>::new_witness(cs.clone(), || Ok(r_bits_i.clone())) + }) + .collect::>>>, SynthesisError>>()?; + let points = Vec::::new_witness(cs.clone(), || { + Ok(self.points.unwrap_or(vec![C::zero(); self.n_points])) + })?; + + #[cfg(test)] + { + assert_eq!(self.n_points, points.len()); + assert_eq!(self.n_points - 1, r_bits.len()); + } + + // Fold the original points of the instances natively in CycleFold. 
+ // In Nova, + // - for the cmW we're computing: U_i1.cmW = U_i.cmW + r * u_i.cmW + // - for the cmE we're computing: U_i1.cmE = U_i.cmE + r * cmT + r^2 * u_i.cmE, where u_i.cmE + // is assumed to be 0, so, U_i1.cmE = U_i.cmE + r * cmT + let mut p_folded: GC = points[0].clone(); + // iter over n_points-1 because the first point is not multiplied by r^i (it is multiplied + // by r^0=1) + for i in 0..self.n_points - 1 { + p_folded += points[i + 1].scalar_mul_le(r_bits[i].iter())?; + } + + let x = Vec::>>::new_input(cs.clone(), || { + Ok(self + .x + .unwrap_or(vec![CF2::::zero(); cf_io_len(self.n_points)])) + })?; + #[cfg(test)] + assert_eq!(x.len(), cf_io_len(self.n_points)); // non-constrained sanity check + + // Check that the points coordinates are placed as the public input x: + // In Nova, this is: x == [r, p1, p2, p3] (wheere p3 is the p_folded). + // In multifolding schemes such as HyperNova, this is: + // computed_x = [r_0, r_1, r_2, ..., r_n, p_0, p_1, p_2, ..., p_n, p_folded], + // where each p_i is in fact p_i.to_constraint_field() + let computed_x: Vec>> = [ + r_bits + .iter() + .map(|r_bits_i| Boolean::le_bits_to_fp_var(r_bits_i)) + .collect::>>, SynthesisError>>()?, + points + .iter() + .map(|p_i| Ok(p_i.to_constraint_field()?[..2].to_vec())) + .collect::>>>, SynthesisError>>()? + .concat(), + p_folded.to_constraint_field()?[..2].to_vec(), + ] + .concat(); + computed_x.enforce_equal(&x)?; + + Ok(()) + } +} + +/// Folds the given cyclefold circuit and its instances. This method is abstracted from any folding +/// scheme struct because it is used both by Nova & HyperNova's CycleFold. +#[allow(clippy::type_complexity)] +#[allow(clippy::too_many_arguments)] +pub fn fold_cyclefold_circuit( + _n_points: usize, + transcript: &mut impl Transcript, + cf_r1cs: R1CS, + cf_cs_params: CS2::ProverParams, + pp_hash: C1::ScalarField, // public params hash + cf_W_i: Witness, // witness of the running instance + cf_U_i: CommittedInstance, // running instance + cf_u_i_x: Vec, + cf_circuit: CycleFoldCircuit, +) -> Result< + ( + Witness, + CommittedInstance, // u_i + Witness, // W_i1 + CommittedInstance, // U_i1 + C2, // cmT + C2::ScalarField, // r_Fq + ), + Error, +> +where + C1: CurveGroup, + GC1: CurveVar> + ToConstraintFieldGadget>, + C2: CurveGroup, + GC2: CurveVar> + ToConstraintFieldGadget>, + FC: FCircuit, + CS1: CommitmentScheme, + CS2: CommitmentScheme, + ::BaseField: PrimeField, + ::BaseField: PrimeField, + ::ScalarField: Absorb, + ::ScalarField: Absorb, + C1: CurveGroup, + for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>, + for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>, +{ + let cs2 = ConstraintSystem::::new_ref(); + cf_circuit.generate_constraints(cs2.clone())?; + + let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?; + let (cf_w_i, cf_x_i) = extract_w_x::(&cs2); + if cf_x_i != cf_u_i_x { + return Err(Error::NotEqual); + } + + #[cfg(test)] + assert_eq!(cf_x_i.len(), cf_io_len(_n_points)); + + // fold cyclefold instances + let cf_w_i = Witness::::new(cf_w_i.clone(), cf_r1cs.A.n_rows); + let cf_u_i: CommittedInstance = cf_w_i.commit::(&cf_cs_params, cf_x_i.clone())?; + + // compute T* and cmT* for CycleFoldCircuit + let (cf_T, cf_cmT) = NIFS::::compute_cyclefold_cmT( + &cf_cs_params, + &cf_r1cs, + &cf_w_i, + &cf_u_i, + &cf_W_i, + &cf_U_i, + )?; + + let cf_r_bits = CycleFoldChallengeGadget::::get_challenge_native( + transcript, + pp_hash, + cf_U_i.clone(), + cf_u_i.clone(), + cf_cmT, + ); + let cf_r_Fq = C1::BaseField::from_bigint(BigInteger::from_bits_le(&cf_r_bits)) + 
.expect("cf_r_bits out of bounds"); + + let (cf_W_i1, cf_U_i1) = NIFS::::fold_instances( + cf_r_Fq, &cf_W_i, &cf_U_i, &cf_w_i, &cf_u_i, &cf_T, cf_cmT, + )?; + Ok((cf_w_i, cf_u_i, cf_W_i1, cf_U_i1, cf_cmT, cf_r_Fq)) +} + +#[cfg(test)] +pub mod tests { + use ark_bn254::{constraints::GVar, Fq, Fr, G1Projective as Projective}; + use ark_crypto_primitives::sponge::{ + constraints::CryptographicSpongeVar, + poseidon::{constraints::PoseidonSpongeVar, PoseidonSponge}, + }; + use ark_r1cs_std::R1CSVar; + use ark_std::UniformRand; + + use super::*; + use crate::folding::nova::nifs::tests::prepare_simple_fold_inputs; + use crate::transcript::poseidon::poseidon_canonical_config; + use crate::utils::get_cm_coordinates; + + #[test] + fn test_committed_instance_cyclefold_var() { + let mut rng = ark_std::test_rng(); + + let ci = CommittedInstance:: { + cmE: Projective::rand(&mut rng), + u: Fr::rand(&mut rng), + cmW: Projective::rand(&mut rng), + x: vec![Fr::rand(&mut rng); 1], + }; + + // check the instantiation of the CycleFold side: + let cs = ConstraintSystem::::new_ref(); + let ciVar = + CommittedInstanceInCycleFoldVar::::new_witness(cs.clone(), || { + Ok(ci.clone()) + }) + .unwrap(); + assert_eq!(ciVar.cmE.value().unwrap(), ci.cmE); + assert_eq!(ciVar.cmW.value().unwrap(), ci.cmW); + } + + #[test] + fn test_CycleFoldCircuit_constraints() { + let (_, _, _, _, ci1, _, ci2, _, ci3, _, cmT, r_bits, _) = prepare_simple_fold_inputs(); + let r_Fq = Fq::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap(); + + // cs is the Constraint System on the Curve Cycle auxiliary curve constraints field + // (E1::Fq=E2::Fr) + let cs = ConstraintSystem::::new_ref(); + + let cfW_u_i_x: Vec = [ + vec![r_Fq], + get_cm_coordinates(&ci1.cmW), + get_cm_coordinates(&ci2.cmW), + get_cm_coordinates(&ci3.cmW), + ] + .concat(); + let cfW_circuit = CycleFoldCircuit:: { + _gc: PhantomData, + n_points: 2, + r_bits: Some(vec![r_bits.clone()]), + points: Some(vec![ci1.clone().cmW, ci2.clone().cmW]), + x: Some(cfW_u_i_x.clone()), + }; + cfW_circuit.generate_constraints(cs.clone()).unwrap(); + assert!(cs.is_satisfied().unwrap()); + + // same for E: + let cs = ConstraintSystem::::new_ref(); + let cfE_u_i_x = [ + vec![r_Fq], + get_cm_coordinates(&ci1.cmE), + get_cm_coordinates(&cmT), + get_cm_coordinates(&ci3.cmE), + ] + .concat(); + let cfE_circuit = CycleFoldCircuit:: { + _gc: PhantomData, + n_points: 2, + r_bits: Some(vec![r_bits.clone()]), + points: Some(vec![ci1.clone().cmE, cmT]), + x: Some(cfE_u_i_x.clone()), + }; + cfE_circuit.generate_constraints(cs.clone()).unwrap(); + assert!(cs.is_satisfied().unwrap()); + } + + #[test] + fn test_nifs_full_gadget() { + let (_, _, _, _, ci1, _, ci2, _, ci3, _, cmT, r_bits, r_Fr) = prepare_simple_fold_inputs(); + + let cs = ConstraintSystem::::new_ref(); + + let r_nonnatVar = NonNativeUintVar::::new_witness(cs.clone(), || Ok(r_Fr)).unwrap(); + let r_bitsVar = Vec::>::new_witness(cs.clone(), || Ok(r_bits)).unwrap(); + + let ci1Var = + CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { + Ok(ci1.clone()) + }) + .unwrap(); + let ci2Var = + CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { + Ok(ci2.clone()) + }) + .unwrap(); + let ci3Var = + CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { + Ok(ci3.clone()) + }) + .unwrap(); + let cmTVar = GVar::new_witness(cs.clone(), || Ok(cmT)).unwrap(); + + NIFSFullGadget::::verify( + r_bitsVar, + r_nonnatVar, + cmTVar, + ci1Var, + ci2Var, + ci3Var, + ) + .unwrap(); + assert!(cs.is_satisfied().unwrap()); + } + + #[test] + fn 
test_cyclefold_challenge_gadget() { + let mut rng = ark_std::test_rng(); + let poseidon_config = poseidon_canonical_config::(); + let mut transcript = PoseidonSponge::::new(&poseidon_config); + + let u_i = CommittedInstance:: { + cmE: Projective::zero(), // zero on purpose, so we test also the zero point case + u: Fr::zero(), + cmW: Projective::rand(&mut rng), + x: std::iter::repeat_with(|| Fr::rand(&mut rng)) + .take(7) // 7 = cf_io_len + .collect(), + }; + let U_i = CommittedInstance:: { + cmE: Projective::rand(&mut rng), + u: Fr::rand(&mut rng), + cmW: Projective::rand(&mut rng), + x: std::iter::repeat_with(|| Fr::rand(&mut rng)) + .take(7) // 7 = cf_io_len + .collect(), + }; + let cmT = Projective::rand(&mut rng); + + // compute the challenge natively + let pp_hash = Fq::from(42u32); // only for test + let r_bits = CycleFoldChallengeGadget::::get_challenge_native( + &mut transcript, + pp_hash, + U_i.clone(), + u_i.clone(), + cmT, + ); + + let cs = ConstraintSystem::::new_ref(); + let u_iVar = + CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { + Ok(u_i.clone()) + }) + .unwrap(); + let U_iVar = + CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { + Ok(U_i.clone()) + }) + .unwrap(); + let cmTVar = GVar::new_witness(cs.clone(), || Ok(cmT)).unwrap(); + let mut transcript_var = + PoseidonSpongeVar::::new(ConstraintSystem::::new_ref(), &poseidon_config); + + let pp_hashVar = FpVar::::new_witness(cs.clone(), || Ok(pp_hash)).unwrap(); + let r_bitsVar = CycleFoldChallengeGadget::::get_challenge_gadget( + &mut transcript_var, + pp_hashVar, + U_iVar.to_native_sponge_field_elements().unwrap(), + u_iVar, + cmTVar, + ) + .unwrap(); + assert!(cs.is_satisfied().unwrap()); + + // check that the natively computed and in-circuit computed hashes match + let rVar = Boolean::le_bits_to_fp_var(&r_bitsVar).unwrap(); + let r = Fq::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap(); + assert_eq!(rVar.value().unwrap(), r); + assert_eq!(r_bitsVar.value().unwrap(), r_bits); + } + + #[test] + fn test_cyclefold_hash_gadget() { + let mut rng = ark_std::test_rng(); + let poseidon_config = poseidon_canonical_config::(); + let sponge = PoseidonSponge::::new(&poseidon_config); + + let U_i = CommittedInstance:: { + cmE: Projective::rand(&mut rng), + u: Fr::rand(&mut rng), + cmW: Projective::rand(&mut rng), + x: std::iter::repeat_with(|| Fr::rand(&mut rng)) + .take(7) // 7 = cf_io_len in Nova + .collect(), + }; + let pp_hash = Fq::from(42u32); // only for test + let h = U_i.hash_cyclefold(&sponge, pp_hash); + + let cs = ConstraintSystem::::new_ref(); + let U_iVar = + CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { + Ok(U_i.clone()) + }) + .unwrap(); + let pp_hashVar = FpVar::::new_witness(cs.clone(), || Ok(pp_hash)).unwrap(); + let (hVar, _) = U_iVar + .hash( + &PoseidonSpongeVar::new(cs.clone(), &poseidon_config), + pp_hashVar, + ) + .unwrap(); + hVar.enforce_equal(&FpVar::new_witness(cs.clone(), || Ok(h)).unwrap()) + .unwrap(); + assert!(cs.is_satisfied().unwrap()); + } +} diff --git a/folding-schemes/src/folding/circuits/mod.rs b/folding-schemes/src/folding/circuits/mod.rs index 3854c43a..40d6b86e 100644 --- a/folding-schemes/src/folding/circuits/mod.rs +++ b/folding-schemes/src/folding/circuits/mod.rs @@ -2,6 +2,7 @@ use ark_ec::{AffineRepr, CurveGroup}; use ark_ff::Field; +pub mod cyclefold; pub mod nonnative; pub mod sum_check; pub mod utils; diff --git a/folding-schemes/src/folding/circuits/nonnative/affine.rs b/folding-schemes/src/folding/circuits/nonnative/affine.rs index 
05ee6b18..f4ec6618 100644 --- a/folding-schemes/src/folding/circuits/nonnative/affine.rs +++ b/folding-schemes/src/folding/circuits/nonnative/affine.rs @@ -1,5 +1,4 @@ use ark_ec::{AffineRepr, CurveGroup}; -use ark_ff::PrimeField; use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, fields::fp::FpVar, @@ -9,16 +8,15 @@ use ark_relations::r1cs::{Namespace, SynthesisError}; use ark_std::Zero; use core::borrow::Borrow; +use crate::transcript::{AbsorbNonNative, AbsorbNonNativeGadget}; + use super::uint::{nonnative_field_to_field_elements, NonNativeUintVar}; /// NonNativeAffineVar represents an elliptic curve point in Affine representation in the non-native /// field, over the constraint field. It is not intended to perform operations, but just to contain /// the affine coordinates in order to perform hash operations of the point. #[derive(Debug, Clone)] -pub struct NonNativeAffineVar -where - ::BaseField: PrimeField, -{ +pub struct NonNativeAffineVar { pub x: NonNativeUintVar, pub y: NonNativeUintVar, } @@ -26,7 +24,6 @@ where impl AllocVar for NonNativeAffineVar where C: CurveGroup, - ::BaseField: PrimeField, { fn new_variable>( cs: impl Into>, @@ -37,21 +34,18 @@ where let cs = cs.into(); let affine = val.borrow().into_affine(); - let zero_point = (&C::BaseField::zero(), &C::BaseField::zero()); - let xy = affine.xy().unwrap_or(zero_point); + let zero = (&C::BaseField::zero(), &C::BaseField::zero()); + let (x, y) = affine.xy().unwrap_or(zero); - let x = NonNativeUintVar::new_variable(cs.clone(), || Ok(*xy.0), mode)?; - let y = NonNativeUintVar::new_variable(cs.clone(), || Ok(*xy.1), mode)?; + let x = NonNativeUintVar::new_variable(cs.clone(), || Ok(*x), mode)?; + let y = NonNativeUintVar::new_variable(cs.clone(), || Ok(*y), mode)?; Ok(Self { x, y }) }) } } -impl ToConstraintFieldGadget for NonNativeAffineVar -where - ::BaseField: PrimeField, -{ +impl ToConstraintFieldGadget for NonNativeAffineVar { // Used for converting `NonNativeAffineVar` to a vector of `FpVar` with minimum length in // the circuit. fn to_constraint_field(&self) -> Result>, SynthesisError> { @@ -63,50 +57,50 @@ where /// The out-circuit counterpart of `NonNativeAffineVar::to_constraint_field` #[allow(clippy::type_complexity)] -pub fn nonnative_affine_to_field_elements( +fn nonnative_affine_to_field_elements( p: C, -) -> Result<(Vec, Vec), SynthesisError> -where - ::BaseField: PrimeField, -{ +) -> (Vec, Vec) { let affine = p.into_affine(); - if affine.is_zero() { - let x = nonnative_field_to_field_elements(&C::BaseField::zero()); - let y = nonnative_field_to_field_elements(&C::BaseField::zero()); - return Ok((x, y)); - } + let zero = (&C::BaseField::zero(), &C::BaseField::zero()); + let (x, y) = affine.xy().unwrap_or(zero); - let (x, y) = affine.xy().unwrap(); let x = nonnative_field_to_field_elements(x); let y = nonnative_field_to_field_elements(y); - Ok((x, y)) + (x, y) } -impl NonNativeAffineVar -where - ::BaseField: PrimeField, -{ - // A wrapper of `point_to_nonnative_limbs_custom_opt` with constraints-focused optimization - // type (which is the default optimization type for arkworks' Groth16). - // Used for extracting a list of field elements of type `C::ScalarField` from the public input +impl NonNativeAffineVar { + // Extracts a list of field elements of type `C::ScalarField` from the public input // `p`, in exactly the same way as how `NonNativeAffineVar` is represented as limbs of type // `FpVar` in-circuit. 
#[allow(clippy::type_complexity)] pub fn inputize(p: C) -> Result<(Vec, Vec), SynthesisError> { let affine = p.into_affine(); - if affine.is_zero() { - let x = NonNativeUintVar::inputize(C::BaseField::zero()); - let y = NonNativeUintVar::inputize(C::BaseField::zero()); - return Ok((x, y)); - } + let zero = (&C::BaseField::zero(), &C::BaseField::zero()); + let (x, y) = affine.xy().unwrap_or(zero); - let (x, y) = affine.xy().unwrap(); let x = NonNativeUintVar::inputize(*x); let y = NonNativeUintVar::inputize(*y); Ok((x, y)) } } +impl AbsorbNonNative for C { + fn to_native_sponge_field_elements(&self, dest: &mut Vec) { + let (x, y) = nonnative_affine_to_field_elements(*self); + dest.extend(x); + dest.extend(y); + } +} + +impl AbsorbNonNativeGadget for NonNativeAffineVar { + fn to_native_sponge_field_elements( + &self, + ) -> Result>, SynthesisError> { + self.to_constraint_field() + } +} + #[cfg(test)] mod tests { use super::*; @@ -132,7 +126,7 @@ mod tests { let mut rng = ark_std::test_rng(); let p = Projective::rand(&mut rng); let pVar = NonNativeAffineVar::::new_witness(cs.clone(), || Ok(p)).unwrap(); - let (x, y) = nonnative_affine_to_field_elements(p).unwrap(); + let (x, y) = nonnative_affine_to_field_elements(p); assert_eq!( pVar.to_constraint_field().unwrap().value().unwrap(), [x, y].concat() diff --git a/folding-schemes/src/folding/circuits/nonnative/uint.rs b/folding-schemes/src/folding/circuits/nonnative/uint.rs index 151ed2ca..f3bbf4c6 100644 --- a/folding-schemes/src/folding/circuits/nonnative/uint.rs +++ b/folding-schemes/src/folding/circuits/nonnative/uint.rs @@ -3,7 +3,7 @@ use std::{ cmp::{max, min}, }; -use ark_ff::{BigInteger, One, PrimeField, Zero}; +use ark_ff::{BigInteger, Field, One, PrimeField, Zero}; use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, boolean::Boolean, @@ -16,7 +16,10 @@ use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError}; use num_bigint::BigUint; use num_integer::Integer; -use crate::utils::gadgets::{MatrixGadget, SparseMatrixVar, VectorGadget}; +use crate::{ + transcript::{AbsorbNonNative, AbsorbNonNativeGadget}, + utils::gadgets::{MatrixGadget, SparseMatrixVar, VectorGadget}, +}; /// `LimbVar` represents a single limb of a non-native unsigned integer in the /// circuit. @@ -229,7 +232,7 @@ impl AllocVar for NonNativeUintVar { } } -impl AllocVar for NonNativeUintVar { +impl AllocVar for NonNativeUintVar { fn new_variable>( cs: impl Into>, f: impl FnOnce() -> Result, @@ -237,7 +240,8 @@ impl AllocVar for NonNativeUintVar { ) -> Result { let cs = cs.into().cs(); let v = f()?; - let v = v.borrow(); + assert_eq!(G::extension_degree(), 1); + let v = v.borrow().to_base_prime_field_elements().next().unwrap(); let mut limbs = vec![]; @@ -256,8 +260,12 @@ impl AllocVar for NonNativeUintVar { } impl NonNativeUintVar { - pub fn inputize(x: T) -> Vec { - x.into_bigint() + pub fn inputize(x: T) -> Vec { + assert_eq!(T::extension_degree(), 1); + x.to_base_prime_field_elements() + .next() + .unwrap() + .into_bigint() .to_bits_le() .chunks(Self::bits_per_limb()) .map(|chunk| F::from_bigint(F::BigInt::from_bits_le(chunk)).unwrap()) @@ -802,14 +810,40 @@ impl]>> From for NonNativeUintVar { } } +// If we impl `AbsorbNonNative` directly for `PrimeField`, rustc will complain +// that this impl conflicts with the impl for `CurveGroup`. +// Therefore, we instead impl `AbsorbNonNative` for a slice of `PrimeField` as a +// workaround. 
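// Sketch of the conflict the note above refers to: `affine.rs` already provides a blanket
// impl of `AbsorbNonNative` for every `C: CurveGroup`, and a second blanket impl for every
// `T: PrimeField` would overlap with it, since rustc cannot rule out a type implementing
// both `CurveGroup` and `PrimeField` (error E0119). Implementing the trait for the slice
// type `[TargetField]` sidesteps the overlap, and a single nonnative scalar can still be
// absorbed by viewing it as a one-element slice.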
+impl AbsorbNonNative + for [TargetField] +{ + fn to_native_sponge_field_elements(&self, dest: &mut Vec) { + self.iter() + .for_each(|x| dest.extend(&nonnative_field_to_field_elements(x))); + } +} + +impl AbsorbNonNativeGadget for NonNativeUintVar { + fn to_native_sponge_field_elements(&self) -> Result>, SynthesisError> { + self.to_constraint_field() + } +} + /// The out-circuit counterpart of `NonNativeUintVar::to_constraint_field` -pub fn nonnative_field_to_field_elements( +pub(crate) fn nonnative_field_to_field_elements( f: &TargetField, ) -> Vec { - let bits = f.into_bigint().to_bits_le(); + assert_eq!(TargetField::extension_degree(), 1); + let bits = f + .to_base_prime_field_elements() + .next() + .unwrap() + .into_bigint() + .to_bits_le(); let bits_per_limb = BaseField::MODULUS_BIT_SIZE as usize - 1; - let num_limbs = (TargetField::MODULUS_BIT_SIZE as usize).div_ceil(bits_per_limb); + let num_limbs = + (TargetField::BasePrimeField::MODULUS_BIT_SIZE as usize).div_ceil(bits_per_limb); let mut limbs = bits .chunks(bits_per_limb) @@ -897,7 +931,6 @@ mod tests { use std::error::Error; use super::*; - use ark_ff::Field; use ark_pallas::{Fq, Fr}; use ark_relations::r1cs::ConstraintSystem; use ark_std::{test_rng, UniformRand}; diff --git a/folding-schemes/src/folding/circuits/sum_check.rs b/folding-schemes/src/folding/circuits/sum_check.rs index ceda3946..75c7b43c 100644 --- a/folding-schemes/src/folding/circuits/sum_check.rs +++ b/folding-schemes/src/folding/circuits/sum_check.rs @@ -1,26 +1,27 @@ -use crate::utils::espresso::sum_check::SumCheck; -use crate::utils::virtual_polynomial::VPAuxInfo; -use crate::{ - transcript::{poseidon::PoseidonTranscript, TranscriptVar}, - utils::sum_check::{structs::IOPProof, IOPSumCheck}, -}; -use ark_crypto_primitives::sponge::Absorb; -use ark_ec::{CurveGroup, Group}; /// Heavily inspired from testudo: https://github.com/cryptonetlab/testudo/tree/master /// Some changes: /// - Typings to better stick to ark_poly's API /// - Uses `folding-schemes`' own `TranscriptVar` trait and `PoseidonTranscriptVar` struct /// - API made closer to gadgets found in `folding-schemes` +use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, Absorb, CryptographicSponge}; use ark_ff::PrimeField; use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial}; use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, + boolean::Boolean, eq::EqGadget, - fields::fp::FpVar, + fields::{fp::FpVar, FieldVar}, }; use ark_relations::r1cs::{Namespace, SynthesisError}; use std::{borrow::Borrow, marker::PhantomData}; +use crate::utils::espresso::sum_check::SumCheck; +use crate::utils::virtual_polynomial::VPAuxInfo; +use crate::{ + transcript::TranscriptVar, + utils::sum_check::{structs::IOPProof, IOPSumCheck}, +}; + #[derive(Clone, Debug)] pub struct DensePolynomialVar { pub coeffs: Vec>, @@ -47,10 +48,16 @@ impl AllocVar, F> for DensePolynomialVar { impl DensePolynomialVar { pub fn eval_at_zero(&self) -> FpVar { + if self.coeffs.is_empty() { + return FpVar::::zero(); + } self.coeffs[0].clone() } pub fn eval_at_one(&self) -> FpVar { + if self.coeffs.is_empty() { + return FpVar::::zero(); + } let mut res = self.coeffs[0].clone(); for i in 1..self.coeffs.len() { res = &res + &self.coeffs[i]; @@ -59,6 +66,9 @@ impl DensePolynomialVar { } pub fn evaluate(&self, r: &FpVar) -> FpVar { + if self.coeffs.is_empty() { + return FpVar::::zero(); + } let mut eval = self.coeffs[0].clone(); let mut power = r.clone(); @@ -71,35 +81,27 @@ impl DensePolynomialVar { } #[derive(Clone, Debug)] -pub struct 
IOPProofVar { +pub struct IOPProofVar { // We have to be generic over a CurveGroup because instantiating a IOPProofVar will call IOPSumCheck which requires a CurveGroup - pub proofs: Vec>, - pub claim: FpVar, + pub proofs: Vec>, + pub claim: FpVar, } -impl AllocVar, C::ScalarField> for IOPProofVar -where - ::ScalarField: Absorb, -{ - fn new_variable>>( - cs: impl Into>, +impl AllocVar, F> for IOPProofVar { + fn new_variable>>( + cs: impl Into>, f: impl FnOnce() -> Result, mode: AllocationMode, ) -> Result { f().and_then(|c| { let cs = cs.into(); - let cp: &IOPProof = c.borrow(); - let claim = IOPSumCheck::>::extract_sum(cp); - let claim = FpVar::::new_variable(cs.clone(), || Ok(claim), mode)?; - let mut proofs = - Vec::>::with_capacity(cp.proofs.len()); + let cp: &IOPProof = c.borrow(); + let claim = IOPSumCheck::>::extract_sum(cp); + let claim = FpVar::::new_variable(cs.clone(), || Ok(claim), mode)?; + let mut proofs = Vec::>::with_capacity(cp.proofs.len()); for proof in cp.proofs.iter() { let poly = DensePolynomial::from_coefficients_slice(&proof.coeffs); - let proof = DensePolynomialVar::::new_variable( - cs.clone(), - || Ok(poly), - mode, - )?; + let proof = DensePolynomialVar::::new_variable(cs.clone(), || Ok(poly), mode)?; proofs.push(proof); } Ok(Self { proofs, claim }) @@ -138,27 +140,28 @@ impl AllocVar, F> for VPAuxInfoVar { } #[derive(Debug, Clone)] -pub struct SumCheckVerifierGadget { - _f: PhantomData, +pub struct SumCheckVerifierGadget { + _f: PhantomData, } -impl SumCheckVerifierGadget { +impl SumCheckVerifierGadget { #[allow(clippy::type_complexity)] - pub fn verify( - iop_proof_var: &IOPProofVar, - poly_aux_info_var: &VPAuxInfoVar, - transcript_var: &mut impl TranscriptVar, - ) -> Result<(Vec>, Vec>), SynthesisError> { + pub fn verify>( + iop_proof_var: &IOPProofVar, + poly_aux_info_var: &VPAuxInfoVar, + transcript_var: &mut T, + enabled: Boolean, + ) -> Result<(Vec>, Vec>), SynthesisError> { let mut e_vars = vec![iop_proof_var.claim.clone()]; - let mut r_vars: Vec> = Vec::new(); - transcript_var.absorb(poly_aux_info_var.num_variables.clone())?; - transcript_var.absorb(poly_aux_info_var.max_degree.clone())?; + let mut r_vars: Vec> = Vec::new(); + transcript_var.absorb(&poly_aux_info_var.num_variables)?; + transcript_var.absorb(&poly_aux_info_var.max_degree)?; for poly_var in iop_proof_var.proofs.iter() { let res = poly_var.eval_at_one() + poly_var.eval_at_zero(); let e_var = e_vars.last().ok_or(SynthesisError::Unsatisfiable)?; - res.enforce_equal(e_var)?; - transcript_var.absorb_vec(&poly_var.coeffs)?; + res.conditional_enforce_equal(e_var, &enabled)?; + transcript_var.absorb(&poly_var.coeffs)?; let r_i_var = transcript_var.get_challenge()?; e_vars.push(poly_var.evaluate(&r_i_var)); r_vars.push(r_i_var); @@ -170,49 +173,35 @@ impl SumCheckVerifierGadget { #[cfg(test)] mod tests { - use crate::{ - folding::circuits::sum_check::{IOPProofVar, VPAuxInfoVar}, - transcript::{ - poseidon::{poseidon_canonical_config, PoseidonTranscript, PoseidonTranscriptVar}, - Transcript, TranscriptVar, - }, - utils::{ - sum_check::{structs::IOPProof, IOPSumCheck, SumCheck}, - virtual_polynomial::VirtualPolynomial, - }, - }; - use ark_crypto_primitives::sponge::{poseidon::PoseidonConfig, Absorb}; - use ark_ec::CurveGroup; - use ark_ff::Field; - use ark_pallas::{Fr, Projective}; - use ark_poly::{ - univariate::DensePolynomial, DenseMultilinearExtension, DenseUVPolynomial, - MultilinearExtension, Polynomial, + use ark_crypto_primitives::sponge::{ + constraints::CryptographicSpongeVar, + 
poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig}, }; - use ark_r1cs_std::{alloc::AllocVar, R1CSVar}; + use ark_pallas::Fr; + use ark_poly::{DenseMultilinearExtension, MultilinearExtension, Polynomial}; + use ark_r1cs_std::R1CSVar; use ark_relations::r1cs::ConstraintSystem; use std::sync::Arc; - use super::SumCheckVerifierGadget; + use super::*; + use crate::{ + transcript::poseidon::poseidon_canonical_config, + utils::virtual_polynomial::VirtualPolynomial, + }; pub type TestSumCheckProof = (VirtualPolynomial, PoseidonConfig, IOPProof); /// Primarily used for testing the sumcheck gadget /// Returns a random virtual polynomial, the poseidon config used and the associated sumcheck proof - pub fn get_test_sumcheck_proof( + pub fn get_test_sumcheck_proof( num_vars: usize, - ) -> TestSumCheckProof - where - ::ScalarField: Absorb, - { + ) -> TestSumCheckProof { let mut rng = ark_std::test_rng(); - let poseidon_config: PoseidonConfig = - poseidon_canonical_config::(); - let mut poseidon_transcript_prove = PoseidonTranscript::::new(&poseidon_config); + let poseidon_config: PoseidonConfig = poseidon_canonical_config::(); + let mut poseidon_transcript_prove = PoseidonSponge::::new(&poseidon_config); let poly_mle = DenseMultilinearExtension::rand(num_vars, &mut rng); - let virtual_poly = - VirtualPolynomial::new_from_mle(&Arc::new(poly_mle), C::ScalarField::ONE); - let sum_check: IOPProof = IOPSumCheck::>::prove( + let virtual_poly = VirtualPolynomial::new_from_mle(&Arc::new(poly_mle), F::ONE); + let sum_check: IOPProof = IOPSumCheck::>::prove( &virtual_poly, &mut poseidon_transcript_prove, ) @@ -225,25 +214,26 @@ mod tests { for num_vars in 1..15 { let cs = ConstraintSystem::::new_ref(); let (virtual_poly, poseidon_config, sum_check) = - get_test_sumcheck_proof::(num_vars); - let mut poseidon_var: PoseidonTranscriptVar = - PoseidonTranscriptVar::new(cs.clone(), &poseidon_config); + get_test_sumcheck_proof::(num_vars); + let mut poseidon_var: PoseidonSpongeVar = + PoseidonSpongeVar::new(cs.clone(), &poseidon_config); let iop_proof_var = - IOPProofVar::::new_witness(cs.clone(), || Ok(&sum_check)).unwrap(); + IOPProofVar::::new_witness(cs.clone(), || Ok(&sum_check)).unwrap(); let poly_aux_info_var = VPAuxInfoVar::::new_witness(cs.clone(), || Ok(virtual_poly.aux_info)).unwrap(); - let res = SumCheckVerifierGadget::::verify( + let enabled = Boolean::::new_witness(cs.clone(), || Ok(true)).unwrap(); + let res = SumCheckVerifierGadget::::verify( &iop_proof_var, &poly_aux_info_var, &mut poseidon_var, + enabled, ); assert!(res.is_ok()); let (circuit_evals, r_challenges) = res.unwrap(); // 1. assert claim from circuit is equal to the one from the sum-check - let claim: Fr = - IOPSumCheck::>::extract_sum(&sum_check); + let claim: Fr = IOPSumCheck::>::extract_sum(&sum_check); assert_eq!(circuit_evals[0].value().unwrap(), claim); // 2. 
assert that all in-circuit evaluations are equal to the ones from the sum-check diff --git a/folding-schemes/src/folding/hypernova/cccs.rs b/folding-schemes/src/folding/hypernova/cccs.rs index c13f7d89..6be8ab64 100644 --- a/folding-schemes/src/folding/hypernova/cccs.rs +++ b/folding-schemes/src/folding/hypernova/cccs.rs @@ -1,3 +1,4 @@ +use ark_crypto_primitives::sponge::Absorb; use ark_ec::CurveGroup; use ark_ff::PrimeField; use ark_std::One; @@ -6,24 +7,15 @@ use std::sync::Arc; use ark_std::rand::Rng; -use crate::ccs::CCS; -use crate::commitment::{ - pedersen::{Params as PedersenParams, Pedersen}, - CommitmentScheme, -}; -use crate::utils::hypercube::BooleanHypercube; +use super::Witness; +use crate::arith::{ccs::CCS, Arith}; +use crate::commitment::CommitmentScheme; +use crate::transcript::AbsorbNonNative; use crate::utils::mle::dense_vec_to_dense_mle; use crate::utils::vec::mat_vec_mul; use crate::utils::virtual_polynomial::{build_eq_x_r_vec, VirtualPolynomial}; use crate::Error; -/// Witness for the LCCCS & CCCS, containing the w vector, and the r_w used as randomness in the Pedersen commitment. -#[derive(Debug, Clone)] -pub struct Witness { - pub w: Vec, - pub r_w: F, // randomness used in the Pedersen commitment of w -} - /// Committed CCS instance #[derive(Debug, Clone)] pub struct CCCS { @@ -34,10 +26,10 @@ pub struct CCCS { } impl CCS { - pub fn to_cccs( + pub fn to_cccs>( &self, rng: &mut R, - pedersen_params: &PedersenParams, + cs_params: &CS::ProverParams, z: &[C::ScalarField], ) -> Result<(CCCS, Witness), Error> where @@ -45,8 +37,14 @@ impl CCS { C: CurveGroup, { let w: Vec = z[(1 + self.l)..].to_vec(); - let r_w = C::ScalarField::rand(rng); - let C = Pedersen::::commit(pedersen_params, &w, &r_w)?; + + // if the commitment scheme is set to be hiding, set the random blinding parameter + let r_w = if CS::is_hiding() { + C::ScalarField::rand(rng) + } else { + C::ScalarField::zero() + }; + let C = CS::commit(cs_params, &w, &r_w)?; Ok(( CCCS:: { @@ -92,44 +90,65 @@ impl CCS { } impl CCCS { - /// Perform the check of the CCCS instance described at section 4.1 + pub fn dummy(l: usize) -> CCCS + where + C::ScalarField: PrimeField, + { + CCCS:: { + C: C::zero(), + x: vec![C::ScalarField::zero(); l], + } + } + + /// Perform the check of the CCCS instance described at section 4.1, + /// notice that this method does not check the commitment correctness pub fn check_relation( &self, - pedersen_params: &PedersenParams, ccs: &CCS, w: &Witness, ) -> Result<(), Error> { - // check that C is the commitment of w. Notice that this is not verifying a Pedersen - // opening, but checking that the commitment comes from committing to the witness. - if self.C != Pedersen::::commit(pedersen_params, &w.w, &w.r_w)? { - return Err(Error::NotSatisfied); - } - // check CCCS relation let z: Vec = [vec![C::ScalarField::one()], self.x.clone(), w.w.to_vec()].concat(); - // A CCCS relation is satisfied if the q(x) multivariate polynomial evaluates to zero in the hypercube - let q_x = ccs.compute_q(&z)?; - - for x in BooleanHypercube::new(ccs.s) { - if !q_x.evaluate(&x)?.is_zero() { - return Err(Error::NotSatisfied); - } - } + // A CCCS relation is satisfied if the q(x) multivariate polynomial evaluates to zero in + // the hypercube, evaluating over the whole boolean hypercube for a normal-sized instance + // would take too much, this checks the CCS relation of the CCCS. 
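// Concretely, `ccs.check_relation(&z)` below checks the CCS relation directly on
// z = (1, x, w):
//   Σ_{i ∈ [q]} c_i · ( ∘_{j ∈ S_i} M_j · z ) = 0    (∘ denotes the Hadamard product)
// which holds iff the polynomial q(x) of section 4.1 vanishes on the whole hypercube
// {0,1}^s, so no per-point hypercube loop is needed here.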
+ ccs.check_relation(&z)?; Ok(()) } } +impl Absorb for CCCS +where + C::ScalarField: Absorb, +{ + fn to_sponge_bytes(&self, _dest: &mut Vec) { + // This is never called + unimplemented!() + } + + fn to_sponge_field_elements(&self, dest: &mut Vec) { + // We cannot call `to_native_sponge_field_elements(dest)` directly, as + // `to_native_sponge_field_elements` needs `F` to be `C::ScalarField`, + // but here `F` is a generic `PrimeField`. + self.C + .to_native_sponge_field_elements_as_vec() + .to_sponge_field_elements(dest); + self.x.to_sponge_field_elements(dest); + } +} + #[cfg(test)] pub mod tests { - use super::*; - use crate::ccs::tests::{get_test_ccs, get_test_z}; + use ark_pallas::Fr; use ark_std::test_rng; use ark_std::UniformRand; - use ark_pallas::Fr; + use super::*; + use crate::arith::ccs::tests::{get_test_ccs, get_test_z}; + use crate::utils::hypercube::BooleanHypercube; /// Do some sanity checks on q(x). It's a multivariable polynomial and it should evaluate to zero inside the /// hypercube, but to not-zero outside the hypercube. diff --git a/folding-schemes/src/folding/hypernova/circuits.rs b/folding-schemes/src/folding/hypernova/circuits.rs index d42330cd..78a28d28 100644 --- a/folding-schemes/src/folding/hypernova/circuits.rs +++ b/folding-schemes/src/folding/hypernova/circuits.rs @@ -1,24 +1,53 @@ -/// Implementation of [HyperNova](https://eprint.iacr.org/2023/573.pdf) NIMFS verifier circuit -use ark_crypto_primitives::sponge::Absorb; +/// Implementation of [HyperNova](https://eprint.iacr.org/2023/573.pdf) circuits +use ark_crypto_primitives::sponge::{ + constraints::CryptographicSpongeVar, + poseidon::{constraints::PoseidonSpongeVar, PoseidonSponge}, + CryptographicSponge, +}; +use ark_crypto_primitives::sponge::{poseidon::PoseidonConfig, Absorb}; use ark_ec::{CurveGroup, Group}; use ark_ff::PrimeField; use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, + boolean::Boolean, eq::EqGadget, fields::{fp::FpVar, FieldVar}, + groups::GroupOpsBounds, + prelude::CurveVar, + R1CSVar, ToBitsGadget, ToConstraintFieldGadget, +}; +use ark_relations::r1cs::{ + ConstraintSynthesizer, ConstraintSystem, ConstraintSystemRef, Namespace, SynthesisError, }; -use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError}; +use ark_std::{fmt::Debug, ops::Neg, One, Zero}; use core::{borrow::Borrow, marker::PhantomData}; -use super::{cccs::CCCS, lcccs::LCCCS, nimfs::Proof}; -use crate::folding::circuits::{ - nonnative::affine::NonNativeAffineVar, - sum_check::{IOPProofVar, SumCheckVerifierGadget, VPAuxInfoVar}, - utils::EqEvalGadget, - CF1, +use super::{ + cccs::CCCS, + lcccs::LCCCS, + nimfs::{NIMFSProof, NIMFS}, + Witness, +}; +use crate::constants::N_BITS_RO; +use crate::folding::{ + circuits::cyclefold::{ + cf_io_len, CycleFoldChallengeGadget, CycleFoldCommittedInstanceVar, NIFSFullGadget, + }, + circuits::{ + nonnative::{affine::NonNativeAffineVar, uint::NonNativeUintVar}, + sum_check::{IOPProofVar, SumCheckVerifierGadget, VPAuxInfoVar}, + utils::EqEvalGadget, + CF1, CF2, + }, + nova::{get_r1cs_from_cs, CommittedInstance}, }; +use crate::frontend::FCircuit; use crate::utils::virtual_polynomial::VPAuxInfo; -use crate::{ccs::CCS, transcript::TranscriptVar}; +use crate::Error; +use crate::{ + arith::{ccs::CCS, r1cs::extract_r1cs}, + transcript::TranscriptVar, +}; /// Committed CCS instance #[derive(Debug, Clone)] @@ -28,7 +57,7 @@ where { // Commitment to witness pub C: NonNativeAffineVar, - // Public input/output + // Public io pub x: Vec>>, } impl AllocVar, CF1> for CCCSVar @@ 
-63,7 +92,7 @@ where pub C: NonNativeAffineVar, // Relaxation factor of z for folded LCCCS pub u: FpVar>, - // Public input/output + // Public io pub x: Vec>>, // Random evaluation point for the v_i pub r_x: Vec>>, @@ -97,20 +126,58 @@ where } } +impl LCCCSVar +where + C: CurveGroup, + ::ScalarField: Absorb, + ::BaseField: ark_ff::PrimeField, +{ + /// [`LCCCSVar`].hash implements the LCCCS instance hash compatible with the native + /// implementation from LCCCS.hash. + /// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and `U` is the LCCCS. + /// Additionally it returns the vector of the field elements from the self parameters, so they + /// can be reused in other gadgets avoiding recalculating (reconstraining) them. + #[allow(clippy::type_complexity)] + pub fn hash( + self, + sponge: &PoseidonSpongeVar>, + pp_hash: FpVar>, + i: FpVar>, + z_0: Vec>>, + z_i: Vec>>, + ) -> Result<(FpVar>, Vec>>), SynthesisError> { + let mut sponge = sponge.clone(); + let U_vec = [ + self.C.to_constraint_field()?, + vec![self.u], + self.x, + self.r_x, + self.v, + ] + .concat(); + sponge.absorb(&pp_hash)?; + sponge.absorb(&i)?; + sponge.absorb(&z_0)?; + sponge.absorb(&z_i)?; + sponge.absorb(&U_vec)?; + Ok((sponge.squeeze_field_elements(1)?.pop().unwrap(), U_vec)) + } +} + /// ProofVar defines a multifolding proof #[derive(Debug)] pub struct ProofVar { - pub sc_proof: IOPProofVar, + pub sc_proof: IOPProofVar, #[allow(clippy::type_complexity)] pub sigmas_thetas: (Vec>>>, Vec>>>), } -impl AllocVar, CF1> for ProofVar +impl AllocVar, CF1> for ProofVar where C: CurveGroup, ::BaseField: PrimeField, ::ScalarField: Absorb, { - fn new_variable>>( + fn new_variable>>( cs: impl Into>>, f: impl FnOnce() -> Result, mode: AllocationMode, @@ -118,7 +185,7 @@ where f().and_then(|val| { let cs = cs.into(); - let sc_proof = IOPProofVar::::new_variable( + let sc_proof = IOPProofVar::::new_variable( cs.clone(), || Ok(val.borrow().sc_proof.clone()), mode, @@ -153,27 +220,48 @@ impl NIMFSGadget where ::BaseField: PrimeField, { - pub fn verify( + /// Runs (in-circuit) the NIMFS.V, which outputs the new folded LCCCS instance together with + /// the rho_powers, which will be used in other parts of the AugmentedFCircuit + #[allow(clippy::type_complexity)] + pub fn verify>( cs: ConstraintSystemRef>, // only used the CCS params, not the matrices ccs: &CCS, - mut transcript: impl TranscriptVar, + transcript: &mut T, - running_instances: &[LCCCSVar], - new_instances: &[CCCSVar], + running_instances: &[LCCCSVar], // U + new_instances: &[CCCSVar], // u proof: ProofVar, - ) -> Result, SynthesisError> { + enabled: Boolean, + ) -> Result<(LCCCSVar, Vec>>>), SynthesisError> { + // absorb instances to transcript + for U_i in running_instances { + let v = [ + U_i.C.to_constraint_field()?, + vec![U_i.u.clone()], + U_i.x.clone(), + U_i.r_x.clone(), + U_i.v.clone(), + ] + .concat(); + transcript.absorb(&v)?; + } + for u_i in new_instances { + let v = [u_i.C.to_constraint_field()?, u_i.x.clone()].concat(); + transcript.absorb(&v)?; + } + // get the challenges let gamma_scalar_raw = C::ScalarField::from_le_bytes_mod_order(b"gamma"); let gamma_scalar: FpVar> = FpVar::>::new_constant(cs.clone(), gamma_scalar_raw)?; - transcript.absorb(gamma_scalar)?; + transcript.absorb(&gamma_scalar)?; let gamma: FpVar> = transcript.get_challenge()?; let beta_scalar_raw = C::ScalarField::from_le_bytes_mod_order(b"beta"); let beta_scalar: FpVar> = FpVar::>::new_constant(cs.clone(), beta_scalar_raw)?; - transcript.absorb(beta_scalar)?; + 
transcript.absorb(&beta_scalar)?; let beta: Vec>> = transcript.get_challenges(ccs.s)?; let vp_aux_info_raw = VPAuxInfo:: { @@ -195,8 +283,12 @@ where } // verify the interactive part of the sumcheck - let (e_vars, r_vars) = - SumCheckVerifierGadget::::verify(&proof.sc_proof, &vp_aux_info, &mut transcript)?; + let (e_vars, r_vars) = SumCheckVerifierGadget::::verify( + &proof.sc_proof, + &vp_aux_info, + transcript, + enabled.clone(), + )?; // extract the randomness from the sumcheck let r_x_prime = r_vars.clone(); @@ -215,15 +307,17 @@ where .collect(), r_x_prime.clone(), )?; - computed_c.enforce_equal(&e_vars[e_vars.len() - 1])?; + computed_c.conditional_enforce_equal(&e_vars[e_vars.len() - 1], &enabled)?; // get the folding challenge let rho_scalar_raw = C::ScalarField::from_le_bytes_mod_order(b"rho"); let rho_scalar: FpVar> = FpVar::>::new_constant(cs.clone(), rho_scalar_raw)?; - transcript.absorb(rho_scalar)?; - let rho: FpVar> = transcript.get_challenge()?; + transcript.absorb(&rho_scalar)?; + let rho_bits: Vec>> = transcript.get_challenge_nbits(N_BITS_RO)?; + let rho = Boolean::le_bits_to_fp_var(&rho_bits)?; - // return the folded instance + // Self::fold will return the folded instance, together with the rho's powers vector so + // they can be used in other parts of the AugmentedFCircuit Self::fold( running_instances, new_instances, @@ -233,6 +327,7 @@ where ) } + /// Runs (in-circuit) the verifier side of the fold, computing the new folded LCCCS instance #[allow(clippy::type_complexity)] fn fold( lcccs: &[LCCCSVar], @@ -240,12 +335,14 @@ where sigmas_thetas: (Vec>>>, Vec>>>), r_x_prime: Vec>>, rho: FpVar>, - ) -> Result, SynthesisError> { + ) -> Result<(LCCCSVar, Vec>>>), SynthesisError> { let (sigmas, thetas) = (sigmas_thetas.0.clone(), sigmas_thetas.1.clone()); let mut u_folded: FpVar> = FpVar::zero(); let mut x_folded: Vec>> = vec![FpVar::zero(); lcccs[0].x.len()]; let mut v_folded: Vec>> = vec![FpVar::zero(); sigmas[0].len()]; + let mut rho_vec: Vec>>> = + vec![vec![Boolean::FALSE; N_BITS_RO]; lcccs.len() + cccs.len() - 1]; let mut rho_i = FpVar::one(); for i in 0..(lcccs.len() + cccs.len()) { let u: FpVar>; @@ -282,20 +379,34 @@ where .map(|(a_i, b_i)| a_i + b_i) .collect(); + // compute the next power of rho rho_i *= rho.clone(); + // crop the size of rho_i to N_BITS_RO + let rho_i_bits = rho_i.to_bits_le()?; + rho_i = Boolean::le_bits_to_fp_var(&rho_i_bits[..N_BITS_RO])?; + if i < lcccs.len() + cccs.len() - 1 { + // store the cropped rho_i into the rho_vec + rho_vec[i] = rho_i_bits[..N_BITS_RO].to_vec(); + } } - Ok(LCCCSVar:: { - C: lcccs[0].C.clone(), // WIP this will come from the cyclefold circuit - u: u_folded, - x: x_folded, - r_x: r_x_prime, - v: v_folded, - }) + // return the folded instance, together with the rho's powers vector so they can be used in + // other parts of the AugmentedFCircuit + Ok(( + LCCCSVar:: { + // C this is later overwritten by the U_{i+1}.C value checked by the cyclefold circuit + C: lcccs[0].C.clone(), + u: u_folded, + x: x_folded, + r_x: r_x_prime, + v: v_folded, + }, + rho_vec, + )) } } -/// computes c from the step 5 in section 5 of HyperNova, adapted to multiple LCCCS & CCCS +/// Computes c from the step 5 in section 5 of HyperNova, adapted to multiple LCCCS & CCCS /// instances: /// $$ /// c = \sum_{i \in [\mu]} \left(\sum_{j \in [t]} \gamma^{i \cdot t + j} \cdot e_i \cdot \sigma_{i,j} \right) @@ -345,28 +456,466 @@ fn compute_c_gadget( Ok(c) } +#[derive(Debug, Clone)] +pub struct AugmentedFCircuit< + C1: CurveGroup, + C2: CurveGroup, + 
GC2: CurveVar>, + FC: FCircuit>, +> where + for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>, +{ + pub _c2: PhantomData, + pub _gc2: PhantomData, + pub poseidon_config: PoseidonConfig>, + pub ccs: CCS, // CCS of the AugmentedFCircuit + pub pp_hash: Option>, + pub mu: usize, // max number of LCCCS instances to be folded + pub nu: usize, // max number of CCCS instances to be folded + pub i: Option>, + pub i_usize: Option, + pub z_0: Option>, + pub z_i: Option>, + pub external_inputs: Option>, + pub U_i: Option>, + pub Us: Option>>, // other U_i's to be folded that are not the main running instance + pub u_i_C: Option, // u_i.C + pub us: Option>>, // other u_i's to be folded that are not the main incoming instance + pub U_i1_C: Option, // U_{i+1}.C + pub F: FC, // F circuit + pub x: Option>, // public input (u_{i+1}.x[0]) + pub nimfs_proof: Option>, + + // cyclefold verifier on C1 + pub cf_u_i_cmW: Option, // input, cf_u_i.cmW + pub cf_U_i: Option>, // input, RelaxedR1CS CycleFold instance + pub cf_x: Option>, // public input (cf_u_{i+1}.x[1]) + pub cf_cmT: Option, +} + +impl AugmentedFCircuit +where + C1: CurveGroup, + C2: CurveGroup, + GC2: CurveVar> + ToConstraintFieldGadget>, + FC: FCircuit>, + ::BaseField: PrimeField, + ::BaseField: PrimeField, + ::ScalarField: Absorb, + ::ScalarField: Absorb, + C1: CurveGroup, + for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>, +{ + pub fn default( + poseidon_config: &PoseidonConfig>, + F_circuit: FC, + ccs: CCS, + mu: usize, + nu: usize, + ) -> Result { + if mu < 1 || nu < 1 { + return Err(Error::CantBeZero("mu,nu".to_string())); + } + Ok(Self { + _c2: PhantomData, + _gc2: PhantomData, + poseidon_config: poseidon_config.clone(), + ccs, + pp_hash: None, + mu, + nu, + i: None, + i_usize: None, + z_0: None, + z_i: None, + external_inputs: None, + U_i: None, + Us: None, + u_i_C: None, + us: None, + U_i1_C: None, + F: F_circuit, + x: None, + nimfs_proof: None, + cf_u_i_cmW: None, + cf_U_i: None, + cf_x: None, + cf_cmT: None, + }) + } + + pub fn empty( + poseidon_config: &PoseidonConfig>, + F: FC, // FCircuit + ccs: Option>, + mu: usize, + nu: usize, + ) -> Result { + let initial_ccs = CCS { + // m, n, s, s_prime and M will be overwritten by the `upper_bound_ccs' method + m: 0, + n: 0, + l: 2, // io_len + s: 1, + s_prime: 1, + t: 3, // note: this is only supports R1CS for the moment + q: 2, + d: 2, + S: vec![vec![0, 1], vec![2]], + c: vec![C1::ScalarField::one(), C1::ScalarField::one().neg()], + M: vec![], + }; + let mut augmented_f_circuit = Self::default(poseidon_config, F, initial_ccs, mu, nu)?; + if ccs.is_some() { + augmented_f_circuit.ccs = ccs.unwrap(); + } else { + augmented_f_circuit.ccs = augmented_f_circuit.upper_bound_ccs()?; + } + Ok(augmented_f_circuit) + } + + /// This method computes the CCS parameters. This is used because there is a circular + /// dependency between the AugmentedFCircuit CCS and the CCS parameters m & n & s & s'. + /// For a stable FCircuit circuit, the CCS parameters can be computed in advance and can be + /// feed in as parameter for the AugmentedFCircuit::empty method to avoid computing them there. 
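A usage sketch of that precomputation (generic parameters and error handling abbreviated; `f_circuit`, `mu` and `nu` are assumed to be in scope):
// First instantiation: pass `None` so that `empty` derives the CCS bounds itself
// (internally via `upper_bound_ccs`).
let circuit = AugmentedFCircuit::<C1, C2, GC2, FC>::empty(&poseidon_config, f_circuit.clone(), None, mu, nu)?;
let ccs = circuit.ccs.clone();
// Subsequent instantiations for the same FCircuit can reuse the cached CCS and skip the
// recomputation:
let circuit2 = AugmentedFCircuit::<C1, C2, GC2, FC>::empty(&poseidon_config, f_circuit, Some(ccs), mu, nu)?;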
+ pub fn upper_bound_ccs(&self) -> Result, Error> { + let r1cs = get_r1cs_from_cs::>(self.clone()).unwrap(); + let mut ccs = CCS::from_r1cs(r1cs.clone()); + + let z_0 = vec![C1::ScalarField::zero(); self.F.state_len()]; + let mut W_i = Witness::::dummy(&ccs); + let mut U_i = LCCCS::::dummy(ccs.l, ccs.t, ccs.s); + let mut w_i = W_i.clone(); + let mut u_i = CCCS::::dummy(ccs.l); + + let n_iters = 2; + for _ in 0..n_iters { + let Us = vec![U_i.clone(); self.mu - 1]; + let Ws = vec![W_i.clone(); self.mu - 1]; + let us = vec![u_i.clone(); self.nu - 1]; + let ws = vec![w_i.clone(); self.nu - 1]; + + let all_Us = [vec![U_i.clone()], Us.clone()].concat(); + let all_us = [vec![u_i.clone()], us.clone()].concat(); + let all_Ws = [vec![W_i.clone()], Ws].concat(); + let all_ws = [vec![w_i.clone()], ws].concat(); + + let mut transcript_p: PoseidonSponge = + PoseidonSponge::::new(&self.poseidon_config.clone()); + // since this is only for the number of constraints, no need to absorb the pp_hash here + let (nimfs_proof, U_i1, _, _) = NIMFS::>::prove( + &mut transcript_p, + &ccs, + &all_Us, + &all_us, + &all_Ws, + &all_ws, + )?; + + let augmented_f_circuit = Self { + _c2: PhantomData, + _gc2: PhantomData, + poseidon_config: self.poseidon_config.clone(), + ccs: ccs.clone(), + pp_hash: Some(C1::ScalarField::zero()), + mu: self.mu, + nu: self.nu, + i: Some(C1::ScalarField::zero()), + i_usize: Some(0), + z_0: Some(z_0.clone()), + z_i: Some(z_0.clone()), + external_inputs: Some(vec![C1::ScalarField::zero(); self.F.external_inputs_len()]), + U_i: Some(U_i.clone()), + Us: Some(Us), + u_i_C: Some(u_i.C), + us: Some(us), + U_i1_C: Some(U_i1.C), + F: self.F.clone(), + x: Some(C1::ScalarField::zero()), + nimfs_proof: Some(nimfs_proof), + // cyclefold values + cf_u_i_cmW: None, + cf_U_i: None, + cf_x: None, + cf_cmT: None, + }; + + let cs: ConstraintSystem; + (cs, ccs) = augmented_f_circuit.compute_cs_ccs()?; + // prepare instances for next loop iteration + use crate::arith::r1cs::extract_w_x; + let (r1cs_w_i1, r1cs_x_i1) = extract_w_x::(&cs); + u_i = CCCS:: { + C: u_i.C, + x: r1cs_x_i1, + }; + w_i = Witness:: { + w: r1cs_w_i1.clone(), + r_w: C1::ScalarField::one(), + }; + W_i = Witness::::dummy(&ccs); + U_i = LCCCS::::dummy(ccs.l, ccs.t, ccs.s); + } + Ok(ccs) + + // Ok(augmented_f_circuit.compute_cs_ccs()?.1) + } + + /// Returns the cs (ConstraintSystem) and the CCS out of the AugmentedFCircuit + #[allow(clippy::type_complexity)] + pub fn compute_cs_ccs( + &self, + ) -> Result<(ConstraintSystem, CCS), Error> { + let cs = ConstraintSystem::::new_ref(); + self.clone().generate_constraints(cs.clone())?; + cs.finalize(); + let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; + let r1cs = extract_r1cs::(&cs); + let ccs = CCS::from_r1cs(r1cs.clone()); + + Ok((cs, ccs)) + } +} + +impl ConstraintSynthesizer> for AugmentedFCircuit +where + C1: CurveGroup, + C2: CurveGroup, + GC2: CurveVar> + ToConstraintFieldGadget>, + FC: FCircuit>, + ::BaseField: PrimeField, + ::BaseField: PrimeField, + ::ScalarField: Absorb, + ::ScalarField: Absorb, + C1: CurveGroup, + for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>, +{ + fn generate_constraints(self, cs: ConstraintSystemRef>) -> Result<(), SynthesisError> { + let pp_hash = FpVar::>::new_witness(cs.clone(), || { + Ok(self.pp_hash.unwrap_or_else(CF1::::zero)) + })?; + let i = FpVar::>::new_witness(cs.clone(), || { + Ok(self.i.unwrap_or_else(CF1::::zero)) + })?; + let z_0 = Vec::>>::new_witness(cs.clone(), || { + Ok(self + .z_0 + .unwrap_or(vec![CF1::::zero(); self.F.state_len()])) 
+ })?; + let z_i = Vec::>>::new_witness(cs.clone(), || { + Ok(self + .z_i + .unwrap_or(vec![CF1::::zero(); self.F.state_len()])) + })?; + let external_inputs = Vec::>>::new_witness(cs.clone(), || { + Ok(self + .external_inputs + .unwrap_or(vec![CF1::::zero(); self.F.external_inputs_len()])) + })?; + + let U_dummy = LCCCS::::dummy(self.ccs.l, self.ccs.t, self.ccs.s); + let u_dummy = CCCS::::dummy(self.ccs.l); + + let U_i = + LCCCSVar::::new_witness(cs.clone(), || Ok(self.U_i.unwrap_or(U_dummy.clone())))?; + let Us = Vec::>::new_witness(cs.clone(), || { + Ok(self.Us.unwrap_or(vec![U_dummy.clone(); self.mu - 1])) + })?; + let us = Vec::>::new_witness(cs.clone(), || { + Ok(self.us.unwrap_or(vec![u_dummy.clone(); self.mu - 1])) + })?; + let U_i1_C = NonNativeAffineVar::new_witness(cs.clone(), || { + Ok(self.U_i1_C.unwrap_or_else(C1::zero)) + })?; + let nimfs_proof_dummy = NIMFSProof::::dummy(&self.ccs, self.mu, self.nu); + let nimfs_proof = ProofVar::::new_witness(cs.clone(), || { + Ok(self.nimfs_proof.unwrap_or(nimfs_proof_dummy)) + })?; + + let cf_u_dummy = CommittedInstance::dummy(cf_io_len(self.mu + self.nu)); + let cf_U_i = CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { + Ok(self.cf_U_i.unwrap_or(cf_u_dummy.clone())) + })?; + let cf_cmT = GC2::new_witness(cs.clone(), || Ok(self.cf_cmT.unwrap_or_else(C2::zero)))?; + + let sponge = PoseidonSpongeVar::::new(cs.clone(), &self.poseidon_config); + + // get z_{i+1} from the F circuit + let i_usize = self.i_usize.unwrap_or(0); + let z_i1 = + self.F + .generate_step_constraints(cs.clone(), i_usize, z_i.clone(), external_inputs)?; + + let is_basecase = i.is_zero()?; + let is_not_basecase = is_basecase.not(); + + // Primary Part + // P.1. Compute u_i.x + // u_i.x[0] = H(i, z_0, z_i, U_i) + let (u_i_x, _) = U_i.clone().hash( + &sponge, + pp_hash.clone(), + i.clone(), + z_0.clone(), + z_i.clone(), + )?; + // u_i.x[1] = H(cf_U_i) + let (cf_u_i_x, cf_U_i_vec) = cf_U_i.clone().hash(&sponge, pp_hash.clone())?; + + // P.2. Construct u_i + let u_i = CCCSVar:: { + // u_i.C is provided by the prover as witness + C: NonNativeAffineVar::::new_witness(cs.clone(), || { + Ok(self.u_i_C.unwrap_or(C1::zero())) + })?, + // u_i.x is computed in step 1 + x: vec![u_i_x, cf_u_i_x], + }; + + let all_Us = [vec![U_i.clone()], Us].concat(); + let all_us = [vec![u_i.clone()], us].concat(); + + // P.3. NIMFS.verify, obtains U_{i+1} by folding [U_i] & [u_i]. + // Notice that NIMFSGadget::fold_committed_instance does not fold C. We set `U_i1.C` to + // unconstrained witnesses `U_i1_C` respectively. Its correctness will be checked on the + // other curve. 
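+ // (Sketch of why deferring this check is sound, assuming the CycleFold circuit is used as
+ // below: the native NIMFS folds the commitment as U_{i+1}.C = sum_i rho_i * C_i, where
+ // rho_0 = 1 and each subsequent rho_i is the previous one multiplied by rho and cropped to
+ // N_BITS_RO bits. Doing that scalar multiplication here would require non-native group
+ // arithmetic, so U_{i+1}.C is instead taken as a free witness, and the rho values together
+ // with all the commitment coordinates are exposed through `cf_x` further down, so that the
+ // CycleFold circuit over C2 can enforce the same linear combination natively.)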
+ let mut transcript = PoseidonSpongeVar::new(cs.clone(), &self.poseidon_config); + transcript.absorb(&pp_hash)?; + let (mut U_i1, rho_vec) = NIMFSGadget::::verify( + cs.clone(), + &self.ccs.clone(), + &mut transcript, + &all_Us, + &all_us, + nimfs_proof, + is_not_basecase.clone(), + )?; + U_i1.C = U_i1_C; + + // P.4.a compute and check the first output of F' + let (u_i1_x, _) = U_i1.clone().hash( + &sponge, + pp_hash.clone(), + i + FpVar::>::one(), + z_0.clone(), + z_i1.clone(), + )?; + let (u_i1_x_base, _) = LCCCSVar::new_constant(cs.clone(), U_dummy)?.hash( + &sponge, + pp_hash.clone(), + FpVar::>::one(), + z_0.clone(), + z_i1.clone(), + )?; + let x = FpVar::new_input(cs.clone(), || Ok(self.x.unwrap_or(u_i1_x_base.value()?)))?; + x.enforce_equal(&is_basecase.select(&u_i1_x_base, &u_i1_x)?)?; + + // convert rho_bits of the rho_vec to a `NonNativeFieldVar` + let rho_vec_nonnat = rho_vec + .iter() + .map(|rho_i| { + let mut bits = rho_i.clone(); + bits.resize(C1::BaseField::MODULUS_BIT_SIZE as usize, Boolean::FALSE); + NonNativeUintVar::from(&bits) + }) + .collect(); + + // CycleFold part + // C.1. Compute cf1_u_i.x and cf2_u_i.x + let cf_x: Vec>> = [ + rho_vec_nonnat, + all_Us + .iter() + .flat_map(|U| vec![U.C.x.clone(), U.C.y.clone()]) + .collect(), + all_us + .iter() + .flat_map(|u| vec![u.C.x.clone(), u.C.y.clone()]) + .collect(), + vec![U_i1.C.x, U_i1.C.y], + ] + .concat(); + + // ensure that cf_u has as public inputs the C from main instances U_i, u_i, U_i+1 + // coordinates of the commitments. + // C.2. Construct `cf_u_i` + let cf_u_i = CycleFoldCommittedInstanceVar:: { + // cf1_u_i.cmE = 0. Notice that we enforce cmE to be equal to 0 since it is allocated + // as 0. + cmE: GC2::zero(), + // cf1_u_i.u = 1 + u: NonNativeUintVar::new_constant(cs.clone(), C1::BaseField::one())?, + // cf_u_i.cmW is provided by the prover as witness + cmW: GC2::new_witness(cs.clone(), || Ok(self.cf_u_i_cmW.unwrap_or(C2::zero())))?, + // cf_u_i.x is computed in step 1 + x: cf_x, + }; + + // C.3. nifs.verify (fold_committed_instance), obtains cf_U_{i+1} by folding cf_u_i & cf_U_i. + // compute cf_r = H(cf_u_i, cf_U_i, cf_cmT) + // cf_r_bits is denoted by rho* in the paper. + let cf_r_bits = CycleFoldChallengeGadget::::get_challenge_gadget( + &mut transcript, + pp_hash.clone(), + cf_U_i_vec, + cf_u_i.clone(), + cf_cmT.clone(), + )?; + // Convert cf_r_bits to a `NonNativeFieldVar` + let cf_r_nonnat = { + let mut bits = cf_r_bits.clone(); + bits.resize(C1::BaseField::MODULUS_BIT_SIZE as usize, Boolean::FALSE); + NonNativeUintVar::from(&bits) + }; + // Fold cf1_u_i & cf_U_i into cf1_U_{i+1} + let cf_U_i1 = NIFSFullGadget::::fold_committed_instance( + cf_r_bits, + cf_r_nonnat, + cf_cmT, + cf_U_i, + cf_u_i, + )?; + + // Back to Primary Part + // P.4.b compute and check the second output of F' + // Base case: u_{i+1}.x[1] == H(cf_U_{\bot}) + // Non-base case: u_{i+1}.x[1] == H(cf_U_{i+1}) + let (cf_u_i1_x, _) = cf_U_i1.clone().hash(&sponge, pp_hash.clone())?; + let (cf_u_i1_x_base, _) = + CycleFoldCommittedInstanceVar::new_constant(cs.clone(), cf_u_dummy)? 
+ .hash(&sponge, pp_hash)?; + let cf_x = FpVar::new_input(cs.clone(), || { + Ok(self.cf_x.unwrap_or(cf_u_i1_x_base.value()?)) + })?; + cf_x.enforce_equal(&is_basecase.select(&cf_u_i1_x_base, &cf_u_i1_x)?)?; + + Ok(()) + } +} + #[cfg(test)] mod tests { - use ark_pallas::{Fr, Projective}; - use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar, R1CSVar}; - use ark_relations::r1cs::ConstraintSystem; + use ark_bn254::{constraints::GVar, Fq, Fr, G1Projective as Projective}; + use ark_ff::BigInteger; + use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; use ark_std::{test_rng, UniformRand}; + use std::time::Instant; use super::*; use crate::{ - ccs::{ - tests::{get_test_ccs, get_test_z}, - CCS, + arith::{ + ccs::tests::{get_test_ccs, get_test_z}, + r1cs::extract_w_x, }, commitment::{pedersen::Pedersen, CommitmentScheme}, - folding::hypernova::{ - nimfs::NIMFS, - utils::{compute_c, compute_sigmas_thetas}, - }, - transcript::{ - poseidon::{poseidon_canonical_config, PoseidonTranscript, PoseidonTranscriptVar}, - Transcript, + folding::{ + circuits::cyclefold::{fold_cyclefold_circuit, CycleFoldCircuit}, + hypernova::utils::{compute_c, compute_sigmas_thetas}, + nova::{traits::NovaR1CS, Witness as NovaWitness}, }, + frontend::tests::CubicFCircuit, + transcript::poseidon::poseidon_canonical_config, + utils::get_cm_coordinates, }; #[test] @@ -399,13 +948,17 @@ mod tests { // Create the LCCCS instances out of z_lcccs let mut lcccs_instances = Vec::new(); for z_i in z_lcccs.iter() { - let (inst, _) = ccs.to_lcccs(&mut rng, &pedersen_params, z_i).unwrap(); + let (inst, _) = ccs + .to_lcccs::<_, _, Pedersen>(&mut rng, &pedersen_params, z_i) + .unwrap(); lcccs_instances.push(inst); } // Create the CCCS instance out of z_cccs let mut cccs_instances = Vec::new(); for z_i in z_cccs.iter() { - let (inst, _) = ccs.to_cccs(&mut rng, &pedersen_params, z_i).unwrap(); + let (inst, _) = ccs + .to_cccs::<_, _, Pedersen>(&mut rng, &pedersen_params, z_i) + .unwrap(); cccs_instances.push(inst); } @@ -491,7 +1044,9 @@ mod tests { let mut lcccs_instances = Vec::new(); let mut w_lcccs = Vec::new(); for z_i in z_lcccs.iter() { - let (running_instance, w) = ccs.to_lcccs(&mut rng, &pedersen_params, z_i).unwrap(); + let (running_instance, w) = ccs + .to_lcccs::<_, _, Pedersen>(&mut rng, &pedersen_params, z_i) + .unwrap(); lcccs_instances.push(running_instance); w_lcccs.push(w); } @@ -499,19 +1054,20 @@ mod tests { let mut cccs_instances = Vec::new(); let mut w_cccs = Vec::new(); for z_i in z_cccs.iter() { - let (new_instance, w) = ccs.to_cccs(&mut rng, &pedersen_params, z_i).unwrap(); + let (new_instance, w) = ccs + .to_cccs::<_, _, Pedersen>(&mut rng, &pedersen_params, z_i) + .unwrap(); cccs_instances.push(new_instance); w_cccs.push(w); } // Prover's transcript let poseidon_config = poseidon_canonical_config::(); - let mut transcript_p: PoseidonTranscript = - PoseidonTranscript::::new(&poseidon_config); + let mut transcript_p: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); // Run the prover side of the multifolding - let (proof, folded_lcccs, folded_witness) = - NIMFS::>::prove( + let (proof, folded_lcccs, folded_witness, _) = + NIMFS::>::prove( &mut transcript_p, &ccs, &lcccs_instances, @@ -522,11 +1078,10 @@ mod tests { .unwrap(); // Verifier's transcript - let mut transcript_v: PoseidonTranscript = - PoseidonTranscript::::new(&poseidon_config); + let mut transcript_v: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); // Run the verifier side of the multifolding - let folded_lcccs_v = 
NIMFS::>::verify( + let folded_lcccs_v = NIMFS::>::verify( &mut transcript_v, &ccs, &lcccs_instances, @@ -537,9 +1092,7 @@ mod tests { assert_eq!(folded_lcccs, folded_lcccs_v); // Check that the folded LCCCS instance is a valid instance with respect to the folded witness - folded_lcccs - .check_relation(&pedersen_params, &ccs, &folded_witness) - .unwrap(); + folded_lcccs.check_relation(&ccs, &folded_witness).unwrap(); // allocate circuit inputs let cs = ConstraintSystem::::new_ref(); @@ -551,18 +1104,375 @@ mod tests { .unwrap(); let proofVar = ProofVar::::new_witness(cs.clone(), || Ok(proof.clone())).unwrap(); - let transcriptVar = PoseidonTranscriptVar::::new(cs.clone(), &poseidon_config); + let mut transcriptVar = PoseidonSpongeVar::::new(cs.clone(), &poseidon_config); - let folded_lcccsVar = NIMFSGadget::::verify( + let enabled = Boolean::::new_witness(cs.clone(), || Ok(true)).unwrap(); + let (folded_lcccsVar, _) = NIMFSGadget::::verify( cs.clone(), &ccs, - transcriptVar, + &mut transcriptVar, &lcccs_instancesVar, &cccs_instancesVar, proofVar, + enabled, ) .unwrap(); assert!(cs.is_satisfied().unwrap()); assert_eq!(folded_lcccsVar.u.value().unwrap(), folded_lcccs.u); } + + /// test that checks the native LCCCS.hash vs the R1CS constraints version + #[test] + pub fn test_lcccs_hash() { + let mut rng = test_rng(); + let poseidon_config = poseidon_canonical_config::(); + let sponge = PoseidonSponge::::new(&poseidon_config); + + let ccs = get_test_ccs(); + let z1 = get_test_z::(3); + + let (pedersen_params, _) = + Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); + let pp_hash = Fr::from(42u32); // only for test + + let i = Fr::from(3_u32); + let z_0 = vec![Fr::from(3_u32)]; + let z_i = vec![Fr::from(3_u32)]; + let (lcccs, _) = ccs + .to_lcccs::<_, _, Pedersen>(&mut rng, &pedersen_params, &z1) + .unwrap(); + let h = lcccs + .clone() + .hash(&sponge, pp_hash, i, z_0.clone(), z_i.clone()); + + let cs = ConstraintSystem::::new_ref(); + + let spongeVar = PoseidonSpongeVar::::new(cs.clone(), &poseidon_config); + let pp_hashVar = FpVar::::new_witness(cs.clone(), || Ok(pp_hash)).unwrap(); + let iVar = FpVar::::new_witness(cs.clone(), || Ok(i)).unwrap(); + let z_0Var = Vec::>::new_witness(cs.clone(), || Ok(z_0.clone())).unwrap(); + let z_iVar = Vec::>::new_witness(cs.clone(), || Ok(z_i.clone())).unwrap(); + let lcccsVar = LCCCSVar::::new_witness(cs.clone(), || Ok(lcccs)).unwrap(); + let (hVar, _) = lcccsVar + .clone() + .hash( + &spongeVar, + pp_hashVar, + iVar.clone(), + z_0Var.clone(), + z_iVar.clone(), + ) + .unwrap(); + assert!(cs.is_satisfied().unwrap()); + + // check that the natively computed and in-circuit computed hashes match + assert_eq!(hVar.value().unwrap(), h); + } + + #[test] + pub fn test_augmented_f_circuit() { + let mut rng = test_rng(); + let poseidon_config = poseidon_canonical_config::(); + let sponge = PoseidonSponge::::new(&poseidon_config); + + let mu = 3; + let nu = 3; + + let start = Instant::now(); + let F_circuit = CubicFCircuit::::new(()).unwrap(); + let mut augmented_f_circuit = AugmentedFCircuit::< + Projective, + Projective2, + GVar2, + CubicFCircuit, + >::empty(&poseidon_config, F_circuit, None, mu, nu) + .unwrap(); + let ccs = augmented_f_circuit.ccs.clone(); + println!("AugmentedFCircuit & CCS generation: {:?}", start.elapsed()); + println!("CCS m x n: {} x {}", ccs.m, ccs.n); + + // CycleFold circuit + let cs2 = ConstraintSystem::::new_ref(); + let cf_circuit = CycleFoldCircuit::::empty(mu + nu); + cf_circuit.generate_constraints(cs2.clone()).unwrap(); + 
cs2.finalize(); + let cs2 = cs2 + .into_inner() + .ok_or(Error::NoInnerConstraintSystem) + .unwrap(); + let cf_r1cs = extract_r1cs::(&cs2); + println!("CF m x n: {} x {}", cf_r1cs.A.n_rows, cf_r1cs.A.n_cols); + + let (pedersen_params, _) = + Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); + let (cf_pedersen_params, _) = + Pedersen::::setup(&mut rng, cf_r1cs.A.n_cols - cf_r1cs.l - 1).unwrap(); + + // public params hash + let pp_hash = Fr::from(42u32); // only for test + + // first step + let z_0 = vec![Fr::from(3_u32)]; + let mut z_i = z_0.clone(); + + // prepare the dummy instances + let W_dummy = Witness::::dummy(&ccs); + let U_dummy = LCCCS::::dummy(ccs.l, ccs.t, ccs.s); + let w_dummy = W_dummy.clone(); + let u_dummy = CCCS::::dummy(ccs.l); + let (cf_W_dummy, cf_U_dummy): (NovaWitness, CommittedInstance) = + cf_r1cs.dummy_instance(); + + // set the initial dummy instances + let mut W_i = W_dummy.clone(); + let mut U_i = U_dummy.clone(); + let mut w_i = w_dummy.clone(); + let mut u_i = u_dummy.clone(); + let mut cf_W_i = cf_W_dummy.clone(); + let mut cf_U_i = cf_U_dummy.clone(); + u_i.x = vec![ + U_i.hash(&sponge, pp_hash, Fr::zero(), z_0.clone(), z_i.clone()), + cf_U_i.hash_cyclefold(&sponge, pp_hash), + ]; + + let n_steps: usize = 4; + let mut iFr = Fr::zero(); + for i in 0..n_steps { + let start = Instant::now(); + + // for this test, let Us & us be just an array of copies of the U_i & u_i respectively + let Us = vec![U_i.clone(); mu - 1]; + let Ws = vec![W_i.clone(); mu - 1]; + let us = vec![u_i.clone(); nu - 1]; + let ws = vec![w_i.clone(); nu - 1]; + let all_Us = [vec![U_i.clone()], Us.clone()].concat(); + let all_us = [vec![u_i.clone()], us.clone()].concat(); + let all_Ws = [vec![W_i.clone()], Ws].concat(); + let all_ws = [vec![w_i.clone()], ws].concat(); + + let z_i1 = F_circuit.step_native(i, z_i.clone(), vec![]).unwrap(); + + let (U_i1, W_i1); + + if i == 0 { + W_i1 = Witness::::dummy(&ccs); + U_i1 = LCCCS::dummy(ccs.l, ccs.t, ccs.s); + + let u_i1_x = U_i1.hash(&sponge, pp_hash, Fr::one(), z_0.clone(), z_i1.clone()); + + // hash the initial (dummy) CycleFold instance, which is used as the 2nd public + // input in the AugmentedFCircuit + let cf_u_i1_x = cf_U_i.hash_cyclefold(&sponge, pp_hash); + + augmented_f_circuit = + AugmentedFCircuit::> { + _c2: PhantomData, + _gc2: PhantomData, + poseidon_config: poseidon_config.clone(), + ccs: ccs.clone(), + pp_hash: Some(pp_hash), + mu, + nu, + i: Some(Fr::zero()), + i_usize: Some(0), + z_0: Some(z_0.clone()), + z_i: Some(z_i.clone()), + external_inputs: Some(vec![]), + U_i: Some(U_i.clone()), + Us: Some(Us.clone()), + u_i_C: Some(u_i.C), + us: Some(us.clone()), + U_i1_C: Some(U_i1.C), + F: F_circuit, + x: Some(u_i1_x), + nimfs_proof: None, + + // cyclefold values + cf_u_i_cmW: None, + cf_U_i: None, + cf_x: Some(cf_u_i1_x), + cf_cmT: None, + }; + } else { + let mut transcript_p: PoseidonSponge = + PoseidonSponge::::new(&poseidon_config.clone()); + transcript_p.absorb(&pp_hash); + let (rho_powers, nimfs_proof); + (nimfs_proof, U_i1, W_i1, rho_powers) = + NIMFS::>::prove( + &mut transcript_p, + &ccs, + &all_Us, + &all_us, + &all_Ws, + &all_ws, + ) + .unwrap(); + + // sanity check: check the folded instance relation + U_i1.check_relation(&ccs, &W_i1).unwrap(); + + let u_i1_x = + U_i1.hash(&sponge, pp_hash, iFr + Fr::one(), z_0.clone(), z_i1.clone()); + + let rho_powers_Fq: Vec = rho_powers + .iter() + .map(|rho_i| { + Fq::from_bigint(BigInteger::from_bits_le(&rho_i.into_bigint().to_bits_le())) + .unwrap() + }) + .collect(); + let 
rho_powers_bits: Vec> = rho_powers + .iter() + .map(|rho_i| rho_i.into_bigint().to_bits_le()[..N_BITS_RO].to_vec()) + .collect(); + + // CycleFold part: + // get the vector used as public inputs 'x' in the CycleFold circuit + let cf_u_i_x = [ + // all values for multiple instances + rho_powers_Fq, + get_cm_coordinates(&U_i.C), + Us.iter() + .flat_map(|Us_i| get_cm_coordinates(&Us_i.C)) + .collect(), + get_cm_coordinates(&u_i.C), + us.iter() + .flat_map(|us_i| get_cm_coordinates(&us_i.C)) + .collect(), + get_cm_coordinates(&U_i1.C), + ] + .concat(); + + let cf_circuit = CycleFoldCircuit:: { + _gc: PhantomData, + n_points: mu + nu, + r_bits: Some(rho_powers_bits.clone()), + points: Some( + [ + vec![U_i.clone().C], + Us.iter().map(|Us_i| Us_i.C).collect(), + vec![u_i.clone().C], + us.iter().map(|us_i| us_i.C).collect(), + ] + .concat(), + ), + x: Some(cf_u_i_x.clone()), + }; + + // ensure that the CycleFoldCircuit is well defined + assert_eq!( + cf_circuit.r_bits.clone().unwrap().len(), + cf_circuit.n_points - 1 + ); + assert_eq!( + cf_circuit.points.clone().unwrap().len(), + cf_circuit.n_points + ); + + let (_cf_w_i, cf_u_i, cf_W_i1, cf_U_i1, cf_cmT, _) = fold_cyclefold_circuit::< + Projective, + GVar, + Projective2, + GVar2, + CubicFCircuit, + Pedersen, + Pedersen, + >( + mu + nu, + &mut transcript_p, + cf_r1cs.clone(), + cf_pedersen_params.clone(), + pp_hash, + cf_W_i.clone(), // CycleFold running instance witness + cf_U_i.clone(), // CycleFold running instance + cf_u_i_x, // CycleFold incoming instance + cf_circuit, + ) + .unwrap(); + + // hash the CycleFold folded instance, which is used as the 2nd public input in the + // AugmentedFCircuit + let cf_u_i1_x = cf_U_i1.hash_cyclefold(&sponge, pp_hash); + + augmented_f_circuit = + AugmentedFCircuit::> { + _c2: PhantomData, + _gc2: PhantomData, + poseidon_config: poseidon_config.clone(), + ccs: ccs.clone(), + pp_hash: Some(pp_hash), + mu, + nu, + i: Some(iFr), + i_usize: Some(i), + z_0: Some(z_0.clone()), + z_i: Some(z_i.clone()), + external_inputs: Some(vec![]), + U_i: Some(U_i.clone()), + Us: Some(Us.clone()), + u_i_C: Some(u_i.C), + us: Some(us.clone()), + U_i1_C: Some(U_i1.C), + F: F_circuit, + x: Some(u_i1_x), + nimfs_proof: Some(nimfs_proof), + + // cyclefold values + cf_u_i_cmW: Some(cf_u_i.cmW), + cf_U_i: Some(cf_U_i), + cf_x: Some(cf_u_i1_x), + cf_cmT: Some(cf_cmT), + }; + + // assign the next round instances + cf_W_i = cf_W_i1; + cf_U_i = cf_U_i1; + } + + let (cs, _) = augmented_f_circuit.compute_cs_ccs().unwrap(); + assert!(cs.is_satisfied().unwrap()); + + let (r1cs_w_i1, r1cs_x_i1) = extract_w_x::(&cs); // includes 1 and public inputs + assert_eq!(r1cs_x_i1[0], augmented_f_circuit.x.unwrap()); + let r1cs_z = [vec![Fr::one()], r1cs_x_i1.clone(), r1cs_w_i1.clone()].concat(); + // compute committed instances, w_{i+1}, u_{i+1}, which will be used as w_i, u_i, so we + // assign them directly to w_i, u_i. 
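+ // Note: `r1cs_z` above has the layout z = [1, x, w]; `to_cccs` commits to the `w` part
+ // (the entries after the public inputs) and keeps `x` as the public input, which is why
+ // the checks below can assert that u_i.x == [H(i+1, z_0, z_{i+1}, U_{i+1}), H(cf_U_{i+1})].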
+ (u_i, w_i) = ccs + .to_cccs::<_, _, Pedersen>(&mut rng, &pedersen_params, &r1cs_z) + .unwrap(); + u_i.check_relation(&ccs, &w_i).unwrap(); + + // sanity checks + assert_eq!(w_i.w, r1cs_w_i1); + assert_eq!(u_i.x, r1cs_x_i1); + assert_eq!(u_i.x[0], augmented_f_circuit.x.unwrap()); + assert_eq!(u_i.x[1], augmented_f_circuit.cf_x.unwrap()); + let expected_u_i1_x = + U_i1.hash(&sponge, pp_hash, iFr + Fr::one(), z_0.clone(), z_i1.clone()); + let expected_cf_U_i1_x = cf_U_i.hash_cyclefold(&sponge, pp_hash); + // u_i is already u_i1 at this point, check that has the expected value at x[0] + assert_eq!(u_i.x[0], expected_u_i1_x); + assert_eq!(u_i.x[1], expected_cf_U_i1_x); + + // set values for next iteration + iFr += Fr::one(); + // assign z_{i+1} into z_i + z_i = z_i1.clone(); + U_i = U_i1.clone(); + W_i = W_i1.clone(); + + // check the new LCCCS instance relation + U_i.check_relation(&ccs, &W_i).unwrap(); + // check the new CCCS instance relation + u_i.check_relation(&ccs, &w_i).unwrap(); + + // check the CycleFold instance relation + cf_r1cs + .check_relaxed_instance_relation(&cf_W_i, &cf_U_i) + .unwrap(); + + println!("augmented_f_circuit step {}: {:?}", i, start.elapsed()); + } + } } diff --git a/folding-schemes/src/folding/hypernova/lcccs.rs b/folding-schemes/src/folding/hypernova/lcccs.rs index 52c0527f..801ec4ed 100644 --- a/folding-schemes/src/folding/hypernova/lcccs.rs +++ b/folding-schemes/src/folding/hypernova/lcccs.rs @@ -1,16 +1,15 @@ -use ark_ec::CurveGroup; +use ark_crypto_primitives::sponge::Absorb; +use ark_ec::{CurveGroup, Group}; use ark_ff::PrimeField; use ark_poly::DenseMultilinearExtension; use ark_poly::MultilinearExtension; - use ark_std::rand::Rng; +use ark_std::Zero; -use super::cccs::Witness; -use crate::ccs::CCS; -use crate::commitment::{ - pedersen::{Params as PedersenParams, Pedersen}, - CommitmentScheme, -}; +use super::Witness; +use crate::arith::ccs::CCS; +use crate::commitment::CommitmentScheme; +use crate::transcript::{AbsorbNonNative, Transcript}; use crate::utils::mle::dense_vec_to_dense_mle; use crate::utils::vec::mat_vec_mul; use crate::Error; @@ -31,10 +30,10 @@ pub struct LCCCS { } impl CCS { - pub fn to_lcccs( + pub fn to_lcccs>( &self, rng: &mut R, - pedersen_params: &PedersenParams, + cs_params: &CS::ProverParams, z: &[C::ScalarField], ) -> Result<(LCCCS, Witness), Error> where @@ -42,8 +41,13 @@ impl CCS { C: CurveGroup, { let w: Vec = z[(1 + self.l)..].to_vec(); - let r_w = C::ScalarField::rand(rng); - let C = Pedersen::::commit(pedersen_params, &w, &r_w)?; + // if the commitment scheme is set to be hiding, set the random blinding parameter + let r_w = if CS::is_hiding() { + C::ScalarField::rand(rng) + } else { + C::ScalarField::zero() + }; + let C = CS::commit(cs_params, &w, &r_w)?; let r_x: Vec = (0..self.s).map(|_| C::ScalarField::rand(rng)).collect(); @@ -73,19 +77,26 @@ impl CCS { } impl LCCCS { - /// Perform the check of the LCCCS instance described at section 4.2 + pub fn dummy(l: usize, t: usize, s: usize) -> LCCCS + where + C::ScalarField: PrimeField, + { + LCCCS:: { + C: C::zero(), + u: C::ScalarField::zero(), + x: vec![C::ScalarField::zero(); l], + r_x: vec![C::ScalarField::zero(); s], + v: vec![C::ScalarField::zero(); t], + } + } + + /// Perform the check of the LCCCS instance described at section 4.2, + /// notice that this method does not check the commitment correctness pub fn check_relation( &self, - pedersen_params: &PedersenParams, ccs: &CCS, w: &Witness, ) -> Result<(), Error> { - // check that C is the commitment of w. 
Notice that this is not verifying a Pedersen - // opening, but checking that the Commitment comes from committing to the witness. - if self.C != Pedersen::::commit(pedersen_params, &w.w, &w.r_w)? { - return Err(Error::NotSatisfied); - } - // check CCS relation let z: Vec = [vec![self.u], self.x.clone(), w.w.to_vec()].concat(); @@ -104,20 +115,70 @@ impl LCCCS { } } +impl Absorb for LCCCS +where + C::ScalarField: Absorb, +{ + fn to_sponge_bytes(&self, _dest: &mut Vec) { + // This is never called + unimplemented!() + } + + fn to_sponge_field_elements(&self, dest: &mut Vec) { + // We cannot call `to_native_sponge_field_elements(dest)` directly, as + // `to_native_sponge_field_elements` needs `F` to be `C::ScalarField`, + // but here `F` is a generic `PrimeField`. + self.C + .to_native_sponge_field_elements_as_vec() + .to_sponge_field_elements(dest); + self.u.to_sponge_field_elements(dest); + self.x.to_sponge_field_elements(dest); + self.r_x.to_sponge_field_elements(dest); + self.v.to_sponge_field_elements(dest); + } +} + +impl LCCCS +where + ::ScalarField: Absorb, + ::BaseField: ark_ff::PrimeField, +{ + /// [`LCCCS`].hash implements the committed instance hash compatible with the gadget + /// implemented in nova/circuits.rs::CommittedInstanceVar.hash. + /// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and `U_i` is the LCCCS. + pub fn hash>( + &self, + sponge: &T, + pp_hash: C::ScalarField, + i: C::ScalarField, + z_0: Vec, + z_i: Vec, + ) -> C::ScalarField { + let mut sponge = sponge.clone(); + sponge.absorb(&pp_hash); + sponge.absorb(&i); + sponge.absorb(&z_0); + sponge.absorb(&z_i); + sponge.absorb(&self); + sponge.squeeze_field_elements(1)[0] + } +} + #[cfg(test)] pub mod tests { use ark_pallas::{Fr, Projective}; use ark_std::test_rng; use ark_std::One; use ark_std::UniformRand; - use ark_std::Zero; use std::sync::Arc; use super::*; - use crate::ccs::{ + use crate::arith::{ + ccs::tests::{get_test_ccs, get_test_z}, r1cs::R1CS, - tests::{get_test_ccs, get_test_z}, + Arith, }; + use crate::commitment::pedersen::Pedersen; use crate::utils::hypercube::BooleanHypercube; use crate::utils::virtual_polynomial::{build_eq_x_r_vec, VirtualPolynomial}; @@ -152,14 +213,16 @@ pub mod tests { let n_rows = 2_u32.pow(5) as usize; let n_cols = 2_u32.pow(5) as usize; - let r1cs = R1CS::rand(&mut rng, n_rows, n_cols); + let r1cs = R1CS::::rand(&mut rng, n_rows, n_cols); let ccs = CCS::from_r1cs(r1cs); let z: Vec = (0..n_cols).map(|_| Fr::rand(&mut rng)).collect(); let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); - let (lcccs, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z).unwrap(); + let (lcccs, _) = ccs + .to_lcccs::<_, Projective, Pedersen>(&mut rng, &pedersen_params, &z) + .unwrap(); // with our test vector coming from R1CS, v should have length 3 assert_eq!(lcccs.v.len(), 3); @@ -191,7 +254,9 @@ pub mod tests { let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); // Compute v_j with the right z - let (lcccs, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z).unwrap(); + let (lcccs, _) = ccs + .to_lcccs::<_, Projective, Pedersen>(&mut rng, &pedersen_params, &z) + .unwrap(); // with our test vector coming from R1CS, v should have length 3 assert_eq!(lcccs.v.len(), 3); diff --git a/folding-schemes/src/folding/hypernova/mod.rs b/folding-schemes/src/folding/hypernova/mod.rs index 3f7172e6..5a156559 100644 --- a/folding-schemes/src/folding/hypernova/mod.rs +++ b/folding-schemes/src/folding/hypernova/mod.rs @@ -1,6 +1,946 
@@ /// Implements the scheme described in [HyperNova](https://eprint.iacr.org/2023/573.pdf) +use ark_crypto_primitives::sponge::{ + poseidon::{PoseidonConfig, PoseidonSponge}, + Absorb, CryptographicSponge, +}; +use ark_ec::{CurveGroup, Group}; +use ark_ff::{BigInteger, PrimeField}; +use ark_r1cs_std::{groups::GroupOpsBounds, prelude::CurveVar, ToConstraintFieldGadget}; +use ark_std::rand::RngCore; +use ark_std::{One, Zero}; +use core::marker::PhantomData; +use std::fmt::Debug; + pub mod cccs; pub mod circuits; +use circuits::AugmentedFCircuit; pub mod lcccs; pub mod nimfs; pub mod utils; +use cccs::CCCS; +use lcccs::LCCCS; +use nimfs::NIMFS; + +use crate::commitment::CommitmentScheme; +use crate::constants::N_BITS_RO; +use crate::folding::circuits::{ + cyclefold::{fold_cyclefold_circuit, CycleFoldCircuit}, + CF2, +}; +use crate::folding::nova::{ + get_r1cs_from_cs, traits::NovaR1CS, CommittedInstance, PreprocessorParam, + Witness as NovaWitness, +}; +use crate::frontend::FCircuit; +use crate::utils::{get_cm_coordinates, pp_hash}; +use crate::Error; +use crate::{ + arith::{ + ccs::CCS, + r1cs::{extract_w_x, R1CS}, + }, + FoldingScheme, MultiFolding, +}; + +/// Witness for the LCCCS & CCCS, containing the w vector, and the r_w used as randomness in the Pedersen commitment. +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct Witness { + pub w: Vec, + pub r_w: F, +} + +impl Witness { + pub fn new(w: Vec) -> Self { + // note: at the current version, we don't use the blinding factors and we set them to 0 + // always. + Self { w, r_w: F::zero() } + } + pub fn dummy(ccs: &CCS) -> Self { + Witness::::new(vec![F::zero(); ccs.n - ccs.l - 1]) + } +} + +#[derive(Debug, Clone)] +pub struct ProverParams +where + C1: CurveGroup, + C2: CurveGroup, + CS1: CommitmentScheme, + CS2: CommitmentScheme, +{ + pub poseidon_config: PoseidonConfig, + pub cs_params: CS1::ProverParams, + pub cf_cs_params: CS2::ProverParams, + // if ccs is set, it will be used, if not, it will be computed at runtime + pub ccs: Option>, + pub mu: usize, + pub nu: usize, +} + +#[derive(Debug, Clone)] +pub struct VerifierParams< + C1: CurveGroup, + C2: CurveGroup, + CS1: CommitmentScheme, + CS2: CommitmentScheme, +> { + pub poseidon_config: PoseidonConfig, + pub ccs: CCS, + pub cf_r1cs: R1CS, + pub cs_vp: CS1::VerifierParams, + pub cf_cs_vp: CS2::VerifierParams, +} + +impl VerifierParams +where + C1: CurveGroup, + C2: CurveGroup, + CS1: CommitmentScheme, + CS2: CommitmentScheme, +{ + /// returns the hash of the public parameters of HyperNova + pub fn pp_hash(&self) -> Result { + pp_hash::( + &self.ccs, + &self.cf_r1cs, + &self.cs_vp, + &self.cf_cs_vp, + &self.poseidon_config, + ) + } +} + +/// Implements HyperNova+CycleFold's IVC, described in +/// [HyperNova](https://eprint.iacr.org/2023/573.pdf) and +/// [CycleFold](https://eprint.iacr.org/2023/1192.pdf), following the FoldingScheme trait +#[derive(Clone, Debug)] +pub struct HyperNova +where + C1: CurveGroup, + GC1: CurveVar> + ToConstraintFieldGadget>, + C2: CurveGroup, + GC2: CurveVar>, + FC: FCircuit, + CS1: CommitmentScheme, + CS2: CommitmentScheme, +{ + _gc1: PhantomData, + _c2: PhantomData, + _gc2: PhantomData, + + /// CCS of the Augmented Function circuit + pub ccs: CCS, + /// R1CS of the CycleFold circuit + pub cf_r1cs: R1CS, + pub poseidon_config: PoseidonConfig, + /// CommitmentScheme::ProverParams over C1 + pub cs_params: CS1::ProverParams, + /// CycleFold CommitmentScheme::ProverParams, over C2 + pub cf_cs_params: CS2::ProverParams, + /// F circuit, the circuit that is being 
folded + pub F: FC, + /// public params hash + pub pp_hash: C1::ScalarField, + pub mu: usize, // number of LCCCS instances to be folded + pub nu: usize, // number of CCCS instances to be folded + pub i: C1::ScalarField, + /// initial state + pub z_0: Vec, + /// current i-th state + pub z_i: Vec, + /// HyperNova instances + pub W_i: Witness, + pub U_i: LCCCS, + pub w_i: Witness, + pub u_i: CCCS, + + /// CycleFold running instance + pub cf_W_i: NovaWitness, + pub cf_U_i: CommittedInstance, +} + +impl MultiFolding + for HyperNova +where + C1: CurveGroup, + GC1: CurveVar> + ToConstraintFieldGadget>, + C2: CurveGroup, + GC2: CurveVar> + ToConstraintFieldGadget>, + FC: FCircuit, + CS1: CommitmentScheme, + CS2: CommitmentScheme, + ::BaseField: PrimeField, + ::BaseField: PrimeField, + ::ScalarField: Absorb, + ::ScalarField: Absorb, + C1: CurveGroup, + for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>, + for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>, +{ + type RunningInstance = (LCCCS, Witness); + type IncomingInstance = (CCCS, Witness); + type MultiInstance = (Vec, Vec); + + /// Creates a new LCCS instance for the given state, which satisfies the HyperNova.CCS. This + /// method can be used to generate the 'other' LCCS instances to be folded in the multi-folding + /// step. + fn new_running_instance( + &self, + mut rng: impl RngCore, + state: Vec, + external_inputs: Vec, + ) -> Result { + let r1cs_z = self.new_instance_generic(state, external_inputs)?; + // compute committed instances, w_{i+1}, u_{i+1}, which will be used as w_i, u_i, so we + // assign them directly to w_i, u_i. + let (U_i, W_i) = self + .ccs + .to_lcccs::<_, _, CS1>(&mut rng, &self.cs_params, &r1cs_z)?; + + #[cfg(test)] + U_i.check_relation(&self.ccs, &W_i)?; + + Ok((U_i, W_i)) + } + + /// Creates a new CCCS instance for the given state, which satisfies the HyperNova.CCS. This + /// method can be used to generate the 'other' CCCS instances to be folded in the multi-folding + /// step. + fn new_incoming_instance( + &self, + mut rng: impl RngCore, + state: Vec, + external_inputs: Vec, + ) -> Result { + let r1cs_z = self.new_instance_generic(state, external_inputs)?; + // compute committed instances, w_{i+1}, u_{i+1}, which will be used as w_i, u_i, so we + // assign them directly to w_i, u_i. + let (u_i, w_i) = self + .ccs + .to_cccs::<_, _, CS1>(&mut rng, &self.cs_params, &r1cs_z)?; + + #[cfg(test)] + u_i.check_relation(&self.ccs, &w_i)?; + + Ok((u_i, w_i)) + } +} + +impl HyperNova +where + C1: CurveGroup, + GC1: CurveVar> + ToConstraintFieldGadget>, + C2: CurveGroup, + GC2: CurveVar> + ToConstraintFieldGadget>, + FC: FCircuit, + CS1: CommitmentScheme, + CS2: CommitmentScheme, + ::BaseField: PrimeField, + ::BaseField: PrimeField, + ::ScalarField: Absorb, + ::ScalarField: Absorb, + C1: CurveGroup, + for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>, + for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>, +{ + /// internal helper for new_running_instance & new_incoming_instance methods, returns the R1CS + /// z=[u,x,w] vector to be used to create the LCCCS & CCCS fresh instances. 
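+ /// Here `u` is the relaxation factor (fixed to `1` for a fresh instance), `x` the public
+ /// inputs and `w` the witness, matching the `[1, r1cs_x_i1, r1cs_w_i1]` concatenation
+ /// built at the end of this method.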
+ fn new_instance_generic( + &self, + state: Vec, + external_inputs: Vec, + ) -> Result, Error> { + // prepare the initial dummy instances + let U_i = LCCCS::::dummy(self.ccs.l, self.ccs.t, self.ccs.s); + let mut u_i = CCCS::::dummy(self.ccs.l); + let (_, cf_U_i): (NovaWitness, CommittedInstance) = self.cf_r1cs.dummy_instance(); + + let sponge = PoseidonSponge::::new(&self.poseidon_config); + + u_i.x = vec![ + U_i.hash( + &sponge, + self.pp_hash, + C1::ScalarField::zero(), // i + self.z_0.clone(), + state.clone(), + ), + cf_U_i.hash_cyclefold(&sponge, self.pp_hash), + ]; + let us = vec![u_i.clone(); self.nu - 1]; + + let z_i1 = self + .F + .step_native(0, state.clone(), external_inputs.clone())?; + + // compute u_{i+1}.x + let U_i1 = LCCCS::dummy(self.ccs.l, self.ccs.t, self.ccs.s); + let u_i1_x = U_i1.hash( + &sponge, + self.pp_hash, + C1::ScalarField::one(), // i+1, where i=0 + self.z_0.clone(), + z_i1.clone(), + ); + + let cf_u_i1_x = cf_U_i.hash_cyclefold(&sponge, self.pp_hash); + let augmented_f_circuit = AugmentedFCircuit:: { + _c2: PhantomData, + _gc2: PhantomData, + poseidon_config: self.poseidon_config.clone(), + ccs: self.ccs.clone(), + pp_hash: Some(self.pp_hash), + mu: self.mu, + nu: self.nu, + i: Some(C1::ScalarField::zero()), + i_usize: Some(0), + z_0: Some(self.z_0.clone()), + z_i: Some(state.clone()), + external_inputs: Some(external_inputs), + U_i: Some(U_i.clone()), + Us: None, + u_i_C: Some(u_i.C), + us: Some(us), + U_i1_C: Some(U_i1.C), + F: self.F.clone(), + x: Some(u_i1_x), + nimfs_proof: None, + + // cyclefold values + cf_u_i_cmW: None, + cf_U_i: None, + cf_x: Some(cf_u_i1_x), + cf_cmT: None, + }; + + let (cs, _) = augmented_f_circuit.compute_cs_ccs()?; + + #[cfg(test)] + assert!(cs.is_satisfied()?); + + let (r1cs_w_i1, r1cs_x_i1) = extract_w_x::(&cs); // includes 1 and public inputs + + #[cfg(test)] + assert_eq!(r1cs_x_i1[0], augmented_f_circuit.x.unwrap()); + + let r1cs_z = [ + vec![C1::ScalarField::one()], + r1cs_x_i1.clone(), + r1cs_w_i1.clone(), + ] + .concat(); + Ok(r1cs_z) + } +} + +impl FoldingScheme + for HyperNova +where + C1: CurveGroup, + GC1: CurveVar> + ToConstraintFieldGadget>, + C2: CurveGroup, + GC2: CurveVar> + ToConstraintFieldGadget>, + FC: FCircuit, + CS1: CommitmentScheme, + CS2: CommitmentScheme, + ::BaseField: PrimeField, + ::BaseField: PrimeField, + ::ScalarField: Absorb, + ::ScalarField: Absorb, + C1: CurveGroup, + for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>, + for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>, +{ + /// Reuse Nova's PreprocessorParam, together with two usize values, which are mu & nu + /// respectively, which indicate the amount of LCCCS & CCCS instances to be folded at each + /// folding step. 
+ type PreprocessorParam = (PreprocessorParam, usize, usize); + type ProverParam = ProverParams; + type VerifierParam = VerifierParams; + type RunningInstance = (LCCCS, Witness); + type IncomingInstance = (CCCS, Witness); + type MultiCommittedInstanceWithWitness = + (Vec, Vec); + type CFInstance = (CommittedInstance, NovaWitness); + + fn preprocess( + mut rng: impl RngCore, + prep_param: &Self::PreprocessorParam, + ) -> Result<(Self::ProverParam, Self::VerifierParam), Error> { + let (prep_param, mu, nu) = prep_param; + if *mu < 1 || *nu < 1 { + return Err(Error::CantBeZero("mu,nu".to_string())); + } + + let augmented_f_circuit = AugmentedFCircuit::::empty( + &prep_param.poseidon_config, + prep_param.F.clone(), + None, + *mu, + *nu, + )?; + let ccs = augmented_f_circuit.ccs.clone(); + + let cf_circuit = CycleFoldCircuit::::empty(mu + nu); + let cf_r1cs = get_r1cs_from_cs::(cf_circuit)?; + + // if cs params exist, use them, if not, generate new ones + let cs_pp: CS1::ProverParams; + let cs_vp: CS1::VerifierParams; + let cf_cs_pp: CS2::ProverParams; + let cf_cs_vp: CS2::VerifierParams; + if prep_param.cs_pp.is_some() + && prep_param.cf_cs_pp.is_some() + && prep_param.cs_vp.is_some() + && prep_param.cf_cs_vp.is_some() + { + cs_pp = prep_param.clone().cs_pp.unwrap(); + cs_vp = prep_param.clone().cs_vp.unwrap(); + cf_cs_pp = prep_param.clone().cf_cs_pp.unwrap(); + cf_cs_vp = prep_param.clone().cf_cs_vp.unwrap(); + } else { + (cs_pp, cs_vp) = CS1::setup(&mut rng, ccs.n - ccs.l - 1)?; + (cf_cs_pp, cf_cs_vp) = CS2::setup(&mut rng, cf_r1cs.A.n_cols - cf_r1cs.l - 1)?; + } + + let pp = ProverParams:: { + poseidon_config: prep_param.poseidon_config.clone(), + cs_params: cs_pp.clone(), + cf_cs_params: cf_cs_pp.clone(), + ccs: Some(ccs.clone()), + mu: *mu, + nu: *nu, + }; + let vp = VerifierParams:: { + poseidon_config: prep_param.poseidon_config.clone(), + ccs, + cf_r1cs, + cs_vp: cs_vp.clone(), + cf_cs_vp: cf_cs_vp.clone(), + }; + Ok((pp, vp)) + } + + /// Initializes the HyperNova+CycleFold's IVC for the given parameters and initial state `z_0`. + fn init( + params: &(Self::ProverParam, Self::VerifierParam), + F: FC, + z_0: Vec, + ) -> Result { + let (pp, vp) = params; + if pp.mu < 1 || pp.nu < 1 { + return Err(Error::CantBeZero("mu,nu".to_string())); + } + + // `sponge` is for digest computation. + let sponge = PoseidonSponge::::new(&pp.poseidon_config); + + // prepare the HyperNova's AugmentedFCircuit and CycleFold's circuits and obtain its CCS + // and R1CS respectively + let augmented_f_circuit = AugmentedFCircuit::::empty( + &pp.poseidon_config, + F.clone(), + pp.ccs.clone(), + pp.mu, + pp.nu, + )?; + let ccs = augmented_f_circuit.ccs.clone(); + + let cf_circuit = CycleFoldCircuit::::empty(pp.mu + pp.nu); + let cf_r1cs = get_r1cs_from_cs::(cf_circuit)?; + + // compute the public params hash + let pp_hash = vp.pp_hash()?; + + // setup the dummy instances + let W_dummy = Witness::::dummy(&ccs); + let U_dummy = LCCCS::::dummy(ccs.l, ccs.t, ccs.s); + let w_dummy = W_dummy.clone(); + let mut u_dummy = CCCS::::dummy(ccs.l); + let (cf_W_dummy, cf_U_dummy): (NovaWitness, CommittedInstance) = + cf_r1cs.dummy_instance(); + u_dummy.x = vec![ + U_dummy.hash( + &sponge, + pp_hash, + C1::ScalarField::zero(), + z_0.clone(), + z_0.clone(), + ), + cf_U_dummy.hash_cyclefold(&sponge, pp_hash), + ]; + + // W_dummy=W_0 is a 'dummy witness', all zeroes, but with the size corresponding to the + // R1CS that we're working with. 
+ Ok(Self { + _gc1: PhantomData, + _c2: PhantomData, + _gc2: PhantomData, + ccs, + cf_r1cs, + poseidon_config: pp.poseidon_config.clone(), + cs_params: pp.cs_params.clone(), + cf_cs_params: pp.cf_cs_params.clone(), + F, + pp_hash, + mu: pp.mu, + nu: pp.nu, + i: C1::ScalarField::zero(), + z_0: z_0.clone(), + z_i: z_0, + W_i: W_dummy, + U_i: U_dummy, + w_i: w_dummy, + u_i: u_dummy, + // cyclefold running instance + cf_W_i: cf_W_dummy, + cf_U_i: cf_U_dummy, + }) + } + + /// Implements IVC.P of HyperNova+CycleFold + fn prove_step( + &mut self, + mut rng: impl RngCore, + external_inputs: Vec, + other_instances: Option, + ) -> Result<(), Error> { + // `sponge` is for digest computation. + let sponge = PoseidonSponge::::new(&self.poseidon_config); + + let other_instances = other_instances.ok_or(Error::MissingOtherInstances)?; + + #[allow(clippy::type_complexity)] + let (lcccs, cccs): ( + Vec<(LCCCS, Witness)>, + Vec<(CCCS, Witness)>, + ) = other_instances; + + // recall, mu & nu is the number of all the LCCCS & CCCS respectively, including the + // running and incoming instances that are not part of the 'other_instances', hence the +1 + // in the couple of following checks. + if lcccs.len() + 1 != self.mu { + return Err(Error::NotSameLength( + "other_instances.lcccs.len()".to_string(), + lcccs.len(), + "hypernova.mu".to_string(), + self.mu, + )); + } + if cccs.len() + 1 != self.nu { + return Err(Error::NotSameLength( + "other_instances.cccs.len()".to_string(), + cccs.len(), + "hypernova.nu".to_string(), + self.nu, + )); + } + + let (Us, Ws): (Vec>, Vec>) = lcccs.into_iter().unzip(); + let (us, ws): (Vec>, Vec>) = cccs.into_iter().unzip(); + + let augmented_f_circuit: AugmentedFCircuit; + + if self.z_i.len() != self.F.state_len() { + return Err(Error::NotSameLength( + "z_i.len()".to_string(), + self.z_i.len(), + "F.state_len()".to_string(), + self.F.state_len(), + )); + } + if external_inputs.len() != self.F.external_inputs_len() { + return Err(Error::NotSameLength( + "F.external_inputs_len()".to_string(), + self.F.external_inputs_len(), + "external_inputs.len()".to_string(), + external_inputs.len(), + )); + } + + if self.i > C1::ScalarField::from_le_bytes_mod_order(&usize::MAX.to_le_bytes()) { + return Err(Error::MaxStep); + } + + let mut i_bytes: [u8; 8] = [0; 8]; + i_bytes.copy_from_slice(&self.i.into_bigint().to_bytes_le()[..8]); + let i_usize: usize = usize::from_le_bytes(i_bytes); + + let z_i1 = self + .F + .step_native(i_usize, self.z_i.clone(), external_inputs.clone())?; + + // u_{i+1}.x[1] = H(cf_U_{i+1}) + let cf_u_i1_x: C1::ScalarField; + let (U_i1, W_i1); + + if self.i == C1::ScalarField::zero() { + W_i1 = Witness::::dummy(&self.ccs); + U_i1 = LCCCS::dummy(self.ccs.l, self.ccs.t, self.ccs.s); + + let u_i1_x = U_i1.hash( + &sponge, + self.pp_hash, + C1::ScalarField::one(), + self.z_0.clone(), + z_i1.clone(), + ); + + // hash the initial (dummy) CycleFold instance, which is used as the 2nd public + // input in the AugmentedFCircuit + cf_u_i1_x = self.cf_U_i.hash_cyclefold(&sponge, self.pp_hash); + + augmented_f_circuit = AugmentedFCircuit:: { + _c2: PhantomData, + _gc2: PhantomData, + poseidon_config: self.poseidon_config.clone(), + ccs: self.ccs.clone(), + pp_hash: Some(self.pp_hash), + mu: self.mu, + nu: self.nu, + i: Some(C1::ScalarField::zero()), + i_usize: Some(0), + z_0: Some(self.z_0.clone()), + z_i: Some(self.z_i.clone()), + external_inputs: Some(external_inputs.clone()), + U_i: Some(self.U_i.clone()), + Us: Some(Us.clone()), + u_i_C: Some(self.u_i.C), + us: Some(us.clone()), + 
U_i1_C: Some(U_i1.C), + F: self.F.clone(), + x: Some(u_i1_x), + nimfs_proof: None, + + // cyclefold values + cf_u_i_cmW: None, + cf_U_i: None, + cf_x: Some(cf_u_i1_x), + cf_cmT: None, + }; + } else { + let mut transcript_p: PoseidonSponge = + PoseidonSponge::::new(&self.poseidon_config); + transcript_p.absorb(&self.pp_hash); + let (rho_powers, nimfs_proof); + (nimfs_proof, U_i1, W_i1, rho_powers) = + NIMFS::>::prove( + &mut transcript_p, + &self.ccs, + &[vec![self.U_i.clone()], Us.clone()].concat(), + &[vec![self.u_i.clone()], us.clone()].concat(), + &[vec![self.W_i.clone()], Ws].concat(), + &[vec![self.w_i.clone()], ws].concat(), + )?; + + // sanity check: check the folded instance relation + #[cfg(test)] + U_i1.check_relation(&self.ccs, &W_i1)?; + + let u_i1_x = U_i1.hash( + &sponge, + self.pp_hash, + self.i + C1::ScalarField::one(), + self.z_0.clone(), + z_i1.clone(), + ); + + let rho_powers_Fq: Vec = rho_powers + .iter() + .map(|rho_i| { + C1::BaseField::from_bigint(BigInteger::from_bits_le( + &rho_i.into_bigint().to_bits_le(), + )) + .unwrap() + }) + .collect(); + let rho_powers_bits: Vec> = rho_powers + .iter() + .map(|rho_i| rho_i.into_bigint().to_bits_le()[..N_BITS_RO].to_vec()) + .collect(); + + // CycleFold part: + // get the vector used as public inputs 'x' in the CycleFold circuit. + // Place the random values and the points coordinates as the public input x: + // In Nova, this is: x == [r, p1, p2, p3]. + // In multifolding schemes such as HyperNova, this is: + // computed_x = [r_0, r_1, r_2, ..., r_n, p_0, p_1, p_2, ..., p_n], + // where each p_i is in fact p_i.to_constraint_field() + let cf_u_i_x = [ + rho_powers_Fq, + get_cm_coordinates(&self.U_i.C), + Us.iter() + .flat_map(|Us_i| get_cm_coordinates(&Us_i.C)) + .collect(), + get_cm_coordinates(&self.u_i.C), + us.iter() + .flat_map(|us_i| get_cm_coordinates(&us_i.C)) + .collect(), + get_cm_coordinates(&U_i1.C), + ] + .concat(); + + let cf_circuit = CycleFoldCircuit:: { + _gc: PhantomData, + n_points: self.mu + self.nu, + r_bits: Some(rho_powers_bits.clone()), + points: Some( + [ + vec![self.U_i.clone().C], + Us.iter().map(|Us_i| Us_i.C).collect(), + vec![self.u_i.clone().C], + us.iter().map(|us_i| us_i.C).collect(), + ] + .concat(), + ), + x: Some(cf_u_i_x.clone()), + }; + + let (_cf_w_i, cf_u_i, cf_W_i1, cf_U_i1, cf_cmT, _) = + fold_cyclefold_circuit::( + self.mu + self.nu, + &mut transcript_p, + self.cf_r1cs.clone(), + self.cf_cs_params.clone(), + self.pp_hash, + self.cf_W_i.clone(), // CycleFold running instance witness + self.cf_U_i.clone(), // CycleFold running instance + cf_u_i_x, + cf_circuit, + )?; + + cf_u_i1_x = cf_U_i1.hash_cyclefold(&sponge, self.pp_hash); + + augmented_f_circuit = AugmentedFCircuit:: { + _c2: PhantomData, + _gc2: PhantomData, + poseidon_config: self.poseidon_config.clone(), + ccs: self.ccs.clone(), + pp_hash: Some(self.pp_hash), + mu: self.mu, + nu: self.nu, + i: Some(self.i), + i_usize: Some(i_usize), + z_0: Some(self.z_0.clone()), + z_i: Some(self.z_i.clone()), + external_inputs: Some(external_inputs), + U_i: Some(self.U_i.clone()), + Us: Some(Us.clone()), + u_i_C: Some(self.u_i.C), + us: Some(us.clone()), + U_i1_C: Some(U_i1.C), + F: self.F.clone(), + x: Some(u_i1_x), + nimfs_proof: Some(nimfs_proof), + + // cyclefold values + cf_u_i_cmW: Some(cf_u_i.cmW), + cf_U_i: Some(self.cf_U_i.clone()), + cf_x: Some(cf_u_i1_x), + cf_cmT: Some(cf_cmT), + }; + + // assign the next round instances + self.cf_W_i = cf_W_i1; + self.cf_U_i = cf_U_i1; + } + + let (cs, _) = augmented_f_circuit.compute_cs_ccs()?; 
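+ // `compute_cs_ccs` synthesizes the AugmentedFCircuit natively with the values assigned
+ // above; from the resulting (satisfied) constraint system we extract x and w, rebuild
+ // z = [1, x, w], and commit to it below with `to_cccs` to obtain the next incoming
+ // instance (u_{i+1}, w_{i+1}).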
+ + #[cfg(test)] + assert!(cs.is_satisfied()?); + + let (r1cs_w_i1, r1cs_x_i1) = extract_w_x::(&cs); // includes 1 and public inputs + + let r1cs_z = [ + vec![C1::ScalarField::one()], + r1cs_x_i1.clone(), + r1cs_w_i1.clone(), + ] + .concat(); + // compute committed instances, w_{i+1}, u_{i+1}, which will be used as w_i, u_i, so we + // assign them directly to w_i, u_i. + let (u_i, w_i) = self + .ccs + .to_cccs::<_, C1, CS1>(&mut rng, &self.cs_params, &r1cs_z)?; + self.u_i = u_i.clone(); + self.w_i = w_i.clone(); + + // set values for next iteration + self.i += C1::ScalarField::one(); + // assign z_{i+1} into z_i + self.z_i = z_i1.clone(); + self.U_i = U_i1.clone(); + self.W_i = W_i1.clone(); + + #[cfg(test)] + { + // check the new LCCCS instance relation + self.U_i.check_relation(&self.ccs, &self.W_i)?; + // check the new CCCS instance relation + self.u_i.check_relation(&self.ccs, &self.w_i)?; + } + + Ok(()) + } + + fn state(&self) -> Vec { + self.z_i.clone() + } + + fn instances( + &self, + ) -> ( + Self::RunningInstance, + Self::IncomingInstance, + Self::CFInstance, + ) { + ( + (self.U_i.clone(), self.W_i.clone()), + (self.u_i.clone(), self.w_i.clone()), + (self.cf_U_i.clone(), self.cf_W_i.clone()), + ) + } + + /// Implements IVC.V of HyperNova+CycleFold. Notice that this method does not include the + /// commitments verification, which is done in the Decider. + fn verify( + vp: Self::VerifierParam, + z_0: Vec, // initial state + z_i: Vec, // last state + num_steps: C1::ScalarField, + running_instance: Self::RunningInstance, + incoming_instance: Self::IncomingInstance, + cyclefold_instance: Self::CFInstance, + ) -> Result<(), Error> { + if num_steps == C1::ScalarField::zero() { + if z_0 != z_i { + return Err(Error::IVCVerificationFail); + } + return Ok(()); + } + // `sponge` is for digest computation. 
+ let sponge = PoseidonSponge::::new(&vp.poseidon_config); + + let (U_i, W_i) = running_instance; + let (u_i, w_i) = incoming_instance; + let (cf_U_i, cf_W_i) = cyclefold_instance; + if u_i.x.len() != 2 || U_i.x.len() != 2 { + return Err(Error::IVCVerificationFail); + } + + let pp_hash = vp.pp_hash()?; + + // check that u_i's output points to the running instance + // u_i.X[0] == H(i, z_0, z_i, U_i) + let expected_u_i_x = U_i.hash(&sponge, pp_hash, num_steps, z_0, z_i.clone()); + if expected_u_i_x != u_i.x[0] { + return Err(Error::IVCVerificationFail); + } + // u_i.X[1] == H(cf_U_i) + let expected_cf_u_i_x = cf_U_i.hash_cyclefold(&sponge, pp_hash); + if expected_cf_u_i_x != u_i.x[1] { + return Err(Error::IVCVerificationFail); + } + + // check LCCCS satisfiability + U_i.check_relation(&vp.ccs, &W_i)?; + // check CCCS satisfiability + u_i.check_relation(&vp.ccs, &w_i)?; + + // check CycleFold's RelaxedR1CS satisfiability + vp.cf_r1cs + .check_relaxed_instance_relation(&cf_W_i, &cf_U_i)?; + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use crate::commitment::kzg::KZG; + use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective}; + use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; + + use super::*; + use crate::commitment::pedersen::Pedersen; + use crate::frontend::tests::CubicFCircuit; + use crate::transcript::poseidon::poseidon_canonical_config; + + #[test] + pub fn test_ivc() { + let poseidon_config = poseidon_canonical_config::(); + + let F_circuit = CubicFCircuit::::new(()).unwrap(); + + // run the test using Pedersen commitments on both sides of the curve cycle + test_ivc_opt::, Pedersen>( + poseidon_config.clone(), + F_circuit, + ); + // run the test using KZG for the commitments on the main curve, and Pedersen for the + // commitments on the secondary curve + test_ivc_opt::, Pedersen>(poseidon_config, F_circuit); + } + + // test_ivc allowing to choose the CommitmentSchemes + fn test_ivc_opt, CS2: CommitmentScheme>( + poseidon_config: PoseidonConfig, + F_circuit: CubicFCircuit, + ) { + let mut rng = ark_std::test_rng(); + + type HN = + HyperNova, CS1, CS2>; + let (mu, nu) = (2, 3); + + let prep_param = + PreprocessorParam::, CS1, CS2>::new( + poseidon_config.clone(), + F_circuit, + ); + let hypernova_params = HN::preprocess(&mut rng, &(prep_param, mu, nu)).unwrap(); + + let z_0 = vec![Fr::from(3_u32)]; + let mut hypernova = HN::init(&hypernova_params, F_circuit, z_0.clone()).unwrap(); + + let num_steps: usize = 3; + for _ in 0..num_steps { + // prepare some new instances to fold in the multifolding step + let mut lcccs = vec![]; + for j in 0..mu - 1 { + let instance_state = vec![Fr::from(j as u32 + 85_u32)]; + let (U, W) = hypernova + .new_running_instance(&mut rng, instance_state, vec![]) + .unwrap(); + lcccs.push((U, W)); + } + let mut cccs = vec![]; + for j in 0..nu - 1 { + let instance_state = vec![Fr::from(j as u32 + 15_u32)]; + let (u, w) = hypernova + .new_incoming_instance(&mut rng, instance_state, vec![]) + .unwrap(); + cccs.push((u, w)); + } + + dbg!(&hypernova.i); + hypernova + .prove_step(&mut rng, vec![], Some((lcccs, cccs))) + .unwrap(); + } + assert_eq!(Fr::from(num_steps as u32), hypernova.i); + + let (running_instance, incoming_instance, cyclefold_instance) = hypernova.instances(); + HN::verify( + hypernova_params.1, // verifier_params + z_0, + hypernova.z_i, + hypernova.i, + running_instance, + incoming_instance, + cyclefold_instance, + ) + .unwrap(); + } +} diff --git a/folding-schemes/src/folding/hypernova/nimfs.rs 
b/folding-schemes/src/folding/hypernova/nimfs.rs index 8aa6a49f..2a010a60 100644 --- a/folding-schemes/src/folding/hypernova/nimfs.rs +++ b/folding-schemes/src/folding/hypernova/nimfs.rs @@ -1,17 +1,20 @@ use ark_crypto_primitives::sponge::Absorb; use ark_ec::{CurveGroup, Group}; -use ark_ff::{Field, PrimeField}; +use ark_ff::{BigInteger, Field, PrimeField}; use ark_poly::univariate::DensePolynomial; use ark_poly::{DenseUVPolynomial, Polynomial}; use ark_std::{One, Zero}; -use super::cccs::{Witness, CCCS}; -use super::lcccs::LCCCS; -use super::utils::{compute_c, compute_g, compute_sigmas_thetas}; -use crate::ccs::CCS; +use super::{ + cccs::CCCS, + lcccs::LCCCS, + utils::{compute_c, compute_g, compute_sigmas_thetas}, + Witness, +}; +use crate::arith::ccs::CCS; +use crate::constants::N_BITS_RO; use crate::transcript::Transcript; -use crate::utils::hypercube::BooleanHypercube; -use crate::utils::sum_check::structs::IOPProof as SumCheckProof; +use crate::utils::sum_check::structs::{IOPProof as SumCheckProof, IOPProverMessage}; use crate::utils::sum_check::{IOPSumCheck, SumCheck}; use crate::utils::virtual_polynomial::VPAuxInfo; use crate::Error; @@ -19,27 +22,50 @@ use crate::Error; use std::fmt::Debug; use std::marker::PhantomData; -/// Proof defines a multifolding proof +/// NIMFSProof defines a multifolding proof #[derive(Clone, Debug)] -pub struct Proof { +pub struct NIMFSProof { pub sc_proof: SumCheckProof, pub sigmas_thetas: SigmasThetas, } +impl NIMFSProof { + pub fn dummy(ccs: &CCS, mu: usize, nu: usize) -> Self { + // use 'C::ScalarField::one()' instead of 'zero()' to enforce the NIMFSProof to have the + // same in-circuit representation to match the number of constraints of an actual proof. + NIMFSProof:: { + sc_proof: SumCheckProof:: { + point: vec![C::ScalarField::one(); ccs.s], + proofs: vec![ + IOPProverMessage { + coeffs: vec![C::ScalarField::one(); ccs.t + 1] + }; + ccs.s + ], + }, + sigmas_thetas: SigmasThetas( + vec![vec![C::ScalarField::one(); ccs.t]; mu], + vec![vec![C::ScalarField::one(); ccs.t]; nu], + ), + } + } +} + #[derive(Clone, Debug)] pub struct SigmasThetas(pub Vec>, pub Vec>); #[derive(Debug)] /// Implements the Non-Interactive Multi Folding Scheme described in section 5 of /// [HyperNova](https://eprint.iacr.org/2023/573.pdf) -pub struct NIMFS> { +pub struct NIMFS> { pub _c: PhantomData, pub _t: PhantomData, } -impl> NIMFS +impl> NIMFS where ::ScalarField: Absorb, + C::BaseField: PrimeField, { pub fn fold( lcccs: &[LCCCS], @@ -47,7 +73,7 @@ where sigmas_thetas: &SigmasThetas, r_x_prime: Vec, rho: C::ScalarField, - ) -> LCCCS { + ) -> (LCCCS, Vec) { let (sigmas, thetas) = (sigmas_thetas.0.clone(), sigmas_thetas.1.clone()); let mut C_folded = C::zero(); let mut u_folded = C::ScalarField::zero(); @@ -55,6 +81,7 @@ where let mut v_folded: Vec = vec![C::ScalarField::zero(); sigmas[0].len()]; let mut rho_i = C::ScalarField::one(); + let mut rho_powers = vec![C::ScalarField::zero(); lcccs.len() + cccs.len() - 1]; for i in 0..(lcccs.len() + cccs.len()) { let c: C; let u: C::ScalarField; @@ -94,16 +121,28 @@ where .map(|(a_i, b_i)| *a_i + b_i) .collect(); + // compute the next power of rho rho_i *= rho; + // crop the size of rho_i to N_BITS_RO + let rho_i_bits = rho_i.into_bigint().to_bits_le(); + rho_i = C::ScalarField::from_bigint(BigInteger::from_bits_le(&rho_i_bits[..N_BITS_RO])) + .unwrap(); + if i < lcccs.len() + cccs.len() - 1 { + // store the cropped rho_i into the rho_powers vector + rho_powers[i] = rho_i; + } } - LCCCS:: { - C: C_folded, - u: u_folded, - x: 
x_folded, - r_x: r_x_prime, - v: v_folded, - } + ( + LCCCS:: { + C: C_folded, + u: u_folded, + x: x_folded, + r_x: r_x_prime, + v: v_folded, + }, + rho_powers, + ) } pub fn fold_witness( @@ -114,8 +153,9 @@ where let mut w_folded: Vec = vec![C::ScalarField::zero(); w_lcccs[0].w.len()]; let mut r_w_folded = C::ScalarField::zero(); + let mut rho_i = C::ScalarField::one(); for i in 0..(w_lcccs.len() + w_cccs.len()) { - let rho_i = rho.pow([i as u64]); + // let rho_i = rho.pow([i as u64]); let w: Vec; let r_w: C::ScalarField; @@ -138,6 +178,13 @@ where .collect(); r_w_folded += rho_i * r_w; + + // compute the next power of rho + rho_i *= rho; + // crop the size of rho_i to N_BITS_RO + let rho_i_bits = rho_i.into_bigint().to_bits_le(); + rho_i = C::ScalarField::from_bigint(BigInteger::from_bits_le(&rho_i_bits[..N_BITS_RO])) + .unwrap(); } Witness { w: w_folded, @@ -151,14 +198,25 @@ where /// contains the sumcheck proof and the helper sumcheck claim sigmas and thetas. #[allow(clippy::type_complexity)] pub fn prove( - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, ccs: &CCS, running_instances: &[LCCCS], new_instances: &[CCCS], w_lcccs: &[Witness], w_cccs: &[Witness], - ) -> Result<(Proof, LCCCS, Witness), Error> { - // TODO appends to transcript + ) -> Result< + ( + NIMFSProof, + LCCCS, + Witness, + // Vec, + Vec, + ), + Error, + > { + // absorb instances to transcript + transcript.absorb(&running_instances); + transcript.absorb(&new_instances); if running_instances.is_empty() { return Err(Error::Empty); @@ -168,7 +226,6 @@ where } // construct the LCCCS z vector from the relaxation factor, public IO and witness - // XXX this deserves its own function in LCCCS let mut z_lcccs = Vec::new(); for (i, running_instance) in running_instances.iter().enumerate() { let z_1: Vec = [ @@ -203,43 +260,9 @@ where let g = compute_g(ccs, running_instances, &z_lcccs, &z_cccs, gamma, &beta)?; // Step 3: Run the sumcheck prover - let sumcheck_proof = IOPSumCheck::::prove(&g, transcript) + let sumcheck_proof = IOPSumCheck::::prove(&g, transcript) .map_err(|err| Error::SumCheckProveError(err.to_string()))?; - // Note: The following two "sanity checks" are done for this prototype, in a final version - // they should be removed. - // - // Sanity check 1: evaluate g(x) over x \in {0,1} (the boolean hypercube), and check that - // its sum is equal to the extracted_sum from the SumCheck. - ////////////////////////////////////////////////////////////////////// - let mut g_over_bhc = C::ScalarField::zero(); - for x in BooleanHypercube::new(ccs.s) { - g_over_bhc += g.evaluate(&x)?; - } - - // note: this is the sum of g(x) over the whole boolean hypercube - let extracted_sum = IOPSumCheck::::extract_sum(&sumcheck_proof); - - if extracted_sum != g_over_bhc { - return Err(Error::NotEqual); - } - // Sanity check 2: expect \sum v_j * gamma^j to be equal to the sum of g(x) over the - // boolean hypercube (and also equal to the extracted_sum from the SumCheck). 
- let mut sum_v_j_gamma = C::ScalarField::zero(); - for (i, running_instance) in running_instances.iter().enumerate() { - for j in 0..running_instance.v.len() { - let gamma_j = gamma.pow([(i * ccs.t + j) as u64]); - sum_v_j_gamma += running_instance.v[j] * gamma_j; - } - } - if g_over_bhc != sum_v_j_gamma { - return Err(Error::NotEqual); - } - if extracted_sum != sum_v_j_gamma { - return Err(Error::NotEqual); - } - ////////////////////////////////////////////////////////////////////// - // Step 2: dig into the sumcheck and extract r_x_prime let r_x_prime = sumcheck_proof.point.clone(); @@ -249,10 +272,12 @@ where // Step 6: Get the folding challenge let rho_scalar = C::ScalarField::from_le_bytes_mod_order(b"rho"); transcript.absorb(&rho_scalar); - let rho: C::ScalarField = transcript.get_challenge(); + let rho_bits: Vec = transcript.get_challenge_nbits(N_BITS_RO); + let rho: C::ScalarField = + C::ScalarField::from_bigint(BigInteger::from_bits_le(&rho_bits)).unwrap(); // Step 7: Create the folded instance - let folded_lcccs = Self::fold( + let (folded_lcccs, rho_powers) = Self::fold( running_instances, new_instances, &sigmas_thetas, @@ -264,12 +289,13 @@ where let folded_witness = Self::fold_witness(w_lcccs, w_cccs, rho); Ok(( - Proof:: { + NIMFSProof:: { sc_proof: sumcheck_proof, sigmas_thetas, }, folded_lcccs, folded_witness, + rho_powers, )) } @@ -277,13 +303,15 @@ where /// into a single LCCCS instance. /// Returns the folded LCCCS instance. pub fn verify( - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, ccs: &CCS, running_instances: &[LCCCS], new_instances: &[CCCS], - proof: Proof, + proof: NIMFSProof, ) -> Result, Error> { - // TODO appends to transcript + // absorb instances to transcript + transcript.absorb(&running_instances); + transcript.absorb(&new_instances); if running_instances.is_empty() { return Err(Error::Empty); @@ -318,9 +346,13 @@ where } // Verify the interactive part of the sumcheck - let sumcheck_subclaim = - IOPSumCheck::::verify(sum_v_j_gamma, &proof.sc_proof, &vp_aux_info, transcript) - .map_err(|err| Error::SumCheckVerifyError(err.to_string()))?; + let sumcheck_subclaim = IOPSumCheck::::verify( + sum_v_j_gamma, + &proof.sc_proof, + &vp_aux_info, + transcript, + ) + .map_err(|err| Error::SumCheckVerifyError(err.to_string()))?; // Step 2: Dig into the sumcheck claim and extract the randomness used let r_x_prime = sumcheck_subclaim.point.clone(); @@ -359,7 +391,9 @@ where // Step 6: Get the folding challenge let rho_scalar = C::ScalarField::from_le_bytes_mod_order(b"rho"); transcript.absorb(&rho_scalar); - let rho: C::ScalarField = transcript.get_challenge(); + let rho_bits: Vec = transcript.get_challenge_nbits(N_BITS_RO); + let rho: C::ScalarField = + C::ScalarField::from_bigint(BigInteger::from_bits_le(&rho_bits)).unwrap(); // Step 7: Compute the folded instance Ok(Self::fold( @@ -368,16 +402,21 @@ where &proof.sigmas_thetas, r_x_prime, rho, - )) + ) + .0) } } #[cfg(test)] pub mod tests { use super::*; - use crate::ccs::tests::{get_test_ccs, get_test_z}; + use crate::arith::{ + ccs::tests::{get_test_ccs, get_test_z}, + Arith, + }; use crate::transcript::poseidon::poseidon_canonical_config; - use crate::transcript::poseidon::PoseidonTranscript; + use ark_crypto_primitives::sponge::poseidon::PoseidonSponge; + use ark_crypto_primitives::sponge::CryptographicSponge; use ark_std::test_rng; use ark_std::UniformRand; @@ -401,16 +440,20 @@ pub mod tests { let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); - let 
(lcccs, w1) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap(); - let (cccs, w2) = ccs.to_cccs(&mut rng, &pedersen_params, &z2).unwrap(); + let (lcccs, w1) = ccs + .to_lcccs::<_, Projective, Pedersen>(&mut rng, &pedersen_params, &z1) + .unwrap(); + let (cccs, w2) = ccs + .to_cccs::<_, Projective, Pedersen>(&mut rng, &pedersen_params, &z2) + .unwrap(); - lcccs.check_relation(&pedersen_params, &ccs, &w1).unwrap(); - cccs.check_relation(&pedersen_params, &ccs, &w2).unwrap(); + lcccs.check_relation(&ccs, &w1).unwrap(); + cccs.check_relation(&ccs, &w2).unwrap(); let mut rng = test_rng(); let rho = Fr::rand(&mut rng); - let folded = NIMFS::>::fold( + let (folded, _) = NIMFS::>::fold( &[lcccs], &[cccs], &sigmas_thetas, @@ -418,13 +461,10 @@ pub mod tests { rho, ); - let w_folded = - NIMFS::>::fold_witness(&[w1], &[w2], rho); + let w_folded = NIMFS::>::fold_witness(&[w1], &[w2], rho); // check lcccs relation - folded - .check_relation(&pedersen_params, &ccs, &w_folded) - .unwrap(); + folded.check_relation(&ccs, &w_folded).unwrap(); } /// Perform multifolding of an LCCCS instance with a CCCS instance (as described in the paper) @@ -443,19 +483,22 @@ pub mod tests { let z_2 = get_test_z(4); // Create the LCCCS instance out of z_1 - let (running_instance, w1) = ccs.to_lcccs(&mut rng, &pedersen_params, &z_1).unwrap(); + let (running_instance, w1) = ccs + .to_lcccs::<_, _, Pedersen>(&mut rng, &pedersen_params, &z_1) + .unwrap(); // Create the CCCS instance out of z_2 - let (new_instance, w2) = ccs.to_cccs(&mut rng, &pedersen_params, &z_2).unwrap(); + let (new_instance, w2) = ccs + .to_cccs::<_, _, Pedersen>(&mut rng, &pedersen_params, &z_2) + .unwrap(); // Prover's transcript let poseidon_config = poseidon_canonical_config::(); - let mut transcript_p: PoseidonTranscript = - PoseidonTranscript::::new(&poseidon_config); + let mut transcript_p: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); transcript_p.absorb(&Fr::from_le_bytes_mod_order(b"init init")); // Run the prover side of the multifolding - let (proof, folded_lcccs, folded_witness) = - NIMFS::>::prove( + let (proof, folded_lcccs, folded_witness, _) = + NIMFS::>::prove( &mut transcript_p, &ccs, &[running_instance.clone()], @@ -466,12 +509,11 @@ pub mod tests { .unwrap(); // Verifier's transcript - let mut transcript_v: PoseidonTranscript = - PoseidonTranscript::::new(&poseidon_config); + let mut transcript_v: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); transcript_v.absorb(&Fr::from_le_bytes_mod_order(b"init init")); // Run the verifier side of the multifolding - let folded_lcccs_v = NIMFS::>::verify( + let folded_lcccs_v = NIMFS::>::verify( &mut transcript_v, &ccs, &[running_instance.clone()], @@ -482,9 +524,7 @@ pub mod tests { assert_eq!(folded_lcccs, folded_lcccs_v); // Check that the folded LCCCS instance is a valid instance with respect to the folded witness - folded_lcccs - .check_relation(&pedersen_params, &ccs, &folded_witness) - .unwrap(); + folded_lcccs.check_relation(&ccs, &folded_witness).unwrap(); } /// Perform multiple steps of multifolding of an LCCCS instance with a CCCS instance @@ -499,32 +539,30 @@ pub mod tests { // LCCCS witness let z_1 = get_test_z(2); - let (mut running_instance, mut w1) = - ccs.to_lcccs(&mut rng, &pedersen_params, &z_1).unwrap(); + let (mut running_instance, mut w1) = ccs + .to_lcccs::<_, _, Pedersen>(&mut rng, &pedersen_params, &z_1) + .unwrap(); let poseidon_config = poseidon_canonical_config::(); - let mut transcript_p: PoseidonTranscript = - 
PoseidonTranscript::::new(&poseidon_config); + let mut transcript_p: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); transcript_p.absorb(&Fr::from_le_bytes_mod_order(b"init init")); - let mut transcript_v: PoseidonTranscript = - PoseidonTranscript::::new(&poseidon_config); + let mut transcript_v: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); transcript_v.absorb(&Fr::from_le_bytes_mod_order(b"init init")); let n: usize = 10; for i in 3..n { - println!("\niteration: i {}", i); // DBG - // CCS witness let z_2 = get_test_z(i); - println!("z_2 {:?}", z_2); // DBG - let (new_instance, w2) = ccs.to_cccs(&mut rng, &pedersen_params, &z_2).unwrap(); + let (new_instance, w2) = ccs + .to_cccs::<_, _, Pedersen>(&mut rng, &pedersen_params, &z_2) + .unwrap(); // run the prover side of the multifolding - let (proof, folded_lcccs, folded_witness) = - NIMFS::>::prove( + let (proof, folded_lcccs, folded_witness, _) = + NIMFS::>::prove( &mut transcript_p, &ccs, &[running_instance.clone()], @@ -535,7 +573,7 @@ pub mod tests { .unwrap(); // run the verifier side of the multifolding - let folded_lcccs_v = NIMFS::>::verify( + let folded_lcccs_v = NIMFS::>::verify( &mut transcript_v, &ccs, &[running_instance.clone()], @@ -546,10 +584,7 @@ pub mod tests { assert_eq!(folded_lcccs, folded_lcccs_v); // check that the folded instance with the folded witness holds the LCCCS relation - println!("check_relation {}", i); - folded_lcccs - .check_relation(&pedersen_params, &ccs, &folded_witness) - .unwrap(); + folded_lcccs.check_relation(&ccs, &folded_witness).unwrap(); running_instance = folded_lcccs; w1 = folded_witness; @@ -585,7 +620,9 @@ pub mod tests { let mut lcccs_instances = Vec::new(); let mut w_lcccs = Vec::new(); for z_i in z_lcccs.iter() { - let (running_instance, w) = ccs.to_lcccs(&mut rng, &pedersen_params, z_i).unwrap(); + let (running_instance, w) = ccs + .to_lcccs::<_, _, Pedersen>(&mut rng, &pedersen_params, z_i) + .unwrap(); lcccs_instances.push(running_instance); w_lcccs.push(w); } @@ -593,20 +630,21 @@ pub mod tests { let mut cccs_instances = Vec::new(); let mut w_cccs = Vec::new(); for z_i in z_cccs.iter() { - let (new_instance, w) = ccs.to_cccs(&mut rng, &pedersen_params, z_i).unwrap(); + let (new_instance, w) = ccs + .to_cccs::<_, _, Pedersen>(&mut rng, &pedersen_params, z_i) + .unwrap(); cccs_instances.push(new_instance); w_cccs.push(w); } // Prover's transcript let poseidon_config = poseidon_canonical_config::(); - let mut transcript_p: PoseidonTranscript = - PoseidonTranscript::::new(&poseidon_config); + let mut transcript_p: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); transcript_p.absorb(&Fr::from_le_bytes_mod_order(b"init init")); // Run the prover side of the multifolding - let (proof, folded_lcccs, folded_witness) = - NIMFS::>::prove( + let (proof, folded_lcccs, folded_witness, _) = + NIMFS::>::prove( &mut transcript_p, &ccs, &lcccs_instances, @@ -617,12 +655,11 @@ pub mod tests { .unwrap(); // Verifier's transcript - let mut transcript_v: PoseidonTranscript = - PoseidonTranscript::::new(&poseidon_config); + let mut transcript_v: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); transcript_v.absorb(&Fr::from_le_bytes_mod_order(b"init init")); // Run the verifier side of the multifolding - let folded_lcccs_v = NIMFS::>::verify( + let folded_lcccs_v = NIMFS::>::verify( &mut transcript_v, &ccs, &lcccs_instances, @@ -633,9 +670,7 @@ pub mod tests { assert_eq!(folded_lcccs, folded_lcccs_v); // Check that the folded LCCCS instance is a valid instance with 
respect to the folded witness - folded_lcccs - .check_relation(&pedersen_params, &ccs, &folded_witness) - .unwrap(); + folded_lcccs.check_relation(&ccs, &folded_witness).unwrap(); } /// Test that generates mu>1 and nu>1 instances, and folds them in a single multifolding step @@ -651,13 +686,11 @@ pub mod tests { let poseidon_config = poseidon_canonical_config::(); // Prover's transcript - let mut transcript_p: PoseidonTranscript = - PoseidonTranscript::::new(&poseidon_config); + let mut transcript_p: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); transcript_p.absorb(&Fr::from_le_bytes_mod_order(b"init init")); // Verifier's transcript - let mut transcript_v: PoseidonTranscript = - PoseidonTranscript::::new(&poseidon_config); + let mut transcript_v: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); transcript_v.absorb(&Fr::from_le_bytes_mod_order(b"init init")); let n_steps = 3; @@ -683,7 +716,9 @@ pub mod tests { let mut lcccs_instances = Vec::new(); let mut w_lcccs = Vec::new(); for z_i in z_lcccs.iter() { - let (running_instance, w) = ccs.to_lcccs(&mut rng, &pedersen_params, z_i).unwrap(); + let (running_instance, w) = ccs + .to_lcccs::<_, _, Pedersen>(&mut rng, &pedersen_params, z_i) + .unwrap(); lcccs_instances.push(running_instance); w_lcccs.push(w); } @@ -691,14 +726,16 @@ pub mod tests { let mut cccs_instances = Vec::new(); let mut w_cccs = Vec::new(); for z_i in z_cccs.iter() { - let (new_instance, w) = ccs.to_cccs(&mut rng, &pedersen_params, z_i).unwrap(); + let (new_instance, w) = ccs + .to_cccs::<_, _, Pedersen>(&mut rng, &pedersen_params, z_i) + .unwrap(); cccs_instances.push(new_instance); w_cccs.push(w); } // Run the prover side of the multifolding - let (proof, folded_lcccs, folded_witness) = - NIMFS::>::prove( + let (proof, folded_lcccs, folded_witness, _) = + NIMFS::>::prove( &mut transcript_p, &ccs, &lcccs_instances, @@ -709,7 +746,7 @@ pub mod tests { .unwrap(); // Run the verifier side of the multifolding - let folded_lcccs_v = NIMFS::>::verify( + let folded_lcccs_v = NIMFS::>::verify( &mut transcript_v, &ccs, &lcccs_instances, @@ -721,9 +758,7 @@ pub mod tests { assert_eq!(folded_lcccs, folded_lcccs_v); // Check that the folded LCCCS instance is a valid instance with respect to the folded witness - folded_lcccs - .check_relation(&pedersen_params, &ccs, &folded_witness) - .unwrap(); + folded_lcccs.check_relation(&ccs, &folded_witness).unwrap(); } } } diff --git a/folding-schemes/src/folding/hypernova/utils.rs b/folding-schemes/src/folding/hypernova/utils.rs index 83a20bf8..160ae5a3 100644 --- a/folding-schemes/src/folding/hypernova/utils.rs +++ b/folding-schemes/src/folding/hypernova/utils.rs @@ -7,7 +7,7 @@ use std::sync::Arc; use super::lcccs::LCCCS; use super::nimfs::SigmasThetas; -use crate::ccs::CCS; +use crate::arith::ccs::CCS; use crate::utils::mle::dense_vec_to_dense_mle; use crate::utils::vec::mat_vec_mul; use crate::utils::virtual_polynomial::{build_eq_x_r_vec, eq_eval, VirtualPolynomial}; @@ -49,7 +49,7 @@ pub fn compute_sigmas_thetas( Ok(SigmasThetas(sigmas, thetas)) } -/// computes c from the step 5 in section 5 of HyperNova, adapted to multiple LCCCS & CCCS +/// Computes c from the step 5 in section 5 of HyperNova, adapted to multiple LCCCS & CCCS /// instances: /// $$ /// c = \sum_{i \in [\mu]} \left(\sum_{j \in [t]} \gamma^{i \cdot t + j} \cdot e_i \cdot \sigma_{i,j} \right) @@ -167,13 +167,15 @@ pub mod tests { use ark_std::Zero; use super::*; - use crate::ccs::tests::{get_test_ccs, get_test_z}; + use crate::arith::{ + 
ccs::tests::{get_test_ccs, get_test_z}, + Arith, + }; use crate::commitment::{pedersen::Pedersen, CommitmentScheme}; use crate::folding::hypernova::lcccs::tests::compute_Ls; use crate::utils::hypercube::BooleanHypercube; use crate::utils::mle::matrix_to_dense_mle; use crate::utils::multilinear_polynomial::tests::fix_last_variables; - use crate::utils::virtual_polynomial::eq_eval; /// Given M(x,y) matrix and a random field element `r`, test that ~M(r,y) is is an s'-variable polynomial which /// compresses every column j of the M(x,y) matrix by performing a random linear combination between the elements @@ -239,7 +241,9 @@ pub mod tests { // Initialize a multifolding object let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); - let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap(); + let (lcccs_instance, _) = ccs + .to_lcccs::<_, _, Pedersen>(&mut rng, &pedersen_params, &z1) + .unwrap(); let sigmas_thetas = compute_sigmas_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime).unwrap(); @@ -287,7 +291,9 @@ pub mod tests { // Initialize a multifolding object let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); - let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap(); + let (lcccs_instance, _) = ccs + .to_lcccs::<_, _, Pedersen>(&mut rng, &pedersen_params, &z1) + .unwrap(); // Compute g(x) with that r_x let g = compute_g::( diff --git a/folding-schemes/src/folding/mova/homogenization.rs b/folding-schemes/src/folding/mova/homogenization.rs index 70e683c8..2ce5fe95 100644 --- a/folding-schemes/src/folding/mova/homogenization.rs +++ b/folding-schemes/src/folding/mova/homogenization.rs @@ -1,5 +1,6 @@ use std::fmt::Debug; use std::marker::PhantomData; +use std::time::Instant; use ark_crypto_primitives::sponge::Absorb; use ark_ec::{CurveGroup, Group}; @@ -31,11 +32,11 @@ pub struct HomogeneousEvaluationClaim { pub rE_prime: Vec, } -pub trait Homogenization> { +pub trait Homogenization> { type Proof: Clone + Debug; fn prove( - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, ci1: &CommittedInstance, ci2: &CommittedInstance, w1: &Witness, @@ -43,7 +44,7 @@ pub trait Homogenization> { ) -> Result<(Self::Proof, HomogeneousEvaluationClaim), Error>; fn verify( - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, ci1: &CommittedInstance, ci2: &CommittedInstance, proof: &Self::Proof, @@ -55,12 +56,6 @@ pub trait Homogenization> { >; } -#[derive(Clone, Debug, Default)] -pub struct SumCheckHomogenization> { - _phantom_C: std::marker::PhantomData, - _phantom_T: std::marker::PhantomData, -} - #[derive(Clone, Debug)] pub struct PointVsLineProof { pub h1: DensePolynomial, @@ -68,123 +63,19 @@ pub struct PointVsLineProof { } #[derive(Clone, Debug, Default)] -pub struct PointVsLineHomogenization> { +pub struct PointVsLineHomogenization> { _phantom_C: std::marker::PhantomData, _phantom_T: std::marker::PhantomData, } -impl> Homogenization for SumCheckHomogenization -where - ::ScalarField: Absorb, -{ - type Proof = SumCheckProof; - - fn prove( - transcript: &mut impl Transcript, - ci1: &CommittedInstance, - ci2: &CommittedInstance, - w1: &Witness, - w2: &Witness, - ) -> Result<(Self::Proof, HomogeneousEvaluationClaim), Error> { - let vars = log2(w1.E.len()) as usize; - - let beta_scalar = C::ScalarField::from_le_bytes_mod_order(b"beta"); - transcript.absorb(&beta_scalar); - let beta: C::ScalarField = transcript.get_challenge(); - - let g = compute_g(ci1, ci2, w1, 
w2, &beta)?; - - let sumcheck_proof = IOPSumCheck::::prove(&g, transcript) - .map_err(|err| Error::SumCheckProveError(err.to_string()))?; - - let rE_prime = sumcheck_proof.point.clone(); - - let mleE1 = dense_vec_to_dense_mle(vars, &w1.E); - let mleE2 = dense_vec_to_dense_mle(vars, &w2.E); - - let mleE1_prime = mleE1.evaluate(&rE_prime).ok_or(Error::EvaluationFail)?; - let mleE2_prime = mleE2.evaluate(&rE_prime).ok_or(Error::EvaluationFail)?; - - Ok(( - sumcheck_proof, - HomogeneousEvaluationClaim { - mleE1_prime, - mleE2_prime, - rE_prime, - }, - )) - } - - fn verify( - transcript: &mut impl Transcript, - ci1: &CommittedInstance, - ci2: &CommittedInstance, - proof: &Self::Proof, - mleE1_prime: &C::ScalarField, - mleE2_prime: &C::ScalarField, - ) -> Result< - Vec<::ScalarField>, // rE=rE1'=rE2' - Error, - > { - let beta_scalar = C::ScalarField::from_le_bytes_mod_order(b"beta"); - transcript.absorb(&beta_scalar); - let beta: C::ScalarField = transcript.get_challenge(); - - let vp_aux_info = VPAuxInfo:: { - max_degree: 2, - num_variables: ci1.rE.len(), - phantom: PhantomData::, - }; - - // Step 3: Start verifying the sumcheck - // First, compute the expected sumcheck sum: \sum gamma^j v_j - let mut sum_evaluation_claims = ci1.mleE; - - sum_evaluation_claims += beta * ci2.mleE; - - // Verify the interactive part of the sumcheck - let sumcheck_subclaim = - IOPSumCheck::::verify(sum_evaluation_claims, proof, &vp_aux_info, transcript) - .map_err(|err| Error::SumCheckVerifyError(err.to_string()))?; - - let rE_prime = sumcheck_subclaim.point.clone(); - - let g = compute_c( - *mleE1_prime, - *mleE2_prime, - beta, - &ci1.rE, - &ci2.rE, - &rE_prime, - )?; - - if g != sumcheck_subclaim.expected_evaluation { - return Err(Error::NotEqual); - } - - let g_on_rxprime_from_sumcheck_last_eval = DensePolynomial::from_coefficients_slice( - &proof.proofs.last().ok_or(Error::Empty)?.coeffs, - ) - .evaluate(rE_prime.last().ok_or(Error::Empty)?); - if g_on_rxprime_from_sumcheck_last_eval != g { - return Err(Error::NotEqual); - } - if g_on_rxprime_from_sumcheck_last_eval != sumcheck_subclaim.expected_evaluation { - return Err(Error::NotEqual); - } - - Ok(rE_prime) - } -} - -impl> Homogenization for PointVsLineHomogenization +impl> Homogenization for PointVsLineHomogenization where ::ScalarField: Absorb, { type Proof = PointVsLineProof; fn prove( - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, ci1: &CommittedInstance, ci2: &CommittedInstance, w1: &Witness, @@ -194,19 +85,29 @@ where let mleE1 = dense_vec_to_dense_mle(vars, &w1.E); let mleE2 = dense_vec_to_dense_mle(vars, &w2.E); + let start = Instant::now(); + let elapsed = start.elapsed(); + println!("Time before computing h {:?}", elapsed); let h1 = compute_h(&mleE1, &ci1.rE, &ci2.rE)?; let h2 = compute_h(&mleE2, &ci1.rE, &ci2.rE)?; - transcript.absorb_vec(h1.coeffs()); - transcript.absorb_vec(h2.coeffs()); + let elapsed = start.elapsed(); + println!("Time after computing h1 h2 {:?}", elapsed); + + transcript.absorb(&h1.coeffs()); + transcript.absorb(&h2.coeffs()); let beta_scalar = C::ScalarField::from_le_bytes_mod_order(b"beta"); transcript.absorb(&beta_scalar); let beta = transcript.get_challenge(); + let elapsed = start.elapsed(); + println!("Time before evaluating h {:?}", elapsed); let mleE1_prime = h1.evaluate(&beta); let mleE2_prime = h2.evaluate(&beta); + let elapsed = start.elapsed(); + println!("Time after evaluating h {:?}", elapsed); let rE_prime = compute_l(&ci1.rE, &ci2.rE, beta)?; @@ -221,7 +122,7 @@ where } fn verify( - 
transcript: &mut impl Transcript, + transcript: &mut impl Transcript, ci1: &CommittedInstance, ci2: &CommittedInstance, proof: &Self::Proof, @@ -239,8 +140,8 @@ where return Err(Error::NotEqual); } - transcript.absorb_vec(proof.h1.coeffs()); - transcript.absorb_vec(proof.h2.coeffs()); + transcript.absorb(&proof.h1.coeffs()); + transcript.absorb(&proof.h2.coeffs()); let beta_scalar = C::ScalarField::from_le_bytes_mod_order(b"beta"); transcript.absorb(&beta_scalar); diff --git a/folding-schemes/src/folding/mova/mod.rs b/folding-schemes/src/folding/mova/mod.rs index dc43550d..0e1b4f71 100644 --- a/folding-schemes/src/folding/mova/mod.rs +++ b/folding-schemes/src/folding/mova/mod.rs @@ -5,7 +5,7 @@ use ark_crypto_primitives::{ sponge::{poseidon::PoseidonConfig, Absorb}, }; use ark_ec::{AffineRepr, CurveGroup, Group}; -use ark_ff::ToConstraintField; +use ark_ff::{PrimeField, ToConstraintField}; use ark_poly::MultilinearExtension; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; @@ -15,11 +15,10 @@ use ark_std::{One, Zero}; use std::usize; use crate::commitment::CommitmentScheme; -use crate::folding::circuits::nonnative::{ - affine::nonnative_affine_to_field_elements, uint::nonnative_field_to_field_elements, -}; + use crate::utils::vec::is_zero_vec; use crate::Error; +use crate::transcript::{AbsorbNonNative, Transcript}; use crate::utils::mle::dense_vec_to_dense_mle; @@ -51,74 +50,71 @@ impl CommittedInstance { } } +impl Absorb for CommittedInstance + where + C::ScalarField: Absorb, +{ + fn to_sponge_bytes(&self, dest: &mut Vec) { + // This is never called + unimplemented!() + } + + fn to_sponge_field_elements(&self, dest: &mut Vec) { + self.u.to_sponge_field_elements(dest); + self.x.to_sponge_field_elements(dest); + self.cmW + .to_native_sponge_field_elements_as_vec() + .to_sponge_field_elements(dest); + } +} + +impl AbsorbNonNative for CommittedInstance + where + ::BaseField: ark_ff::PrimeField + Absorb, +{ + fn to_native_sponge_field_elements(&self, dest: &mut Vec) { + self.rE.to_native_sponge_field_elements(dest); + [self.mleE].to_native_sponge_field_elements(dest); + [self.u].to_native_sponge_field_elements(dest); + self.x.to_native_sponge_field_elements(dest); + let (cmW_x, cmW_y) = match self.cmW.into_affine().xy() { + Some((&x, &y)) => (x, y), + None => (C::BaseField::zero(), C::BaseField::zero()), + }; + + cmW_x.to_sponge_field_elements(dest); + cmW_y.to_sponge_field_elements(dest); + } +} + impl CommittedInstance -where - ::ScalarField: Absorb, - ::BaseField: ark_ff::PrimeField, + where + ::ScalarField: Absorb, + ::BaseField: ark_ff::PrimeField, { /// hash implements the committed instance hash compatible with the gadget implemented in /// nova/circuits.rs::CommittedInstanceVar.hash. /// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and `U_i` is the /// `CommittedInstance`. 
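    /// Minimal usage sketch of the sponge-based hash below (illustrative only; assumes a
    /// Poseidon config and a curve `C` with `C::ScalarField: Absorb`, as elsewhere in this
    /// crate):
    ///   let sponge = PoseidonSponge::<C::ScalarField>::new(&poseidon_config);
    ///   let h = ci.hash(&sponge, pp_hash, i, z_0.clone(), z_i.clone());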
- pub fn hash( + pub fn hash>( &self, - poseidon_config: &PoseidonConfig, + sponge: &T, + pp_hash: C::ScalarField, // public params hash i: C::ScalarField, z_0: Vec, z_i: Vec, - ) -> Result { - // let (cmE_x, cmE_y) = nonnative_affine_to_field_elements::(self.cmE)?; - let (cmW_x, cmW_y) = nonnative_affine_to_field_elements::(self.cmW)?; - - CRH::::evaluate( - poseidon_config, - [ - vec![i], - z_0, - z_i, - vec![self.u], - self.x.clone(), - vec![self.mleE], - cmW_x, - cmW_y, - ] - .concat(), - ) - .map_err(|e| Error::Other(e.to_string())) + ) -> C::ScalarField { + let mut sponge = sponge.clone(); + sponge.absorb(&pp_hash); + sponge.absorb(&i); + sponge.absorb(&z_0); + sponge.absorb(&z_i); + sponge.absorb(&self); + sponge.squeeze_field_elements(1)[0] } } -impl ToConstraintField for CommittedInstance -where - ::BaseField: ark_ff::PrimeField + Absorb, -{ - fn to_field_elements(&self) -> Option> { - let rE = self - .rE - .iter() - .flat_map(nonnative_field_to_field_elements) - .collect(); - let mleE = nonnative_field_to_field_elements(&self.mleE); - let u = nonnative_field_to_field_elements(&self.u); - let x = self - .x - .iter() - .flat_map(nonnative_field_to_field_elements) - .collect::>(); - let (cmW_x, cmW_y, cmW_is_inf) = match self.cmW.into_affine().xy() { - Some((&x, &y)) => (x, y, C::BaseField::zero()), - None => ( - C::BaseField::zero(), - C::BaseField::zero(), - C::BaseField::one(), - ), - }; - // Concatenate `cmE_is_inf` and `cmW_is_inf` to save constraints for CRHGadget::evaluate in the corresponding circuit - let is_inf = cmW_is_inf; - Some([u, x, rE, mleE, vec![cmW_x, cmW_y, is_inf]].concat()) - } -} #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] pub struct Witness { diff --git a/folding-schemes/src/folding/mova/nifs.rs b/folding-schemes/src/folding/mova/nifs.rs index 2179d395..ead664bc 100644 --- a/folding-schemes/src/folding/mova/nifs.rs +++ b/folding-schemes/src/folding/mova/nifs.rs @@ -5,11 +5,12 @@ use ark_ff::PrimeField; use ark_poly::MultilinearExtension; use ark_std::{log2, Zero}; use std::marker::PhantomData; +use std::time::Instant; use super::homogenization::{HomogeneousEvaluationClaim, Homogenization}; use super::{CommittedInstance, InstanceWitness, Witness}; -use crate::ccs::r1cs::R1CS; +use crate::arith::r1cs::R1CS; use crate::commitment::CommitmentScheme; use crate::transcript::Transcript; @@ -21,7 +22,7 @@ use crate::Error; /// Proof defines a multifolding proof #[derive(Clone, Debug)] -pub struct Proof, H: Homogenization> { +pub struct Proof, H: Homogenization> { pub hg_proof: H::Proof, pub mleE1_prime: C::ScalarField, pub mleE2_prime: C::ScalarField, @@ -30,17 +31,17 @@ pub struct Proof, H: Homogenization> { /// Implements the Non-Interactive Folding Scheme described in section 4 of /// [Nova](https://eprint.iacr.org/2021/370.pdf) -pub struct NIFS, T: Transcript, H: Homogenization> { +pub struct NIFS, T: Transcript, H: Homogenization> { _c: PhantomData, _cp: PhantomData, _ct: PhantomData, _ch: PhantomData, } -impl, T: Transcript, H: Homogenization> - NIFS -where - ::ScalarField: Absorb, +impl, T: Transcript, H: Homogenization> +NIFS + where + ::ScalarField: Absorb, { // compute_T: compute cross-terms T pub fn compute_T( @@ -181,12 +182,15 @@ where pub fn prove( _cs_prover_params: &CS::ProverParams, r1cs: &R1CS, - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, ci1: &CommittedInstance, ci2: &CommittedInstance, w1: &Witness, w2: &Witness, ) -> Result<(Proof, InstanceWitness), Error> { + let start = 
Instant::now(); + let elapsed = start.elapsed(); + println!("Time before homogenization point-vs-line {:?}", elapsed); let ( hg_proof, HomogeneousEvaluationClaim { @@ -195,6 +199,9 @@ where rE_prime, }, ) = H::prove(transcript, ci1, ci2, w1, w2)?; + let elapsed = start.elapsed(); + println!("Time after homogenization point-vs-line {:?}", elapsed); + transcript.absorb(&mleE1_prime); transcript.absorb(&mleE2_prime); @@ -202,17 +209,29 @@ where let z1: Vec = [vec![ci1.u], ci1.x.to_vec(), w1.W.to_vec()].concat(); let z2: Vec = [vec![ci2.u], ci2.x.to_vec(), w2.W.to_vec()].concat(); + let elapsed = start.elapsed(); + println!("Time before computing T {:?}", elapsed); + let T = Self::compute_T(r1cs, ci1.u, ci2.u, &z1, &z2)?; + let elapsed = start.elapsed(); + println!("Time after computing T {:?}", elapsed); + + let vars = log2(w1.E.len()) as usize; if log2(T.len()) as usize != vars { return Err(Error::NotEqual); } + let elapsed = start.elapsed(); + println!("Time before mleT evaluation {:?}", elapsed); let mleT = dense_vec_to_dense_mle(vars, &T); let mleT_evaluated = mleT.evaluate(&rE_prime).ok_or(Error::EvaluationFail)?; + let elapsed = start.elapsed(); + println!("Time after mleT evaluation {:?}", elapsed); + transcript.absorb(&mleT_evaluated); let rho_scalar = C::ScalarField::from_le_bytes_mod_order(b"rho"); @@ -220,7 +239,9 @@ where let rho: C::ScalarField = transcript.get_challenge(); let _r2 = rho * rho; - Ok(( + let elapsed = start.elapsed(); + println!("Time before start folding {:?}", elapsed); + let temp = Ok(( Proof { hg_proof, mleE1_prime, @@ -239,13 +260,16 @@ where )?, w: Self::fold_witness(rho, w1, w2, &T)?, }, - )) + )); + let elapsed = start.elapsed(); + println!("Time after folding {:?}", elapsed); + temp } /// verify implements NIFS.V logic described in [Nova](https://eprint.iacr.org/2021/370.pdf)'s /// section 4. It returns the folded Committed Instance pub fn verify( - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, ci1: &CommittedInstance, ci2: &CommittedInstance, proof: &Proof, @@ -279,7 +303,7 @@ where } pub fn prove_expansion( - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, ci: &CommittedInstance, w: &Witness, ) -> Result, Error> { @@ -305,7 +329,7 @@ where // Do the verifier's expansion part. 
pub fn verify_expansion( - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, ci: &CommittedInstance, n_vars_mleE: usize, ) -> Result, Error> { diff --git a/folding-schemes/src/folding/mova/traits.rs b/folding-schemes/src/folding/mova/traits.rs index a49c21f6..af9c6bac 100644 --- a/folding-schemes/src/folding/mova/traits.rs +++ b/folding-schemes/src/folding/mova/traits.rs @@ -1,9 +1,10 @@ use ark_crypto_primitives::sponge::Absorb; use ark_ec::{CurveGroup, Group}; use ark_std::{One, Zero}; +use crate::arith::Arith; use super::{CommittedInstance, Witness}; -use crate::ccs::r1cs::R1CS; +use crate::arith::r1cs::R1CS; use crate::Error; /// NovaR1CS extends R1CS methods with Nova specific methods diff --git a/folding-schemes/src/folding/nova/circuits.rs b/folding-schemes/src/folding/nova/circuits.rs index a45ffffc..49532f38 100644 --- a/folding-schemes/src/folding/nova/circuits.rs +++ b/folding-schemes/src/folding/nova/circuits.rs @@ -1,11 +1,7 @@ /// contains [Nova](https://eprint.iacr.org/2021/370.pdf) related circuits -use ark_crypto_primitives::crh::{ - poseidon::constraints::{CRHGadget, CRHParametersVar}, - CRHSchemeGadget, -}; use ark_crypto_primitives::sponge::{ - constraints::CryptographicSpongeVar, - poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge}, + constraints::{AbsorbGadget, CryptographicSpongeVar}, + poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig}, Absorb, CryptographicSponge, }; use ark_ec::{CurveGroup, Group}; @@ -17,27 +13,24 @@ use ark_r1cs_std::{ fields::{fp::FpVar, FieldVar}, groups::GroupOpsBounds, prelude::CurveVar, + uint8::UInt8, R1CSVar, ToConstraintFieldGadget, }; use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, Namespace, SynthesisError}; use ark_std::{fmt::Debug, One, Zero}; use core::{borrow::Borrow, marker::PhantomData}; -use super::{ - cyclefold::{ - CycleFoldChallengeGadget, CycleFoldCommittedInstanceVar, NIFSFullGadget, CF_IO_LEN, - }, - CommittedInstance, -}; +use super::{CommittedInstance, NOVA_CF_N_POINTS}; use crate::constants::N_BITS_RO; use crate::folding::circuits::{ - nonnative::{ - affine::{nonnative_affine_to_field_elements, NonNativeAffineVar}, - uint::NonNativeUintVar, + cyclefold::{ + cf_io_len, CycleFoldChallengeGadget, CycleFoldCommittedInstanceVar, NIFSFullGadget, }, + nonnative::{affine::NonNativeAffineVar, uint::NonNativeUintVar}, CF1, CF2, }; use crate::frontend::FCircuit; +use crate::transcript::{AbsorbNonNativeGadget, Transcript, TranscriptVar}; /// CommittedInstanceVar contains the u, x, cmE and cmW values which are folded on the main Nova /// constraints field (E1::Fr, where E1 is the main curve). The peculiarity is that cmE and cmW are @@ -80,6 +73,26 @@ where } } +impl AbsorbGadget for CommittedInstanceVar +where + C: CurveGroup, + ::BaseField: ark_ff::PrimeField, +{ + fn to_sponge_bytes(&self) -> Result>, SynthesisError> { + unimplemented!() + } + + fn to_sponge_field_elements(&self) -> Result>, SynthesisError> { + Ok([ + vec![self.u.clone()], + self.x.clone(), + self.cmE.to_constraint_field()?, + self.cmW.to_constraint_field()?, + ] + .concat()) + } +} + impl CommittedInstanceVar where C: CurveGroup, @@ -93,25 +106,22 @@ where /// Additionally it returns the vector of the field elements from the self parameters, so they /// can be reused in other gadgets avoiding recalculating (reconstraining) them. 
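    /// Illustrative sketch of the intended native / in-circuit correspondence (both sides
    /// absorb `(pp_hash, i, z_0, z_i, U_i)` into Poseidon and squeeze one field element):
    ///   native:     let h = ci.hash(&sponge, pp_hash, i, z_0, z_i);
    ///   in-circuit: let (h_var, U_vec) = ci_var.hash(&sponge_var, pp_hash_var, i_var, z_0_var, z_i_var)?;
    /// The returned `U_vec` holds the sponge field elements of `self`, which
    /// `ChallengeGadget::get_challenge_gadget` reuses further down to avoid re-allocating
    /// the same constraints.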
#[allow(clippy::type_complexity)] - pub fn hash( + pub fn hash, S>>( self, - crh_params: &CRHParametersVar>, + sponge: &T, + pp_hash: FpVar>, i: FpVar>, z_0: Vec>>, z_i: Vec>>, ) -> Result<(FpVar>, Vec>>), SynthesisError> { - let U_vec = [ - vec![self.u], - self.x, - self.cmE.to_constraint_field()?, - self.cmW.to_constraint_field()?, - ] - .concat(); - let input = [vec![i], z_0, z_i, U_vec.clone()].concat(); - Ok(( - CRHGadget::::evaluate(crh_params, &input)?, - U_vec, - )) + let mut sponge = sponge.clone(); + let U_vec = self.to_sponge_field_elements()?; + sponge.absorb(&pp_hash)?; + sponge.absorb(&i)?; + sponge.absorb(&z_0)?; + sponge.absorb(&z_i)?; + sponge.absorb(&U_vec)?; + Ok((sponge.squeeze_field_elements(1)?.pop().unwrap(), U_vec)) } } @@ -175,63 +185,33 @@ where ::BaseField: PrimeField, ::ScalarField: Absorb, { - pub fn get_challenge_native( - poseidon_config: &PoseidonConfig, + pub fn get_challenge_native>( + transcript: &mut T, + pp_hash: C::ScalarField, // public params hash U_i: CommittedInstance, u_i: CommittedInstance, cmT: C, - ) -> Result, SynthesisError> { - let (U_cmE_x, U_cmE_y) = nonnative_affine_to_field_elements::(U_i.cmE)?; - let (U_cmW_x, U_cmW_y) = nonnative_affine_to_field_elements::(U_i.cmW)?; - let (u_cmE_x, u_cmE_y) = nonnative_affine_to_field_elements::(u_i.cmE)?; - let (u_cmW_x, u_cmW_y) = nonnative_affine_to_field_elements::(u_i.cmW)?; - let (cmT_x, cmT_y) = nonnative_affine_to_field_elements::(cmT)?; - - let mut sponge = PoseidonSponge::::new(poseidon_config); - let input = vec![ - vec![U_i.u], - U_i.x.clone(), - U_cmE_x, - U_cmE_y, - U_cmW_x, - U_cmW_y, - vec![u_i.u], - u_i.x.clone(), - u_cmE_x, - u_cmE_y, - u_cmW_x, - u_cmW_y, - cmT_x, - cmT_y, - ] - .concat(); - sponge.absorb(&input); - let bits = sponge.squeeze_bits(N_BITS_RO); - Ok(bits) + ) -> Vec { + transcript.absorb(&pp_hash); + transcript.absorb(&U_i); + transcript.absorb(&u_i); + transcript.absorb_nonnative(&cmT); + transcript.squeeze_bits(N_BITS_RO) } // compatible with the native get_challenge_native - pub fn get_challenge_gadget( - cs: ConstraintSystemRef, - poseidon_config: &PoseidonConfig, + pub fn get_challenge_gadget, S>>( + transcript: &mut T, + pp_hash: FpVar>, // public params hash U_i_vec: Vec>>, // apready processed input, so we don't have to recompute these values u_i: CommittedInstanceVar, cmT: NonNativeAffineVar, ) -> Result>, SynthesisError> { - let mut sponge = PoseidonSpongeVar::::new(cs, poseidon_config); - - let input: Vec> = [ - U_i_vec, - vec![u_i.u.clone()], - u_i.x.clone(), - u_i.cmE.to_constraint_field()?, - u_i.cmW.to_constraint_field()?, - cmT.to_constraint_field()?, - ] - .concat(); - sponge.absorb(&input)?; - let bits = sponge.squeeze_bits(N_BITS_RO)?; - Ok(bits) + transcript.absorb(&pp_hash)?; + transcript.absorb(&U_i_vec)?; + transcript.absorb(&u_i)?; + transcript.absorb_nonnative(&cmT)?; + transcript.squeeze_bits(N_BITS_RO) } } @@ -249,6 +229,7 @@ pub struct AugmentedFCircuit< { pub _gc2: PhantomData, pub poseidon_config: PoseidonConfig>, + pub pp_hash: Option>, pub i: Option>, pub i_usize: Option, pub z_0: Option>, @@ -282,6 +263,7 @@ where Self { _gc2: PhantomData, poseidon_config: poseidon_config.clone(), + pp_hash: None, i: None, i_usize: None, z_0: None, @@ -319,6 +301,9 @@ where for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>, { fn generate_constraints(self, cs: ConstraintSystemRef>) -> Result<(), SynthesisError> { + let pp_hash = FpVar::>::new_witness(cs.clone(), || { + Ok(self.pp_hash.unwrap_or_else(CF1::::zero)) + })?; let i = 
FpVar::>::new_witness(cs.clone(), || { Ok(self.i.unwrap_or_else(CF1::::zero)) })?; @@ -352,17 +337,17 @@ where let cmT = NonNativeAffineVar::new_witness(cs.clone(), || Ok(self.cmT.unwrap_or_else(C1::zero)))?; - let cf_u_dummy = CommittedInstance::dummy(CF_IO_LEN); + let cf_u_dummy = CommittedInstance::dummy(cf_io_len(NOVA_CF_N_POINTS)); let cf_U_i = CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { Ok(self.cf_U_i.unwrap_or(cf_u_dummy.clone())) })?; let cf1_cmT = GC2::new_witness(cs.clone(), || Ok(self.cf1_cmT.unwrap_or_else(C2::zero)))?; let cf2_cmT = GC2::new_witness(cs.clone(), || Ok(self.cf2_cmT.unwrap_or_else(C2::zero)))?; - let crh_params = CRHParametersVar::::new_constant( - cs.clone(), - self.poseidon_config.clone(), - )?; + // `sponge` is for digest computation. + let sponge = PoseidonSpongeVar::::new(cs.clone(), &self.poseidon_config); + // `transcript` is for challenge generation. + let mut transcript = sponge.clone(); // get z_{i+1} from the F circuit let i_usize = self.i_usize.unwrap_or(0); @@ -375,11 +360,15 @@ where // Primary Part // P.1. Compute u_i.x // u_i.x[0] = H(i, z_0, z_i, U_i) - let (u_i_x, U_i_vec) = - U_i.clone() - .hash(&crh_params, i.clone(), z_0.clone(), z_i.clone())?; + let (u_i_x, U_i_vec) = U_i.clone().hash( + &sponge, + pp_hash.clone(), + i.clone(), + z_0.clone(), + z_i.clone(), + )?; // u_i.x[1] = H(cf_U_i) - let (cf_u_i_x, cf_U_i_vec) = cf_U_i.clone().hash(&crh_params)?; + let (cf_u_i_x, cf_U_i_vec) = cf_U_i.clone().hash(&sponge, pp_hash.clone())?; // P.2. Construct u_i let u_i = CommittedInstanceVar { @@ -399,8 +388,8 @@ where // compute r = H(u_i, U_i, cmT) let r_bits = ChallengeGadget::::get_challenge_gadget( - cs.clone(), - &self.poseidon_config, + &mut transcript, + pp_hash.clone(), U_i_vec, u_i.clone(), cmT.clone(), @@ -425,13 +414,15 @@ where // Base case: u_{i+1}.x[0] == H((i+1, z_0, z_{i+1}, U_{\bot}) // Non-base case: u_{i+1}.x[0] == H((i+1, z_0, z_{i+1}, U_{i+1}) let (u_i1_x, _) = U_i1.clone().hash( - &crh_params, + &sponge, + pp_hash.clone(), i + FpVar::>::one(), z_0.clone(), z_i1.clone(), )?; let (u_i1_x_base, _) = CommittedInstanceVar::new_constant(cs.clone(), u_dummy)?.hash( - &crh_params, + &sponge, + pp_hash.clone(), FpVar::>::one(), z_0.clone(), z_i1.clone(), @@ -484,8 +475,8 @@ where // compute cf1_r = H(cf1_u_i, cf_U_i, cf1_cmT) // cf_r_bits is denoted by rho* in the paper. let cf1_r_bits = CycleFoldChallengeGadget::::get_challenge_gadget( - cs.clone(), - &self.poseidon_config, + &mut transcript, + pp_hash.clone(), cf_U_i_vec, cf1_u_i.clone(), cf1_cmT.clone(), @@ -507,9 +498,9 @@ where // same for cf2_r: let cf2_r_bits = CycleFoldChallengeGadget::::get_challenge_gadget( - cs.clone(), - &self.poseidon_config, - cf1_U_i1.to_constraint_field()?, + &mut transcript, + pp_hash.clone(), + cf1_U_i1.to_native_sponge_field_elements()?, cf2_u_i.clone(), cf2_cmT.clone(), )?; @@ -530,10 +521,10 @@ where // P.4.b compute and check the second output of F' // Base case: u_{i+1}.x[1] == H(cf_U_{\bot}) // Non-base case: u_{i+1}.x[1] == H(cf_U_{i+1}) - let (cf_u_i1_x, _) = cf_U_i1.clone().hash(&crh_params)?; + let (cf_u_i1_x, _) = cf_U_i1.clone().hash(&sponge, pp_hash.clone())?; let (cf_u_i1_x_base, _) = CycleFoldCommittedInstanceVar::new_constant(cs.clone(), cf_u_dummy)? 
- .hash(&crh_params)?; + .hash(&sponge, pp_hash)?; let cf_x = FpVar::new_input(cs.clone(), || { Ok(self.cf_x.unwrap_or(cf_u_i1_x_base.value()?)) })?; @@ -547,6 +538,7 @@ where pub mod tests { use super::*; use ark_bn254::{Fr, G1Projective as Projective}; + use ark_crypto_primitives::sponge::poseidon::PoseidonSponge; use ark_ff::BigInteger; use ark_relations::r1cs::ConstraintSystem; use ark_std::UniformRand; @@ -611,6 +603,8 @@ pub mod tests { fn test_committed_instance_hash() { let mut rng = ark_std::test_rng(); let poseidon_config = poseidon_canonical_config::(); + let sponge = PoseidonSponge::::new(&poseidon_config); + let pp_hash = Fr::from(42u32); // only for test let i = Fr::from(3_u32); let z_0 = vec![Fr::from(3_u32)]; @@ -623,22 +617,23 @@ pub mod tests { }; // compute the CommittedInstance hash natively - let h = ci - .hash(&poseidon_config, i, z_0.clone(), z_i.clone()) - .unwrap(); + let h = ci.hash(&sponge, pp_hash, i, z_0.clone(), z_i.clone()); let cs = ConstraintSystem::::new_ref(); + let pp_hashVar = FpVar::::new_witness(cs.clone(), || Ok(pp_hash)).unwrap(); let iVar = FpVar::::new_witness(cs.clone(), || Ok(i)).unwrap(); let z_0Var = Vec::>::new_witness(cs.clone(), || Ok(z_0.clone())).unwrap(); let z_iVar = Vec::>::new_witness(cs.clone(), || Ok(z_i.clone())).unwrap(); let ciVar = CommittedInstanceVar::::new_witness(cs.clone(), || Ok(ci.clone())).unwrap(); - let crh_params = CRHParametersVar::::new_constant(cs.clone(), poseidon_config).unwrap(); + let sponge = PoseidonSpongeVar::::new(cs.clone(), &poseidon_config); // compute the CommittedInstance hash in-circuit - let (hVar, _) = ciVar.hash(&crh_params, iVar, z_0Var, z_iVar).unwrap(); + let (hVar, _) = ciVar + .hash(&sponge, pp_hashVar, iVar, z_0Var, z_iVar) + .unwrap(); assert!(cs.is_satisfied().unwrap()); // check that the natively computed and in-circuit computed hashes match @@ -650,6 +645,7 @@ pub mod tests { fn test_challenge_gadget() { let mut rng = ark_std::test_rng(); let poseidon_config = poseidon_canonical_config::(); + let mut transcript = PoseidonSponge::::new(&poseidon_config); let u_i = CommittedInstance:: { cmE: Projective::rand(&mut rng), @@ -665,17 +661,20 @@ pub mod tests { }; let cmT = Projective::rand(&mut rng); + let pp_hash = Fr::from(42u32); // only for testing + // compute the challenge natively let r_bits = ChallengeGadget::::get_challenge_native( - &poseidon_config, + &mut transcript, + pp_hash, U_i.clone(), u_i.clone(), cmT, - ) - .unwrap(); + ); let r = Fr::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap(); let cs = ConstraintSystem::::new_ref(); + let pp_hashVar = FpVar::::new_witness(cs.clone(), || Ok(pp_hash)).unwrap(); let u_iVar = CommittedInstanceVar::::new_witness(cs.clone(), || Ok(u_i.clone())) .unwrap(); @@ -683,6 +682,7 @@ pub mod tests { CommittedInstanceVar::::new_witness(cs.clone(), || Ok(U_i.clone())) .unwrap(); let cmTVar = NonNativeAffineVar::::new_witness(cs.clone(), || Ok(cmT)).unwrap(); + let mut transcriptVar = PoseidonSpongeVar::::new(cs.clone(), &poseidon_config); // compute the challenge in-circuit let U_iVar_vec = [ @@ -693,8 +693,8 @@ pub mod tests { ] .concat(); let r_bitsVar = ChallengeGadget::::get_challenge_gadget( - cs.clone(), - &poseidon_config, + &mut transcriptVar, + pp_hashVar, U_iVar_vec, u_iVar, cmTVar, diff --git a/folding-schemes/src/folding/nova/decider_eth.rs b/folding-schemes/src/folding/nova/decider_eth.rs index 3c1bf153..a8a0136c 100644 --- a/folding-schemes/src/folding/nova/decider_eth.rs +++ b/folding-schemes/src/folding/nova/decider_eth.rs @@ 
-62,12 +62,13 @@ where GC1: CurveVar> + ToConstraintFieldGadget>, GC2: CurveVar> + ToConstraintFieldGadget>, FC: FCircuit, + // CS1 is a KZG commitment, where challenge is C1::Fr elem CS1: CommitmentScheme< C1, ProverChallenge = C1::ScalarField, Challenge = C1::ScalarField, Proof = KZGProof, - >, // KZG commitment, where challenge is C1::Fr elem + >, // enforce that the CS2 is Pedersen commitment scheme, since we're at Ethereum's EVM decider CS2: CommitmentScheme>, S: SNARK, @@ -77,20 +78,54 @@ where ::ScalarField: Absorb, ::ScalarField: Absorb, C1: CurveGroup, + for<'b> &'b GC1: GroupOpsBounds<'b, C1, GC1>, for<'b> &'b GC2: GroupOpsBounds<'b, C2, GC2>, // constrain FS into Nova, since this is a Decider specifically for Nova Nova: From, + crate::folding::nova::ProverParams: + From<>::ProverParam>, + crate::folding::nova::VerifierParams: + From<>::VerifierParam>, { + type PreprocessorParam = (FS::ProverParam, FS::VerifierParam); type ProverParam = (S::ProvingKey, CS1::ProverParams); type Proof = Proof; - type VerifierParam = (S::VerifyingKey, CS1::VerifierParams); + /// VerifierParam = (pp_hash, snark::vk, commitment_scheme::vk) + type VerifierParam = (C1::ScalarField, S::VerifyingKey, CS1::VerifierParams); type PublicInput = Vec; - type CommittedInstanceWithWitness = (); type CommittedInstance = CommittedInstance; + fn preprocess( + mut rng: impl RngCore + CryptoRng, + prep_param: &Self::PreprocessorParam, + fs: FS, + ) -> Result<(Self::ProverParam, Self::VerifierParam), Error> { + let circuit = + DeciderEthCircuit::::from_nova::(fs.into()).unwrap(); + + // get the Groth16 specific setup for the circuit + let (g16_pk, g16_vk) = S::circuit_specific_setup(circuit, &mut rng).unwrap(); + + // get the FoldingScheme prover & verifier params from Nova + #[allow(clippy::type_complexity)] + let nova_pp: + as FoldingScheme>::ProverParam = + prep_param.0.clone().into() + ; + #[allow(clippy::type_complexity)] + let nova_vp: + as FoldingScheme>::VerifierParam = + prep_param.1.clone().into(); + let pp_hash = nova_vp.pp_hash()?; + + let pp = (g16_pk, nova_pp.cs_pp); + let vp = (pp_hash, g16_vk, nova_vp.cs_vp); + Ok((pp, vp)) + } + fn prove( - pp: Self::ProverParam, mut rng: impl RngCore + CryptoRng, + pp: Self::ProverParam, folding_scheme: FS, ) -> Result { let (snark_pk, cs_pk): (S::ProvingKey, CS1::ProverParams) = pp; @@ -153,7 +188,8 @@ where return Err(Error::NotEnoughSteps); } - let (snark_vk, cs_vk): (S::VerifyingKey, CS1::VerifierParams) = vp; + let (pp_hash, snark_vk, cs_vk): (C1::ScalarField, S::VerifyingKey, CS1::VerifierParams) = + vp; // compute U = U_{d+1}= NIFS.V(U_d, u_d, cmT) let U = NIFS::::verify(proof.r, running_instance, incoming_instance, &proof.cmT); @@ -163,7 +199,7 @@ where let (cmT_x, cmT_y) = NonNativeAffineVar::inputize(proof.cmT)?; let public_input: Vec = vec![ - vec![i], + vec![pp_hash, i], z_0, z_i, vec![U.u], @@ -281,23 +317,20 @@ fn point2_to_eth_format(p: ark_bn254::G2Affine) -> Result, Error> { #[cfg(test)] pub mod tests { - use super::*; - use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective}; - use ark_groth16::Groth16; + use ark_bn254::{constraints::GVar, Fr, G1Projective as Projective}; use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; - use ark_poly_commit::kzg10::VerifierKey as KZGVerifierKey; use std::time::Instant; - use crate::commitment::kzg::{ProverKey as KZGProverKey, KZG}; + use super::*; use crate::commitment::pedersen::Pedersen; - use crate::folding::nova::{get_cs_params_len, ProverParams}; + use 
crate::folding::nova::PreprocessorParam; use crate::frontend::tests::CubicFCircuit; use crate::transcript::poseidon::poseidon_canonical_config; #[test] fn test_decider() { // use Nova as FoldingScheme - type NOVA = Nova< + type N = Nova< Projective, GVar, Projective2, @@ -306,7 +339,7 @@ pub mod tests { KZG<'static, Bn254>, Pedersen, >; - type DECIDER = Decider< + type D = Decider< Projective, GVar, Projective2, @@ -315,7 +348,7 @@ pub mod tests { KZG<'static, Bn254>, Pedersen, Groth16, // here we define the Snark to use in the decider - NOVA, // here we define the FoldingScheme to use + N, // here we define the FoldingScheme to use >; let mut rng = ark_std::test_rng(); @@ -324,60 +357,30 @@ pub mod tests { let F_circuit = CubicFCircuit::::new(()).unwrap(); let z_0 = vec![Fr::from(3_u32)]; - let (cs_len, cf_cs_len) = - get_cs_params_len::>( - &poseidon_config, - F_circuit, - ) - .unwrap(); - let start = Instant::now(); - let (kzg_pk, kzg_vk): (KZGProverKey, KZGVerifierKey) = - KZG::::setup(&mut rng, cs_len).unwrap(); - let (cf_pedersen_params, _) = Pedersen::::setup(&mut rng, cf_cs_len).unwrap(); - println!("generated KZG params, {:?}", start.elapsed()); - - let prover_params = - ProverParams::, Pedersen> { - poseidon_config: poseidon_config.clone(), - cs_params: kzg_pk.clone(), - cf_cs_params: cf_pedersen_params, - }; + let prep_param = PreprocessorParam::new(poseidon_config, F_circuit); + let nova_params = N::preprocess(&mut rng, &prep_param).unwrap(); let start = Instant::now(); - let mut nova = NOVA::init(&prover_params, F_circuit, z_0.clone()).unwrap(); + let mut nova = N::init(&nova_params, F_circuit, z_0.clone()).unwrap(); println!("Nova initialized, {:?}", start.elapsed()); let start = Instant::now(); - nova.prove_step(vec![]).unwrap(); + nova.prove_step(&mut rng, vec![], None).unwrap(); println!("prove_step, {:?}", start.elapsed()); - nova.prove_step(vec![]).unwrap(); // do a 2nd step + nova.prove_step(&mut rng, vec![], None).unwrap(); // do a 2nd step - // generate Groth16 setup - let circuit = DeciderEthCircuit::< - Projective, - GVar, - Projective2, - GVar2, - KZG, - Pedersen, - >::from_nova::>(nova.clone()) - .unwrap(); let mut rng = rand::rngs::OsRng; - let start = Instant::now(); - let (g16_pk, g16_vk) = - Groth16::::circuit_specific_setup(circuit.clone(), &mut rng).unwrap(); - println!("Groth16 setup, {:?}", start.elapsed()); + // prepare the Decider prover & verifier params + let (decider_pp, decider_vp) = D::preprocess(&mut rng, &nova_params, nova.clone()).unwrap(); // decider proof generation let start = Instant::now(); - let decider_pp = (g16_pk, kzg_pk); - let proof = DECIDER::prove(decider_pp, rng, nova.clone()).unwrap(); + let proof = D::prove(rng, decider_pp, nova.clone()).unwrap(); println!("Decider prove, {:?}", start.elapsed()); // decider proof verification let start = Instant::now(); - let decider_vp = (g16_vk, kzg_vk); - let verified = DECIDER::verify( + let verified = D::verify( decider_vp, nova.i, nova.z_0, nova.z_i, &nova.U_i, &nova.u_i, &proof, ) .unwrap(); diff --git a/folding-schemes/src/folding/nova/decider_eth_circuit.rs b/folding-schemes/src/folding/nova/decider_eth_circuit.rs index 9ec17cca..4cd6c65a 100644 --- a/folding-schemes/src/folding/nova/decider_eth_circuit.rs +++ b/folding-schemes/src/folding/nova/decider_eth_circuit.rs @@ -1,7 +1,10 @@ /// This file implements the onchain (Ethereum's EVM) decider circuit. For non-ethereum use cases, /// other more efficient approaches can be used. 
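/// End-to-end usage sketch of the onchain decider that this circuit backs (illustrative;
/// it follows the flow of the `decider_eth.rs` test, with `N` a Nova instance and `D` the
/// Decider built on top of it; error handling elided):
///   let nova_params = N::preprocess(&mut rng, &prep_param)?;
///   let mut nova = N::init(&nova_params, F_circuit, z_0)?;
///   nova.prove_step(&mut rng, vec![], None)?;
///   let (decider_pp, decider_vp) = D::preprocess(&mut rng, &nova_params, nova.clone())?;
///   let proof = D::prove(rng, decider_pp, nova.clone())?;
///   let verified = D::verify(decider_vp, nova.i, nova.z_0, nova.z_i, &nova.U_i, &nova.u_i, &proof)?;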
-use ark_crypto_primitives::crh::poseidon::constraints::CRHParametersVar; -use ark_crypto_primitives::sponge::{poseidon::PoseidonConfig, Absorb}; +use ark_crypto_primitives::sponge::{ + constraints::CryptographicSpongeVar, + poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge}, + Absorb, CryptographicSponge, +}; use ark_ec::{CurveGroup, Group}; use ark_ff::{BigInteger, PrimeField}; use ark_poly::Polynomial; @@ -20,21 +23,15 @@ use ark_std::{log2, Zero}; use core::{borrow::Borrow, marker::PhantomData}; use super::{circuits::ChallengeGadget, nifs::NIFS}; -use crate::ccs::r1cs::R1CS; +use crate::arith::r1cs::R1CS; use crate::commitment::{pedersen::Params as PedersenParams, CommitmentScheme}; use crate::folding::circuits::{ - nonnative::{ - affine::{nonnative_affine_to_field_elements, NonNativeAffineVar}, - uint::NonNativeUintVar, - }, + nonnative::{affine::NonNativeAffineVar, uint::NonNativeUintVar}, CF1, CF2, }; use crate::folding::nova::{circuits::CommittedInstanceVar, CommittedInstance, Nova, Witness}; use crate::frontend::FCircuit; -use crate::transcript::{ - poseidon::{PoseidonTranscript, PoseidonTranscriptVar}, - Transcript, TranscriptVar, -}; +use crate::transcript::{Transcript, TranscriptVar}; use crate::utils::{ gadgets::{MatrixGadget, SparseMatrixVar, VectorGadget}, vec::poly_from_vec, @@ -223,6 +220,8 @@ where /// CycleFold PedersenParams over C2 pub cf_pedersen_params: PedersenParams, pub poseidon_config: PoseidonConfig>, + /// public params hash + pub pp_hash: Option, pub i: Option>, /// initial state pub z_0: Option>, @@ -262,9 +261,11 @@ where pub fn from_nova>( nova: Nova, ) -> Result { + let mut transcript = PoseidonSponge::::new(&nova.poseidon_config); + // compute the U_{i+1}, W_{i+1} let (T, cmT) = NIFS::::compute_cmT( - &nova.cs_params, + &nova.cs_pp, &nova.r1cs.clone(), &nova.w_i.clone(), &nova.u_i.clone(), @@ -272,11 +273,12 @@ where &nova.U_i.clone(), )?; let r_bits = ChallengeGadget::::get_challenge_native( - &nova.poseidon_config, + &mut transcript, + nova.pp_hash, nova.U_i.clone(), nova.u_i.clone(), cmT, - )?; + ); let r_Fr = C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits)) .ok_or(Error::OutOfBounds)?; let (W_i1, U_i1) = NIFS::::fold_instances( @@ -285,7 +287,7 @@ where // compute the KZG challenges used as inputs in the circuit let (kzg_challenge_W, kzg_challenge_E) = - KZGChallengesGadget::::get_challenges_native(&nova.poseidon_config, U_i1.clone())?; + KZGChallengesGadget::::get_challenges_native(&mut transcript, U_i1.clone()); // get KZG evals let mut W = W_i1.W.clone(); @@ -315,8 +317,9 @@ where cf_E_len: nova.cf_W_i.E.len(), r1cs: nova.r1cs, cf_r1cs: nova.cf_r1cs, - cf_pedersen_params: nova.cf_cs_params, + cf_pedersen_params: nova.cf_cs_pp, poseidon_config: nova.poseidon_config, + pp_hash: Some(nova.pp_hash), i: Some(nova.i), z_0: Some(nova.z_0), z_i: Some(nova.z_i), @@ -360,6 +363,9 @@ where Ok(self.r1cs.clone()) })?; + let pp_hash = FpVar::>::new_input(cs.clone(), || { + Ok(self.pp_hash.unwrap_or_else(CF1::::zero)) + })?; let i = FpVar::>::new_input(cs.clone(), || Ok(self.i.unwrap_or_else(CF1::::zero)))?; let z_0 = Vec::>>::new_input(cs.clone(), || { @@ -403,10 +409,10 @@ where Ok(self.eval_E.unwrap_or_else(CF1::::zero)) })?; - let crh_params = CRHParametersVar::::new_constant( - cs.clone(), - self.poseidon_config.clone(), - )?; + // `sponge` is for digest computation. + let sponge = PoseidonSpongeVar::::new(cs.clone(), &self.poseidon_config); + // `transcript` is for challenge generation. 
+ let mut transcript = sponge.clone(); // 1. check RelaxedR1CS of U_{i+1} let z_U1: Vec>> = @@ -421,9 +427,13 @@ where (u_i.u.is_one()?).enforce_equal(&Boolean::TRUE)?; // 3.a u_i.x[0] == H(i, z_0, z_i, U_i) - let (u_i_x, U_i_vec) = - U_i.clone() - .hash(&crh_params, i.clone(), z_0.clone(), z_i.clone())?; + let (u_i_x, U_i_vec) = U_i.clone().hash( + &sponge, + pp_hash.clone(), + i.clone(), + z_0.clone(), + z_i.clone(), + )?; (u_i.x[0]).enforce_equal(&u_i_x)?; #[cfg(feature = "light-test")] @@ -437,11 +447,12 @@ where { // imports here instead of at the top of the file, so we avoid having multiple // `#[cfg(not(test))]` + use super::NOVA_CF_N_POINTS; use crate::commitment::pedersen::PedersenGadget; - use crate::folding::nova::cyclefold::{CycleFoldCommittedInstanceVar, CF_IO_LEN}; + use crate::folding::circuits::cyclefold::{cf_io_len, CycleFoldCommittedInstanceVar}; use ark_r1cs_std::ToBitsGadget; - let cf_u_dummy_native = CommittedInstance::::dummy(CF_IO_LEN); + let cf_u_dummy_native = CommittedInstance::::dummy(cf_io_len(NOVA_CF_N_POINTS)); let w_dummy_native = Witness::::new( vec![C2::ScalarField::zero(); self.cf_r1cs.A.n_cols - 1 - self.cf_r1cs.l], self.cf_E_len, @@ -454,7 +465,7 @@ where })?; // 3.b u_i.x[1] == H(cf_U_i) - let (cf_u_i_x, _) = cf_U_i.clone().hash(&crh_params)?; + let (cf_u_i_x, _) = cf_U_i.clone().hash(&sponge, pp_hash.clone())?; (u_i.x[1]).enforce_equal(&cf_u_i_x)?; // 4. check Pedersen commitments of cf_U_i.{cmE, cmW} @@ -487,12 +498,23 @@ where RelaxedR1CSGadget::check_nonnative(cf_r1cs, cf_W_i.E, cf_U_i.u.clone(), cf_z_U)?; } - // 6. check KZG challenges - let (incircuit_c_W, incircuit_c_E) = KZGChallengesGadget::::get_challenges_gadget( - cs.clone(), - &self.poseidon_config, - U_i1.clone(), + // 8.a, 6.a compute NIFS.V and KZG challenges. + // We need to ensure the order of challenge generation is the same as + // the native counterpart, so we first compute the challenges here and + // do the actual checks later. + let cmT = + NonNativeAffineVar::new_input(cs.clone(), || Ok(self.cmT.unwrap_or_else(C1::zero)))?; + let r_bits = ChallengeGadget::::get_challenge_gadget( + &mut transcript, + pp_hash, + U_i_vec, + u_i.clone(), + cmT.clone(), )?; + let (incircuit_c_W, incircuit_c_E) = + KZGChallengesGadget::::get_challenges_gadget(&mut transcript, U_i1.clone())?; + + // 6.b check KZG challenges incircuit_c_W.enforce_equal(&kzg_c_W)?; incircuit_c_E.enforce_equal(&kzg_c_E)?; @@ -505,17 +527,8 @@ where // incircuit_eval_W.enforce_equal(&eval_W)?; // incircuit_eval_E.enforce_equal(&eval_E)?; - // 8. 
compute the NIFS.V challenge and check that matches the one from the public input (so we + // 8.b check the NIFS.V challenge matches the one from the public input (so we // avoid the verifier computing it) - let cmT = - NonNativeAffineVar::new_input(cs.clone(), || Ok(self.cmT.unwrap_or_else(C1::zero)))?; - let r_bits = ChallengeGadget::::get_challenge_gadget( - cs.clone(), - &self.poseidon_config, - U_i_vec, - u_i.clone(), - cmT.clone(), - )?; let r_Fr = Boolean::le_bits_to_fp_var(&r_bits)?; // check that the in-circuit computed r is equal to the inputted r let r = @@ -557,38 +570,28 @@ where ::BaseField: PrimeField, C::ScalarField: Absorb, { - pub fn get_challenges_native( - poseidon_config: &PoseidonConfig, + pub fn get_challenges_native>( + transcript: &mut T, U_i: CommittedInstance, - ) -> Result<(C::ScalarField, C::ScalarField), Error> { - let (cmE_x_limbs, cmE_y_limbs) = nonnative_affine_to_field_elements(U_i.cmE)?; - let (cmW_x_limbs, cmW_y_limbs) = nonnative_affine_to_field_elements(U_i.cmW)?; - - let transcript = &mut PoseidonTranscript::::new(poseidon_config); + ) -> (C::ScalarField, C::ScalarField) { // compute the KZG challenges, which are computed in-circuit and checked that it matches // the inputted one - transcript.absorb_vec(&cmW_x_limbs); - transcript.absorb_vec(&cmW_y_limbs); + transcript.absorb_nonnative(&U_i.cmW); let challenge_W = transcript.get_challenge(); - transcript.absorb_vec(&cmE_x_limbs); - transcript.absorb_vec(&cmE_y_limbs); + transcript.absorb_nonnative(&U_i.cmE); let challenge_E = transcript.get_challenge(); - Ok((challenge_W, challenge_E)) + (challenge_W, challenge_E) } // compatible with the native get_challenges_native - pub fn get_challenges_gadget( - cs: ConstraintSystemRef, - poseidon_config: &PoseidonConfig, + pub fn get_challenges_gadget, S>>( + transcript: &mut T, U_i: CommittedInstanceVar, ) -> Result<(FpVar, FpVar), SynthesisError> { - let mut transcript = - PoseidonTranscriptVar::>::new(cs.clone(), &poseidon_config.clone()); - - transcript.absorb_vec(&U_i.cmW.to_constraint_field()?[..])?; + transcript.absorb(&U_i.cmW.to_constraint_field()?)?; let challenge_W = transcript.get_challenge()?; - transcript.absorb_vec(&U_i.cmE.to_constraint_field()?[..])?; + transcript.absorb(&U_i.cmE.to_constraint_field()?)?; let challenge_E = transcript.get_challenge()?; Ok((challenge_W, challenge_E)) @@ -597,7 +600,6 @@ where #[cfg(test)] pub mod tests { - use super::*; use ark_crypto_primitives::crh::{ sha256::{ constraints::{Sha256Gadget, UnitVar}, @@ -611,15 +613,20 @@ pub mod tests { use ark_std::{One, UniformRand}; use ark_vesta::{constraints::GVar as GVar2, Projective as Projective2}; + use super::*; + use crate::arith::{ + r1cs::{ + tests::{get_test_r1cs, get_test_z}, + {extract_r1cs, extract_w_x}, + }, + Arith, + }; use crate::commitment::pedersen::Pedersen; - use crate::folding::nova::{get_cs_params_len, ProverParams, VerifierParams}; + use crate::folding::nova::PreprocessorParam; use crate::frontend::tests::{CubicFCircuit, CustomFCircuit, WrapperCircuit}; use crate::transcript::poseidon::poseidon_canonical_config; use crate::FoldingScheme; - use crate::ccs::r1cs::tests::{get_test_r1cs, get_test_z}; - use crate::ccs::r1cs::{extract_r1cs, extract_w_x}; - #[test] fn test_relaxed_r1cs_small_gadget_handcrafted() { let r1cs: R1CS = get_test_r1cs(); @@ -773,24 +780,7 @@ pub mod tests { let F_circuit = CubicFCircuit::::new(()).unwrap(); let z_0 = vec![Fr::from(3_u32)]; - // get the CS & CF_CS len - let (cs_len, cf_cs_len) = - get_cs_params_len::>( - 
&poseidon_config, - F_circuit, - ) - .unwrap(); - let (pedersen_params, _) = Pedersen::::setup(&mut rng, cs_len).unwrap(); - let (cf_pedersen_params, _) = Pedersen::::setup(&mut rng, cf_cs_len).unwrap(); - - let prover_params = - ProverParams::, Pedersen> { - poseidon_config: poseidon_config.clone(), - cs_params: pedersen_params, - cf_cs_params: cf_pedersen_params, - }; - - type NOVA = Nova< + type N = Nova< Projective, GVar, Projective2, @@ -800,18 +790,22 @@ pub mod tests { Pedersen, >; + let prep_param = PreprocessorParam::< + Projective, + Projective2, + CubicFCircuit, + Pedersen, + Pedersen, + >::new(poseidon_config, F_circuit); + let nova_params = N::preprocess(&mut rng, &prep_param).unwrap(); + // generate a Nova instance and do a step of it - let mut nova = NOVA::init(&prover_params, F_circuit, z_0.clone()).unwrap(); - nova.prove_step(vec![]).unwrap(); + let mut nova = N::init(&nova_params, F_circuit, z_0.clone()).unwrap(); + nova.prove_step(&mut rng, vec![], None).unwrap(); let ivc_v = nova.clone(); - let verifier_params = VerifierParams:: { - poseidon_config: poseidon_config.clone(), - r1cs: ivc_v.clone().r1cs, - cf_r1cs: ivc_v.clone().cf_r1cs, - }; let (running_instance, incoming_instance, cyclefold_instance) = ivc_v.instances(); - NOVA::verify( - verifier_params, + N::verify( + nova_params.1, // verifier_params z_0, ivc_v.z_i, Fr::one(), @@ -844,6 +838,7 @@ pub mod tests { fn test_kzg_challenge_gadget() { let mut rng = ark_std::test_rng(); let poseidon_config = poseidon_canonical_config::(); + let mut transcript = PoseidonSponge::::new(&poseidon_config); let U_i = CommittedInstance:: { cmE: Projective::rand(&mut rng), @@ -854,21 +849,17 @@ pub mod tests { // compute the challenge natively let (challenge_W, challenge_E) = - KZGChallengesGadget::::get_challenges_native(&poseidon_config, U_i.clone()) - .unwrap(); + KZGChallengesGadget::::get_challenges_native(&mut transcript, U_i.clone()); let cs = ConstraintSystem::::new_ref(); let U_iVar = CommittedInstanceVar::::new_witness(cs.clone(), || Ok(U_i.clone())) .unwrap(); + let mut transcript_var = PoseidonSpongeVar::::new(cs.clone(), &poseidon_config); let (challenge_W_Var, challenge_E_Var) = - KZGChallengesGadget::::get_challenges_gadget( - cs.clone(), - &poseidon_config, - U_iVar, - ) - .unwrap(); + KZGChallengesGadget::::get_challenges_gadget(&mut transcript_var, U_iVar) + .unwrap(); assert!(cs.is_satisfied().unwrap()); // check that the natively computed and in-circuit computed hashes match diff --git a/folding-schemes/src/folding/nova/mod.rs b/folding-schemes/src/folding/nova/mod.rs index a938df7c..99d559dc 100644 --- a/folding-schemes/src/folding/nova/mod.rs +++ b/folding-schemes/src/folding/nova/mod.rs @@ -1,47 +1,45 @@ /// Implements the scheme described in [Nova](https://eprint.iacr.org/2021/370.pdf) and /// [CycleFold](https://eprint.iacr.org/2023/1192.pdf). 
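+///
+/// A minimal usage sketch of the `preprocess` / `init` / `prove_step` / `verify`
+/// flow introduced in this patch (illustrative only: the `N` alias, `F_circuit`
+/// and `z_0` are assumptions mirroring the tests at the bottom of this file):
+///
+/// ```ignore
+/// let prep_param = PreprocessorParam::new(poseidon_config, F_circuit);
+/// let nova_params = N::preprocess(&mut rng, &prep_param)?;
+/// let mut nova = N::init(&nova_params, F_circuit, z_0.clone())?;
+/// nova.prove_step(&mut rng, vec![], None)?;
+/// let (running, incoming, cyclefold) = nova.instances();
+/// N::verify(nova_params.1, z_0, nova.z_i, nova.i, running, incoming, cyclefold)?;
+/// ```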
-use ark_crypto_primitives::{ - crh::{poseidon::CRH, CRHScheme}, - sponge::{poseidon::PoseidonConfig, Absorb}, +use ark_crypto_primitives::sponge::{ + poseidon::{PoseidonConfig, PoseidonSponge}, + Absorb, CryptographicSponge, }; use ark_ec::{AffineRepr, CurveGroup, Group}; -use ark_ff::{BigInteger, Field, PrimeField, ToConstraintField}; +use ark_ff::{BigInteger, PrimeField}; use ark_r1cs_std::{groups::GroupOpsBounds, prelude::CurveVar, ToConstraintFieldGadget}; use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::fmt::Debug; +use ark_std::rand::RngCore; use ark_std::{One, Zero}; use core::marker::PhantomData; -use std::usize; -use crate::ccs::r1cs::{extract_r1cs, extract_w_x, R1CS}; use crate::commitment::CommitmentScheme; -use crate::folding::circuits::{ - nonnative::{ - affine::nonnative_affine_to_field_elements, uint::nonnative_field_to_field_elements, - }, - CF2, -}; +use crate::folding::circuits::cyclefold::{fold_cyclefold_circuit, CycleFoldCircuit}; +use crate::folding::circuits::CF2; use crate::frontend::FCircuit; +use crate::transcript::{AbsorbNonNative, Transcript}; use crate::utils::vec::is_zero_vec; use crate::Error; use crate::FoldingScheme; +use crate::{ + arith::r1cs::{extract_r1cs, extract_w_x, R1CS}, + utils::{get_cm_coordinates, pp_hash}, +}; pub mod circuits; -pub mod cyclefold; pub mod decider_eth; pub mod decider_eth_circuit; pub mod nifs; pub mod serialize; pub mod traits; - use circuits::{AugmentedFCircuit, ChallengeGadget}; -use cyclefold::{CycleFoldChallengeGadget, CycleFoldCircuit}; use nifs::NIFS; use traits::NovaR1CS; -#[cfg(test)] -use cyclefold::CF_IO_LEN; +/// Number of points to be folded in the CycleFold circuit, in Nova's case, this is a fixed amount: +/// 2 points to be folded. +const NOVA_CF_N_POINTS: usize = 2_usize; #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] pub struct CommittedInstance { @@ -62,6 +60,54 @@ impl CommittedInstance { } } +impl Absorb for CommittedInstance +where + C::ScalarField: Absorb, +{ + fn to_sponge_bytes(&self, _dest: &mut Vec) { + // This is never called + unimplemented!() + } + + fn to_sponge_field_elements(&self, dest: &mut Vec) { + self.u.to_sponge_field_elements(dest); + self.x.to_sponge_field_elements(dest); + // We cannot call `to_native_sponge_field_elements(dest)` directly, as + // `to_native_sponge_field_elements` needs `F` to be `C::ScalarField`, + // but here `F` is a generic `PrimeField`. + self.cmE + .to_native_sponge_field_elements_as_vec() + .to_sponge_field_elements(dest); + self.cmW + .to_native_sponge_field_elements_as_vec() + .to_sponge_field_elements(dest); + } +} + +impl AbsorbNonNative for CommittedInstance +where + ::BaseField: ark_ff::PrimeField + Absorb, +{ + // Compatible with the in-circuit `CycleFoldCommittedInstanceVar::to_native_sponge_field_elements` + // in `cyclefold.rs`. 
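+    // Encoding sketch (a reading of the body below, not a spec): `u` and the
+    // `x` elements are absorbed through their nonnative (base-field limb)
+    // representation, while `cmE`/`cmW` contribute their affine coordinates
+    // directly, with the point at infinity encoded as (0, 0).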
+ fn to_native_sponge_field_elements(&self, dest: &mut Vec) { + [self.u].to_native_sponge_field_elements(dest); + self.x.to_native_sponge_field_elements(dest); + let (cmE_x, cmE_y) = match self.cmE.into_affine().xy() { + Some((&x, &y)) => (x, y), + None => (C::BaseField::zero(), C::BaseField::zero()), + }; + let (cmW_x, cmW_y) = match self.cmW.into_affine().xy() { + Some((&x, &y)) => (x, y), + None => (C::BaseField::zero(), C::BaseField::zero()), + }; + cmE_x.to_sponge_field_elements(dest); + cmE_y.to_sponge_field_elements(dest); + cmW_x.to_sponge_field_elements(dest); + cmW_y.to_sponge_field_elements(dest); + } +} + impl CommittedInstance where ::ScalarField: Absorb, @@ -71,66 +117,21 @@ where /// nova/circuits.rs::CommittedInstanceVar.hash. /// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and `U_i` is the /// `CommittedInstance`. - pub fn hash( + pub fn hash>( &self, - poseidon_config: &PoseidonConfig, + sponge: &T, + pp_hash: C::ScalarField, // public params hash i: C::ScalarField, z_0: Vec, z_i: Vec, - ) -> Result { - let (cmE_x, cmE_y) = nonnative_affine_to_field_elements::(self.cmE)?; - let (cmW_x, cmW_y) = nonnative_affine_to_field_elements::(self.cmW)?; - - CRH::::evaluate( - poseidon_config, - vec![ - vec![i], - z_0, - z_i, - vec![self.u], - self.x.clone(), - cmE_x, - cmE_y, - cmW_x, - cmW_y, - ] - .concat(), - ) - .map_err(|e| Error::Other(e.to_string())) - } -} - -impl ToConstraintField for CommittedInstance -where - ::BaseField: ark_ff::PrimeField + Absorb, -{ - fn to_field_elements(&self) -> Option> { - let u = nonnative_field_to_field_elements(&self.u); - let x = self - .x - .iter() - .flat_map(nonnative_field_to_field_elements) - .collect::>(); - let (cmE_x, cmE_y, cmE_is_inf) = match self.cmE.into_affine().xy() { - Some((&x, &y)) => (x, y, C::BaseField::zero()), - None => ( - C::BaseField::zero(), - C::BaseField::zero(), - C::BaseField::one(), - ), - }; - let (cmW_x, cmW_y, cmW_is_inf) = match self.cmW.into_affine().xy() { - Some((&x, &y)) => (x, y, C::BaseField::zero()), - None => ( - C::BaseField::zero(), - C::BaseField::zero(), - C::BaseField::one(), - ), - }; - // Concatenate `cmE_is_inf` and `cmW_is_inf` to save constraints for CRHGadget::evaluate in the corresponding circuit - let is_inf = cmE_is_inf.double() + cmW_is_inf; - - Some([u, x, vec![cmE_x, cmE_y, cmW_x, cmW_y, is_inf]].concat()) + ) -> C::ScalarField { + let mut sponge = sponge.clone(); + sponge.absorb(&pp_hash); + sponge.absorb(&i); + sponge.absorb(&z_0); + sponge.absorb(&z_i); + sponge.absorb(&self); + sponge.squeeze_field_elements(1)[0] } } @@ -141,12 +142,15 @@ where /// hash_cyclefold implements the committed instance hash compatible with the gadget implemented in /// nova/cyclefold.rs::CycleFoldCommittedInstanceVar.hash. /// Returns `H(U_i)`, where `U_i` is the `CommittedInstance` for CycleFold. 
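+/// With the sponge-based API this amounts to absorbing `pp_hash` and the
+/// nonnative encoding of `U_i` into a clone of the provided sponge and
+/// squeezing a single field element, as the body below does.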
- pub fn hash_cyclefold( + pub fn hash_cyclefold>( &self, - poseidon_config: &PoseidonConfig, - ) -> Result { - CRH::::evaluate(poseidon_config, self.to_field_elements().unwrap()) - .map_err(|e| Error::Other(e.to_string())) + sponge: &T, + pp_hash: C::BaseField, // public params hash + ) -> C::BaseField { + let mut sponge = sponge.clone(); + sponge.absorb(&pp_hash); + sponge.absorb_nonnative(self); + sponge.squeeze_field_elements(1)[0] } } @@ -191,6 +195,44 @@ where } } +#[derive(Debug, Clone)] +pub struct PreprocessorParam +where + C1: CurveGroup, + C2: CurveGroup, + FC: FCircuit, + CS1: CommitmentScheme, + CS2: CommitmentScheme, +{ + pub poseidon_config: PoseidonConfig, + pub F: FC, + // cs params if not provided, will be generated at the preprocess method + pub cs_pp: Option, + pub cs_vp: Option, + pub cf_cs_pp: Option, + pub cf_cs_vp: Option, +} + +impl PreprocessorParam +where + C1: CurveGroup, + C2: CurveGroup, + FC: FCircuit, + CS1: CommitmentScheme, + CS2: CommitmentScheme, +{ + pub fn new(poseidon_config: PoseidonConfig, F: FC) -> Self { + Self { + poseidon_config, + F, + cs_pp: None, + cs_vp: None, + cf_cs_pp: None, + cf_cs_vp: None, + } + } +} + #[derive(Debug, Clone)] pub struct ProverParams where @@ -200,15 +242,42 @@ where CS2: CommitmentScheme, { pub poseidon_config: PoseidonConfig, - pub cs_params: CS1::ProverParams, - pub cf_cs_params: CS2::ProverParams, + pub cs_pp: CS1::ProverParams, + pub cf_cs_pp: CS2::ProverParams, } #[derive(Debug, Clone)] -pub struct VerifierParams { +pub struct VerifierParams +where + C1: CurveGroup, + C2: CurveGroup, + CS1: CommitmentScheme, + CS2: CommitmentScheme, +{ pub poseidon_config: PoseidonConfig, pub r1cs: R1CS, pub cf_r1cs: R1CS, + pub cs_vp: CS1::VerifierParams, + pub cf_cs_vp: CS2::VerifierParams, +} + +impl VerifierParams +where + C1: CurveGroup, + C2: CurveGroup, + CS1: CommitmentScheme, + CS2: CommitmentScheme, +{ + /// returns the hash of the public parameters of Nova + pub fn pp_hash(&self) -> Result { + pp_hash::( + &self.r1cs, + &self.cf_r1cs, + &self.cs_vp, + &self.cf_cs_vp, + &self.poseidon_config, + ) + } } /// Implements Nova+CycleFold's IVC, described in [Nova](https://eprint.iacr.org/2021/370.pdf) and @@ -233,11 +302,13 @@ where pub cf_r1cs: R1CS, pub poseidon_config: PoseidonConfig, /// CommitmentScheme::ProverParams over C1 - pub cs_params: CS1::ProverParams, + pub cs_pp: CS1::ProverParams, /// CycleFold CommitmentScheme::ProverParams, over C2 - pub cf_cs_params: CS2::ProverParams, + pub cf_cs_pp: CS2::ProverParams, /// F circuit, the circuit that is being folded pub F: FC, + /// public params hash + pub pp_hash: C1::ScalarField, pub i: C1::ScalarField, /// initial state pub z_0: Vec, @@ -272,37 +343,71 @@ where for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>, for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>, { - type PreprocessorParam = (Self::ProverParam, FC); + type PreprocessorParam = PreprocessorParam; type ProverParam = ProverParams; - type VerifierParam = VerifierParams; - type CommittedInstanceWithWitness = (CommittedInstance, Witness); - type CFCommittedInstanceWithWitness = (CommittedInstance, Witness); + type VerifierParam = VerifierParams; + type RunningInstance = (CommittedInstance, Witness); + type IncomingInstance = (CommittedInstance, Witness); + type MultiCommittedInstanceWithWitness = (); + type CFInstance = (CommittedInstance, Witness); fn preprocess( + mut rng: impl RngCore, prep_param: &Self::PreprocessorParam, ) -> Result<(Self::ProverParam, Self::VerifierParam), Error> { - let (prover_params, F_circuit) = 
prep_param; - let (r1cs, cf_r1cs) = - get_r1cs::(&prover_params.poseidon_config, F_circuit.clone())?; + get_r1cs::(&prep_param.poseidon_config, prep_param.F.clone())?; + + // if cs params exist, use them, if not, generate new ones + let cs_pp: CS1::ProverParams; + let cs_vp: CS1::VerifierParams; + let cf_cs_pp: CS2::ProverParams; + let cf_cs_vp: CS2::VerifierParams; + if prep_param.cs_pp.is_some() + && prep_param.cf_cs_pp.is_some() + && prep_param.cs_vp.is_some() + && prep_param.cf_cs_vp.is_some() + { + cs_pp = prep_param.clone().cs_pp.unwrap(); + cs_vp = prep_param.clone().cs_vp.unwrap(); + cf_cs_pp = prep_param.clone().cf_cs_pp.unwrap(); + cf_cs_vp = prep_param.clone().cf_cs_vp.unwrap(); + } else { + (cs_pp, cs_vp) = CS1::setup(&mut rng, r1cs.A.n_rows)?; + (cf_cs_pp, cf_cs_vp) = CS2::setup(&mut rng, cf_r1cs.A.n_rows)?; + } - let verifier_params = VerifierParams:: { - poseidon_config: prover_params.poseidon_config.clone(), + let prover_params = ProverParams:: { + poseidon_config: prep_param.poseidon_config.clone(), + cs_pp: cs_pp.clone(), + cf_cs_pp: cf_cs_pp.clone(), + }; + let verifier_params = VerifierParams:: { + poseidon_config: prep_param.poseidon_config.clone(), r1cs, cf_r1cs, + cs_vp, + cf_cs_vp, }; - Ok((prover_params.clone(), verifier_params)) + + Ok((prover_params, verifier_params)) } /// Initializes the Nova+CycleFold's IVC for the given parameters and initial state `z_0`. - fn init(pp: &Self::ProverParam, F: FC, z_0: Vec) -> Result { + fn init( + params: &(Self::ProverParam, Self::VerifierParam), + F: FC, + z_0: Vec, + ) -> Result { + let (pp, vp) = params; + // prepare the circuit to obtain its R1CS let cs = ConstraintSystem::::new_ref(); let cs2 = ConstraintSystem::::new_ref(); let augmented_F_circuit = AugmentedFCircuit::::empty(&pp.poseidon_config, F.clone()); - let cf_circuit = CycleFoldCircuit::::empty(); + let cf_circuit = CycleFoldCircuit::::empty(NOVA_CF_N_POINTS); augmented_F_circuit.generate_constraints(cs.clone())?; cs.finalize(); @@ -314,6 +419,9 @@ where let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?; let cf_r1cs = extract_r1cs::(&cs2); + // compute the public params hash + let pp_hash = vp.pp_hash()?; + // setup the dummy instances let (w_dummy, u_dummy) = r1cs.dummy_instance(); let (cf_w_dummy, cf_u_dummy) = cf_r1cs.dummy_instance(); @@ -327,9 +435,10 @@ where r1cs, cf_r1cs, poseidon_config: pp.poseidon_config.clone(), - cs_params: pp.cs_params.clone(), - cf_cs_params: pp.cf_cs_params.clone(), + cs_pp: pp.cs_pp.clone(), + cf_cs_pp: pp.cf_cs_pp.clone(), F, + pp_hash, i: C1::ScalarField::zero(), z_0: z_0.clone(), z_i: z_0, @@ -344,9 +453,25 @@ where } /// Implements IVC.P of Nova+CycleFold - fn prove_step(&mut self, external_inputs: Vec) -> Result<(), Error> { + fn prove_step( + &mut self, + _rng: impl RngCore, + external_inputs: Vec, + // Nova does not support multi-instances folding + _other_instances: Option, + ) -> Result<(), Error> { + // `sponge` is for digest computation. + let sponge = PoseidonSponge::::new(&self.poseidon_config); + // `transcript` is for challenge generation. 
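+        // Both are Poseidon sponges over the same config; cloning the sponge
+        // before any absorption lets digest computation and challenge
+        // generation evolve independently from the same initial state.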
+ let mut transcript = sponge.clone(); + let augmented_F_circuit: AugmentedFCircuit; + // Nova does not support (by design) multi-instances folding + if _other_instances.is_some() { + return Err(Error::NoMultiInstances); + } + if self.z_i.len() != self.F.state_len() { return Err(Error::NotSameLength( "z_i.len()".to_string(), @@ -380,11 +505,12 @@ where // r_bits is the r used to the RLC of the F' instances let r_bits = ChallengeGadget::::get_challenge_native( - &self.poseidon_config, + &mut transcript, + self.pp_hash, self.U_i.clone(), self.u_i.clone(), cmT, - )?; + ); let r_Fr = C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits)) .ok_or(Error::OutOfBounds)?; let r_Fq = C1::BaseField::from_bigint(BigInteger::from_bits_le(&r_bits)) @@ -398,20 +524,22 @@ where // folded instance output (public input, x) // u_{i+1}.x[0] = H(i+1, z_0, z_{i+1}, U_{i+1}) let u_i1_x = U_i1.hash( - &self.poseidon_config, + &sponge, + self.pp_hash, self.i + C1::ScalarField::one(), self.z_0.clone(), z_i1.clone(), - )?; + ); // u_{i+1}.x[1] = H(cf_U_{i+1}) let cf_u_i1_x: C1::ScalarField; if self.i == C1::ScalarField::zero() { - cf_u_i1_x = self.cf_U_i.hash_cyclefold(&self.poseidon_config)?; + cf_u_i1_x = self.cf_U_i.hash_cyclefold(&sponge, self.pp_hash); // base case augmented_F_circuit = AugmentedFCircuit:: { _gc2: PhantomData, poseidon_config: self.poseidon_config.clone(), + pp_hash: Some(self.pp_hash), i: Some(C1::ScalarField::zero()), // = i=0 i_usize: Some(0), z_0: Some(self.z_0.clone()), // = z_i @@ -456,35 +584,42 @@ where let cfW_circuit = CycleFoldCircuit:: { _gc: PhantomData, - r_bits: Some(r_bits.clone()), - p1: Some(self.U_i.clone().cmW), - p2: Some(self.u_i.clone().cmW), + n_points: NOVA_CF_N_POINTS, + r_bits: Some(vec![r_bits.clone()]), + points: Some(vec![self.U_i.clone().cmW, self.u_i.clone().cmW]), x: Some(cfW_u_i_x.clone()), }; let cfE_circuit = CycleFoldCircuit:: { _gc: PhantomData, - r_bits: Some(r_bits.clone()), - p1: Some(self.U_i.clone().cmE), - p2: Some(cmT), + n_points: NOVA_CF_N_POINTS, + r_bits: Some(vec![r_bits.clone()]), + points: Some(vec![self.U_i.clone().cmE, cmT]), x: Some(cfE_u_i_x.clone()), }; // fold self.cf_U_i + cfW_U -> folded running with cfW let (_cfW_w_i, cfW_u_i, cfW_W_i1, cfW_U_i1, cfW_cmT, _) = self.fold_cyclefold_circuit( + &mut transcript, self.cf_W_i.clone(), // CycleFold running instance witness self.cf_U_i.clone(), // CycleFold running instance cfW_u_i_x, cfW_circuit, )?; // fold [the output from folding self.cf_U_i + cfW_U] + cfE_U = folded_running_with_cfW + cfE - let (_cfE_w_i, cfE_u_i, cf_W_i1, cf_U_i1, cf_cmT, _) = - self.fold_cyclefold_circuit(cfW_W_i1, cfW_U_i1.clone(), cfE_u_i_x, cfE_circuit)?; + let (_cfE_w_i, cfE_u_i, cf_W_i1, cf_U_i1, cf_cmT, _) = self.fold_cyclefold_circuit( + &mut transcript, + cfW_W_i1, + cfW_U_i1.clone(), + cfE_u_i_x, + cfE_circuit, + )?; - cf_u_i1_x = cf_U_i1.hash_cyclefold(&self.poseidon_config)?; + cf_u_i1_x = cf_U_i1.hash_cyclefold(&sponge, self.pp_hash); augmented_F_circuit = AugmentedFCircuit:: { _gc2: PhantomData, poseidon_config: self.poseidon_config.clone(), + pp_hash: Some(self.pp_hash), i: Some(self.i), i_usize: Some(i_usize), z_0: Some(self.z_0.clone()), @@ -540,7 +675,7 @@ where self.i += C1::ScalarField::one(); self.z_i = z_i1; self.w_i = Witness::::new(w_i1, self.r1cs.A.n_rows); - self.u_i = self.w_i.commit::(&self.cs_params, x_i1)?; + self.u_i = self.w_i.commit::(&self.cs_pp, x_i1)?; self.W_i = W_i1; self.U_i = U_i1; @@ -557,12 +692,13 @@ where fn state(&self) -> Vec { self.z_i.clone() } + fn instances( 
&self, ) -> ( - Self::CommittedInstanceWithWitness, - Self::CommittedInstanceWithWitness, - Self::CFCommittedInstanceWithWitness, + Self::RunningInstance, + Self::IncomingInstance, + Self::CFInstance, ) { ( (self.U_i.clone(), self.W_i.clone()), @@ -571,16 +707,26 @@ where ) } - /// Implements IVC.V of Nova+CycleFold + /// Implements IVC.V of Nova+CycleFold. Notice that this method does not include the + /// commitments verification, which is done in the Decider. fn verify( vp: Self::VerifierParam, z_0: Vec, // initial state z_i: Vec, // last state num_steps: C1::ScalarField, - running_instance: Self::CommittedInstanceWithWitness, - incoming_instance: Self::CommittedInstanceWithWitness, - cyclefold_instance: Self::CFCommittedInstanceWithWitness, + running_instance: Self::RunningInstance, + incoming_instance: Self::IncomingInstance, + cyclefold_instance: Self::CFInstance, ) -> Result<(), Error> { + let sponge = PoseidonSponge::::new(&vp.poseidon_config); + + if num_steps == C1::ScalarField::zero() { + if z_0 != z_i { + return Err(Error::IVCVerificationFail); + } + return Ok(()); + } + let (U_i, W_i) = running_instance; let (u_i, w_i) = incoming_instance; let (cf_U_i, cf_W_i) = cyclefold_instance; @@ -589,14 +735,16 @@ where return Err(Error::IVCVerificationFail); } + let pp_hash = vp.pp_hash()?; + // check that u_i's output points to the running instance // u_i.X[0] == H(i, z_0, z_i, U_i) - let expected_u_i_x = U_i.hash(&vp.poseidon_config, num_steps, z_0, z_i.clone())?; + let expected_u_i_x = U_i.hash(&sponge, pp_hash, num_steps, z_0, z_i.clone()); if expected_u_i_x != u_i.x[0] { return Err(Error::IVCVerificationFail); } // u_i.X[1] == H(cf_U_i) - let expected_cf_u_i_x = cf_U_i.hash_cyclefold(&vp.poseidon_config)?; + let expected_cf_u_i_x = cf_U_i.hash_cyclefold(&sponge, pp_hash); if expected_cf_u_i_x != u_i.x[1] { return Err(Error::IVCVerificationFail); } @@ -636,7 +784,7 @@ where // computes T and cmT for the AugmentedFCircuit fn compute_cmT(&self) -> Result<(Vec, C1), Error> { NIFS::::compute_cmT( - &self.cs_params, + &self.cs_pp, &self.r1cs, &self.w_i, &self.u_i, @@ -644,23 +792,6 @@ where &self.U_i, ) } - // computes T* and cmT* for the CycleFoldCircuit - fn compute_cf_cmT( - &self, - cf_w_i: &Witness, - cf_u_i: &CommittedInstance, - cf_W_i: &Witness, - cf_U_i: &CommittedInstance, - ) -> Result<(Vec, C2), Error> { - NIFS::::compute_cyclefold_cmT( - &self.cf_cs_params, - &self.cf_r1cs, - cf_w_i, - cf_u_i, - cf_W_i, - cf_U_i, - ) - } } impl Nova @@ -682,8 +813,9 @@ where { // folds the given cyclefold circuit and its instances #[allow(clippy::type_complexity)] - fn fold_cyclefold_circuit( + fn fold_cyclefold_circuit>( &self, + transcript: &mut T, cf_W_i: Witness, // witness of the running instance cf_U_i: CommittedInstance, // running instance cf_u_i_x: Vec, @@ -699,41 +831,17 @@ where ), Error, > { - let cs2 = ConstraintSystem::::new_ref(); - cf_circuit.generate_constraints(cs2.clone())?; - - let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?; - let (cf_w_i, cf_x_i) = extract_w_x::(&cs2); - if cf_x_i != cf_u_i_x { - return Err(Error::NotEqual); - } - - #[cfg(test)] - if cf_x_i.len() != CF_IO_LEN { - return Err(Error::NotExpectedLength(cf_x_i.len(), CF_IO_LEN)); - } - - // fold cyclefold instances - let cf_w_i = Witness::::new(cf_w_i.clone(), self.cf_r1cs.A.n_rows); - let cf_u_i: CommittedInstance = - cf_w_i.commit::(&self.cf_cs_params, cf_x_i.clone())?; - - // compute T* and cmT* for CycleFoldCircuit - let (cf_T, cf_cmT) = self.compute_cf_cmT(&cf_w_i, &cf_u_i, &cf_W_i, 
&cf_U_i)?; - - let cf_r_bits = CycleFoldChallengeGadget::::get_challenge_native( - &self.poseidon_config, - cf_U_i.clone(), - cf_u_i.clone(), - cf_cmT, - )?; - let cf_r_Fq = C1::BaseField::from_bigint(BigInteger::from_bits_le(&cf_r_bits)) - .ok_or(Error::OutOfBounds)?; - - let (cf_W_i1, cf_U_i1) = NIFS::::fold_instances( - cf_r_Fq, &cf_W_i, &cf_U_i, &cf_w_i, &cf_u_i, &cf_T, cf_cmT, - )?; - Ok((cf_w_i, cf_u_i, cf_W_i1, cf_U_i1, cf_cmT, cf_r_Fq)) + fold_cyclefold_circuit::( + NOVA_CF_N_POINTS, + transcript, + self.cf_r1cs.clone(), + self.cf_cs_pp.clone(), + self.pp_hash, + cf_W_i, + cf_U_i, + cf_u_i_x, + cf_circuit, + ) } } @@ -771,7 +879,7 @@ where { let augmented_F_circuit = AugmentedFCircuit::::empty(poseidon_config, F_circuit); - let cf_circuit = CycleFoldCircuit::::empty(); + let cf_circuit = CycleFoldCircuit::::empty(NOVA_CF_N_POINTS); let r1cs = get_r1cs_from_cs::(augmented_F_circuit)?; let cf_r1cs = get_r1cs_from_cs::(cf_circuit)?; Ok((r1cs, cf_r1cs)) @@ -801,23 +909,13 @@ where Ok((r1cs.A.n_rows, cf_r1cs.A.n_rows)) } -/// returns the coordinates of a commitment point. This is compatible with the arkworks -/// GC.to_constraint_field()[..2] -pub(crate) fn get_cm_coordinates(cm: &C) -> Vec { - let zero = (&C::BaseField::zero(), &C::BaseField::zero()); - let cm = cm.into_affine(); - let (cm_x, cm_y) = cm.xy().unwrap_or(zero); - vec![*cm_x, *cm_y] -} - #[cfg(test)] pub mod tests { - use super::*; - use crate::commitment::kzg::{ProverKey as KZGProverKey, KZG}; + use crate::commitment::kzg::KZG; use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective}; use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; - use ark_poly_commit::kzg10::VerifierKey as KZGVerifierKey; + use super::*; use crate::commitment::pedersen::Pedersen; use crate::frontend::tests::CubicFCircuit; use crate::transcript::poseidon::poseidon_canonical_config; @@ -826,72 +924,50 @@ pub mod tests { /// AugmentedFCircuit #[test] fn test_ivc() { - let mut rng = ark_std::test_rng(); let poseidon_config = poseidon_canonical_config::(); let F_circuit = CubicFCircuit::::new(()).unwrap(); - let (cs_len, cf_cs_len) = - get_cs_params_len::>( - &poseidon_config, - F_circuit, - ) - .unwrap(); - let (kzg_pk, _): (KZGProverKey, KZGVerifierKey) = - KZG::::setup(&mut rng, cs_len).unwrap(); - let (pedersen_params, _) = Pedersen::::setup(&mut rng, cs_len).unwrap(); - let (cf_pedersen_params, _) = Pedersen::::setup(&mut rng, cf_cs_len).unwrap(); - // run the test using Pedersen commitments on both sides of the curve cycle test_ivc_opt::, Pedersen>( poseidon_config.clone(), - pedersen_params, - cf_pedersen_params.clone(), F_circuit, ); // run the test using KZG for the commitments on the main curve, and Pedersen for the // commitments on the secondary curve - test_ivc_opt::, Pedersen>( - poseidon_config, - kzg_pk, - cf_pedersen_params, - F_circuit, - ); + test_ivc_opt::, Pedersen>(poseidon_config, F_circuit); } // test_ivc allowing to choose the CommitmentSchemes fn test_ivc_opt, CS2: CommitmentScheme>( poseidon_config: PoseidonConfig, - cs_params: CS1::ProverParams, - cf_cs_params: CS2::ProverParams, F_circuit: CubicFCircuit, ) { - type NOVA = - Nova, CS1, CS2>; + let mut rng = ark_std::test_rng(); + type N = Nova, CS1, CS2>; - let prover_params = ProverParams:: { - poseidon_config: poseidon_config.clone(), - cs_params, - cf_cs_params, + let prep_param = PreprocessorParam::, CS1, CS2> { + poseidon_config, + F: F_circuit, + cs_pp: None, + cs_vp: None, + cf_cs_pp: None, + cf_cs_vp: None, }; + let nova_params 
= N::preprocess(&mut rng, &prep_param).unwrap(); let z_0 = vec![Fr::from(3_u32)]; - let mut nova = NOVA::init(&prover_params, F_circuit, z_0.clone()).unwrap(); + let mut nova = N::init(&nova_params, F_circuit, z_0.clone()).unwrap(); let num_steps: usize = 3; for _ in 0..num_steps { - nova.prove_step(vec![]).unwrap(); + nova.prove_step(&mut rng, vec![], None).unwrap(); } assert_eq!(Fr::from(num_steps as u32), nova.i); - let verifier_params = VerifierParams:: { - poseidon_config, - r1cs: nova.clone().r1cs, - cf_r1cs: nova.clone().cf_r1cs, - }; let (running_instance, incoming_instance, cyclefold_instance) = nova.instances(); - NOVA::::verify( - verifier_params, + N::::verify( + nova_params.1, // Nova's verifier params z_0, nova.z_i, nova.i, diff --git a/folding-schemes/src/folding/nova/nifs.rs b/folding-schemes/src/folding/nova/nifs.rs index 823a4dda..330a7ef9 100644 --- a/folding-schemes/src/folding/nova/nifs.rs +++ b/folding-schemes/src/folding/nova/nifs.rs @@ -1,15 +1,17 @@ -use ark_crypto_primitives::sponge::Absorb; -use ark_ec::{CurveGroup, Group}; -use ark_std::{ Zero}; use std::marker::PhantomData; use std::time::Instant; -use super::{CommittedInstance, Witness}; -use crate::ccs::r1cs::R1CS; +use ark_crypto_primitives::sponge::Absorb; +use ark_ec::{CurveGroup, Group}; +use ark_std::Zero; + +use crate::arith::r1cs::R1CS; use crate::commitment::CommitmentScheme; +use crate::Error; use crate::transcript::Transcript; use crate::utils::vec::{hadamard, mat_vec_mul, vec_add, vec_scalar_mul, vec_sub}; -use crate::Error; + +use super::{CommittedInstance, Witness}; /// Implements the Non-Interactive Folding Scheme described in section 4 of /// [Nova](https://eprint.iacr.org/2021/370.pdf) @@ -98,24 +100,23 @@ where w2: &Witness, ci2: &CommittedInstance, ) -> Result<(Vec, C), Error> { - println!("Nova Point 0: nifs.compute_cmT - Starting "); let start = Instant::now(); - let z1: Vec = [vec![ci1.u], ci1.x.to_vec(), w1.W.to_vec()].concat(); let z2: Vec = [vec![ci2.u], ci2.x.to_vec(), w2.W.to_vec()].concat(); - println!("Nova Point 1 nifs.compute_cmT {:?} - Z concatenations", start.elapsed()); - - // compute cross terms - + let elapsed = start.elapsed(); + println!("Time before computing T {:?}", elapsed); let T = Self::compute_T(r1cs, ci1.u, ci2.u, &z1, &z2)?; - println!("Nova Point 2 nifs.compute_cmT {:?} - Computed T", start.elapsed()); - + let elapsed = start.elapsed(); + println!("Time after computing T {:?}", elapsed); + let elapsed = start.elapsed(); + println!("Time before commiting T {:?}", elapsed); // use r_T=0 since we don't need hiding property for cm(T) let cmT = CS::commit(cs_prover_params, &T, &C::ScalarField::zero())?; - println!("Nova Point 3 nifs.compute_cmT {:?} - Committed T", start.elapsed()); + let elapsed = start.elapsed(); + println!("Time after commiting T {:?}", elapsed); Ok((T, cmT)) } @@ -196,7 +197,7 @@ where } pub fn prove_commitments( - tr: &mut impl Transcript, + tr: &mut impl Transcript, cs_prover_params: &CS::ProverParams, w: &Witness, ci: &CommittedInstance, @@ -212,17 +213,21 @@ where #[cfg(test)] pub mod tests { - use super::*; - use ark_crypto_primitives::sponge::poseidon::PoseidonConfig; + use ark_crypto_primitives::sponge::{ + CryptographicSponge, + poseidon::{PoseidonConfig, PoseidonSponge}, + }; use ark_ff::{BigInteger, PrimeField}; use ark_pallas::{Fr, Projective}; use ark_std::{ops::Mul, UniformRand}; - use crate::ccs::r1cs::tests::{get_test_r1cs, get_test_z}; + use crate::arith::r1cs::tests::{get_test_r1cs, get_test_z}; use 
crate::commitment::pedersen::{Params as PedersenParams, Pedersen}; use crate::folding::nova::circuits::ChallengeGadget; use crate::folding::nova::traits::NovaR1CS; - use crate::transcript::poseidon::{poseidon_canonical_config, PoseidonTranscript}; + use crate::transcript::poseidon::poseidon_canonical_config; + + use super::*; #[allow(clippy::type_complexity)] pub(crate) fn prepare_simple_fold_inputs() -> ( @@ -271,14 +276,16 @@ pub mod tests { .unwrap(); let poseidon_config = poseidon_canonical_config::(); + let mut transcript = PoseidonSponge::::new(&poseidon_config); + let pp_hash = C::ScalarField::from(42u32); // only for test let r_bits = ChallengeGadget::::get_challenge_native( - &poseidon_config, + &mut transcript, + pp_hash, ci1.clone(), ci2.clone(), cmT, - ) - .unwrap(); + ); let r_Fr = C::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap(); let (w3, ci3) = @@ -375,9 +382,9 @@ pub mod tests { .unwrap(); // init Prover's transcript - let mut transcript_p = PoseidonTranscript::::new(&poseidon_config); + let mut transcript_p = PoseidonSponge::::new(&poseidon_config); // init Verifier's transcript - let mut transcript_v = PoseidonTranscript::::new(&poseidon_config); + let mut transcript_v = PoseidonSponge::::new(&poseidon_config); // prove the ci3.cmE, ci3.cmW, cmT commitments let cm_proofs = NIFS::>::prove_commitments( diff --git a/folding-schemes/src/folding/nova/serialize.rs b/folding-schemes/src/folding/nova/serialize.rs index eb4e88b8..62dea3f1 100644 --- a/folding-schemes/src/folding/nova/serialize.rs +++ b/folding-schemes/src/folding/nova/serialize.rs @@ -1,10 +1,3 @@ -use super::{circuits::AugmentedFCircuit, cyclefold::CycleFoldCircuit, Nova, ProverParams}; -pub use super::{CommittedInstance, Witness}; -pub use crate::folding::circuits::CF2; -use crate::{ - ccs::r1cs::extract_r1cs, commitment::CommitmentScheme, folding::circuits::CF1, - frontend::FCircuit, -}; use ark_crypto_primitives::sponge::{poseidon::PoseidonConfig, Absorb}; use ark_ec::{CurveGroup, Group}; use ark_ff::PrimeField; @@ -17,6 +10,17 @@ use ark_relations::r1cs::ConstraintSystem; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError, Write}; use std::marker::PhantomData; +use super::{circuits::AugmentedFCircuit, Nova, ProverParams}; +use super::{CommittedInstance, Witness}; +use crate::folding::{ + circuits::{cyclefold::CycleFoldCircuit, CF2}, + nova::NOVA_CF_N_POINTS, +}; +use crate::{ + arith::r1cs::extract_r1cs, commitment::CommitmentScheme, folding::circuits::CF1, + frontend::FCircuit, +}; + impl CanonicalSerialize for Nova where C1: CurveGroup, @@ -40,6 +44,7 @@ where mut writer: W, compress: ark_serialize::Compress, ) -> Result<(), ark_serialize::SerializationError> { + self.pp_hash.serialize_with_mode(&mut writer, compress)?; self.i.serialize_with_mode(&mut writer, compress)?; self.z_0.serialize_with_mode(&mut writer, compress)?; self.z_i.serialize_with_mode(&mut writer, compress)?; @@ -52,7 +57,8 @@ where } fn serialized_size(&self, compress: ark_serialize::Compress) -> usize { - self.i.serialized_size(compress) + self.pp_hash.serialized_size(compress) + + self.i.serialized_size(compress) + self.z_0.serialized_size(compress) + self.z_i.serialized_size(compress) + self.w_i.serialized_size(compress) @@ -114,6 +120,7 @@ where prover_params: ProverParams, poseidon_config: PoseidonConfig, ) -> Result { + let pp_hash = C1::ScalarField::deserialize_with_mode(&mut reader, compress, validate)?; let i = C1::ScalarField::deserialize_with_mode(&mut reader, compress, 
validate)?; let z_0 = Vec::::deserialize_with_mode(&mut reader, compress, validate)?; let z_i = Vec::::deserialize_with_mode(&mut reader, compress, validate)?; @@ -130,7 +137,7 @@ where let cs2 = ConstraintSystem::::new_ref(); let augmented_F_circuit = AugmentedFCircuit::::empty(&poseidon_config, f_circuit.clone()); - let cf_circuit = CycleFoldCircuit::::empty(); + let cf_circuit = CycleFoldCircuit::::empty(NOVA_CF_N_POINTS); augmented_F_circuit .generate_constraints(cs.clone()) @@ -150,8 +157,13 @@ where _gc1: PhantomData, _c2: PhantomData, _gc2: PhantomData, - cs_params: prover_params.cs_params, - cf_cs_params: prover_params.cf_cs_params, + r1cs, + cf_r1cs, + poseidon_config, + cs_pp: prover_params.cs_pp, + cf_cs_pp: prover_params.cf_cs_pp, + F: f_circuit, + pp_hash, i, z_0, z_i, @@ -161,64 +173,50 @@ where U_i, cf_W_i, cf_U_i, - r1cs, - cf_r1cs, - poseidon_config, - F: f_circuit, }) } } #[cfg(test)] pub mod tests { + use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective}; + use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; + use ark_serialize::{CanonicalSerialize, Compress, Validate}; + use std::{fs, io::Write}; + use crate::{ - commitment::{ - kzg::{ProverKey as KZGProverKey, KZG}, - pedersen::Pedersen, - CommitmentScheme, - }, - folding::nova::{get_cs_params_len, Nova, ProverParams}, + commitment::{kzg::KZG, pedersen::Pedersen}, + folding::nova::{Nova, PreprocessorParam}, frontend::{tests::CubicFCircuit, FCircuit}, transcript::poseidon::poseidon_canonical_config, FoldingScheme, }; - use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective}; - use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; - use ark_poly_commit::kzg10::VerifierKey as KZGVerifierKey; - use ark_serialize::{CanonicalSerialize, Compress, Validate}; - use std::{fs, io::Write}; #[test] fn test_serde_nova() { let mut rng = ark_std::test_rng(); let poseidon_config = poseidon_canonical_config::(); let F_circuit = CubicFCircuit::::new(()).unwrap(); - let (cs_len, cf_cs_len) = - get_cs_params_len::>( - &poseidon_config, - F_circuit, - ) - .unwrap(); - let (kzg_pk, _): (KZGProverKey, KZGVerifierKey) = - KZG::::setup(&mut rng, cs_len).unwrap(); - let (cf_pedersen_params, _) = Pedersen::::setup(&mut rng, cf_cs_len).unwrap(); // Initialize nova and make multiple `prove_step()` - type NOVA = - Nova, CS1, CS2>; - let prover_params = - ProverParams::, Pedersen> { - poseidon_config: poseidon_config.clone(), - cs_params: kzg_pk.clone(), - cf_cs_params: cf_pedersen_params.clone(), - }; + type N = Nova< + Projective, + GVar, + Projective2, + GVar2, + CubicFCircuit, + KZG<'static, Bn254>, + Pedersen, + >; + let prep_param = PreprocessorParam::new(poseidon_config.clone(), F_circuit); + let nova_params = N::preprocess(&mut rng, &prep_param).unwrap(); let z_0 = vec![Fr::from(3_u32)]; - let mut nova = NOVA::init(&prover_params, F_circuit, z_0.clone()).unwrap(); + let mut nova = N::init(&nova_params, F_circuit, z_0.clone()).unwrap(); let num_steps: usize = 3; for _ in 0..num_steps { - nova.prove_step(vec![]).unwrap(); + nova.prove_step(&mut rng, vec![], None).unwrap(); } let mut writer = vec![]; @@ -248,7 +246,7 @@ pub mod tests { bytes.as_slice(), Compress::No, Validate::No, - prover_params, + nova_params.0, // Nova's prover params poseidon_config, ) .unwrap(); @@ -257,8 +255,10 @@ pub mod tests { let num_steps: usize = 3; for _ in 0..num_steps { - deserialized_nova.prove_step(vec![]).unwrap(); - nova.prove_step(vec![]).unwrap(); + deserialized_nova + 
.prove_step(&mut rng, vec![], None) + .unwrap(); + nova.prove_step(&mut rng, vec![], None).unwrap(); } assert_eq!(deserialized_nova.w_i, nova.w_i); diff --git a/folding-schemes/src/folding/nova/traits.rs b/folding-schemes/src/folding/nova/traits.rs index 6b50da8b..29a4eded 100644 --- a/folding-schemes/src/folding/nova/traits.rs +++ b/folding-schemes/src/folding/nova/traits.rs @@ -3,7 +3,7 @@ use ark_ec::{CurveGroup, Group}; use ark_std::{One, Zero}; use super::{CommittedInstance, Witness}; -use crate::ccs::r1cs::R1CS; +use crate::arith::{r1cs::R1CS, Arith}; use crate::Error; /// NovaR1CS extends R1CS methods with Nova specific methods @@ -39,6 +39,7 @@ where (w_dummy, u_dummy) } + // notice that this method does not check the commitment correctness fn check_instance_relation( &self, W: &Witness, @@ -52,6 +53,7 @@ where self.check_relation(&Z) } + // notice that this method does not check the commitment correctness fn check_relaxed_instance_relation( &self, W: &Witness, diff --git a/folding-schemes/src/folding/protogalaxy/circuits.rs b/folding-schemes/src/folding/protogalaxy/circuits.rs new file mode 100644 index 00000000..5f726903 --- /dev/null +++ b/folding-schemes/src/folding/protogalaxy/circuits.rs @@ -0,0 +1,179 @@ +use ark_crypto_primitives::sponge::CryptographicSponge; +use ark_ec::CurveGroup; +use ark_poly::{univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain}; +use ark_r1cs_std::{ + alloc::AllocVar, + fields::{fp::FpVar, FieldVar}, + poly::polynomial::univariate::dense::DensePolynomialVar, +}; +use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError}; + +use super::{ + folding::lagrange_polys, + utils::{all_powers_var, betas_star_var, exponential_powers_var}, + CommittedInstanceVar, +}; +use crate::{ + folding::circuits::nonnative::affine::NonNativeAffineVar, transcript::TranscriptVar, + utils::gadgets::VectorGadget, +}; + +pub struct FoldingGadget {} + +impl FoldingGadget { + pub fn fold_committed_instance( + transcript: &mut impl TranscriptVar, + // running instance + instance: &CommittedInstanceVar, + // incoming instances + vec_instances: &[CommittedInstanceVar], + // polys from P + F_coeffs: Vec>, + K_coeffs: Vec>, + ) -> Result, SynthesisError> { + let t = instance.betas.len(); + let n = F_coeffs.len(); + + // absorb the committed instances + transcript.absorb(instance)?; + transcript.absorb(&vec_instances)?; + + let delta = transcript.get_challenge()?; + let deltas = exponential_powers_var(delta, t); + + transcript.absorb(&F_coeffs)?; + + let alpha = transcript.get_challenge()?; + let alphas = all_powers_var(alpha.clone(), n); + + // F(alpha) = e + \sum_t F_i * alpha^i + let mut F_alpha = instance.e.clone(); + for (i, F_i) in F_coeffs.iter().skip(1).enumerate() { + F_alpha += F_i * &alphas[i + 1]; + } + + let betas_star = betas_star_var(&instance.betas, &deltas, &alpha); + + let k = vec_instances.len(); + let H = GeneralEvaluationDomain::new(k + 1).unwrap(); + let L_X = lagrange_polys(H) + .into_iter() + .map(|poly| { + DensePolynomialVar::from_coefficients_vec( + poly.coeffs + .into_iter() + .map(FpVar::constant) + .collect::>(), + ) + }) + .collect::>(); + let Z_X = DensePolynomialVar::from_coefficients_vec( + DensePolynomial::from(H.vanishing_polynomial()) + .coeffs + .into_iter() + .map(FpVar::constant) + .collect::>(), + ); + let K_X = DensePolynomialVar { coeffs: K_coeffs }; + + transcript.absorb(&K_X.coeffs)?; + + let gamma = transcript.get_challenge()?; + + let L_X_evals = L_X + .iter() + .take(k + 1) + .map(|L| L.evaluate(&gamma)) + 
.collect::, _>>()?; + + let e_star = F_alpha * &L_X_evals[0] + Z_X.evaluate(&gamma)? * K_X.evaluate(&gamma)?; + + let mut u_star = &instance.u * &L_X_evals[0]; + let mut x_star = instance.x.mul_scalar(&L_X_evals[0])?; + for i in 0..k { + u_star += &vec_instances[i].u * &L_X_evals[i + 1]; + x_star = x_star.add(&vec_instances[i].x.mul_scalar(&L_X_evals[i + 1])?)?; + } + + // return the folded instance + Ok(CommittedInstanceVar { + betas: betas_star, + // phi will be computed in CycleFold + phi: NonNativeAffineVar::new_constant(ConstraintSystemRef::None, C::zero())?, + e: e_star, + u: u_star, + x: x_star, + }) + } +} + +#[cfg(test)] +mod tests { + use ark_crypto_primitives::sponge::{ + constraints::CryptographicSpongeVar, + poseidon::{constraints::PoseidonSpongeVar, PoseidonSponge}, + }; + use ark_pallas::{Fr, Projective}; + use ark_r1cs_std::R1CSVar; + use ark_relations::r1cs::ConstraintSystem; + use std::error::Error; + + use super::*; + use crate::{ + arith::r1cs::tests::get_test_r1cs, + folding::protogalaxy::folding::{tests::prepare_inputs, Folding}, + transcript::poseidon::poseidon_canonical_config, + }; + + #[test] + fn test_fold_gadget() -> Result<(), Box> { + let k = 7; + let (witness, instance, witnesses, instances) = prepare_inputs(k); + let r1cs = get_test_r1cs::(); + + // init Prover & Verifier's transcript + let poseidon_config = poseidon_canonical_config::(); + let mut transcript_p = PoseidonSponge::new(&poseidon_config); + let mut transcript_v = PoseidonSponge::new(&poseidon_config); + + let (_, _, F_coeffs, K_coeffs) = Folding::::prove( + &mut transcript_p, + &r1cs, + &instance, + &witness, + &instances, + &witnesses, + )?; + + let folded_instance = Folding::::verify( + &mut transcript_v, + &r1cs, + &instance, + &instances, + F_coeffs.clone(), + K_coeffs.clone(), + )?; + + let cs = ConstraintSystem::new_ref(); + let mut transcript_var = PoseidonSpongeVar::new(cs.clone(), &poseidon_config); + let instance_var = CommittedInstanceVar::new_witness(cs.clone(), || Ok(instance))?; + let instances_var = Vec::new_witness(cs.clone(), || Ok(instances))?; + let F_coeffs_var = Vec::new_witness(cs.clone(), || Ok(F_coeffs))?; + let K_coeffs_var = Vec::new_witness(cs.clone(), || Ok(K_coeffs))?; + + let folded_instance_var = FoldingGadget::fold_committed_instance( + &mut transcript_var, + &instance_var, + &instances_var, + F_coeffs_var, + K_coeffs_var, + )?; + assert_eq!(folded_instance.betas, folded_instance_var.betas.value()?); + assert_eq!(folded_instance.e, folded_instance_var.e.value()?); + assert_eq!(folded_instance.u, folded_instance_var.u.value()?); + assert_eq!(folded_instance.x, folded_instance_var.x.value()?); + assert!(cs.is_satisfied()?); + + Ok(()) + } +} diff --git a/folding-schemes/src/folding/protogalaxy/folding.rs b/folding-schemes/src/folding/protogalaxy/folding.rs index d60ee420..ef9b50d8 100644 --- a/folding-schemes/src/folding/protogalaxy/folding.rs +++ b/folding-schemes/src/folding/protogalaxy/folding.rs @@ -6,17 +6,15 @@ use ark_poly::{ univariate::{DensePolynomial, SparsePolynomial}, DenseUVPolynomial, EvaluationDomain, Evaluations, GeneralEvaluationDomain, Polynomial, }; -use ark_std::log2; -use ark_std::{cfg_into_iter, Zero}; +use ark_std::{cfg_into_iter, log2, Zero}; use rayon::iter::{IntoParallelIterator, ParallelIterator}; use std::marker::PhantomData; -use super::traits::ProtoGalaxyTranscript; use super::utils::{all_powers, betas_star, exponential_powers}; use super::ProtoGalaxyError; use super::{CommittedInstance, Witness}; -use crate::ccs::r1cs::R1CS; +use 
crate::arith::r1cs::R1CS; use crate::transcript::Transcript; use crate::utils::vec::*; use crate::utils::virtual_polynomial::bit_decompose; @@ -36,7 +34,7 @@ where #![allow(clippy::type_complexity)] /// implements the non-interactive Prover from the folding scheme described in section 4 pub fn prove( - transcript: &mut (impl Transcript + ProtoGalaxyTranscript), + transcript: &mut impl Transcript, r1cs: &R1CS, // running instance instance: &CommittedInstance, @@ -65,10 +63,13 @@ where let k = vec_instances.len(); let t = instance.betas.len(); let n = r1cs.A.n_cols; - if w.w.len() != n { + + let z = [vec![instance.u], instance.x.clone(), w.w.clone()].concat(); + + if z.len() != n { return Err(Error::NotSameLength( - "w.w.len()".to_string(), - w.w.len(), + "z.len()".to_string(), + z.len(), "n".to_string(), n, )); @@ -81,21 +82,19 @@ where } // absorb the committed instances - transcript.absorb_committed_instance(instance)?; - for ci in vec_instances.iter() { - transcript.absorb_committed_instance(ci)?; - } + transcript.absorb(instance); + transcript.absorb(&vec_instances); let delta = transcript.get_challenge(); let deltas = exponential_powers(delta, t); - let f_w = eval_f(r1cs, &w.w)?; + let f_z = eval_f(r1cs, &z)?; // F(X) let F_X: SparsePolynomial = - calc_f_from_btree(&f_w, &instance.betas, &deltas).expect("Error calculating F[x]"); + calc_f_from_btree(&f_z, &instance.betas, &deltas).expect("Error calculating F[x]"); let F_X_dense = DensePolynomial::from(F_X.clone()); - transcript.absorb_vec(&F_X_dense.coeffs); + transcript.absorb(&F_X_dense.coeffs); let alpha = transcript.get_challenge(); @@ -114,24 +113,28 @@ where phi: instance.phi, betas: betas_star.clone(), e: F_alpha, + u: instance.u, + x: instance.x.clone(), }, w, )?; - let ws: Vec> = std::iter::once(w.w.clone()) + let zs: Vec> = std::iter::once(z.clone()) .chain( vec_w .iter() - .map(|wj| { - if wj.w.len() != n { + .zip(vec_instances) + .map(|(wj, uj)| { + let zj = [vec![uj.u], uj.x.clone(), wj.w.clone()].concat(); + if zj.len() != n { return Err(Error::NotSameLength( - "wj.w.len()".to_string(), - wj.w.len(), + "zj.len()".to_string(), + zj.len(), "n".to_string(), n, )); } - Ok(wj.w.clone()) + Ok(zj) }) .collect::>, Error>>()?, ) @@ -148,17 +151,17 @@ where let mut G_evals: Vec = vec![C::ScalarField::zero(); G_domain.size()]; for (hi, h) in G_domain.elements().enumerate() { // each iteration evaluates G(h) - // inner = L_0(x) * w + \sum_k L_i(x) * w_j - let mut inner: Vec = vec![C::ScalarField::zero(); ws[0].len()]; - for (i, w) in ws.iter().enumerate() { - // Li_w_h = (Li(X)*wj)(h) = Li(h) * wj - let mut Liw_h: Vec = vec![C::ScalarField::zero(); w.len()]; - for (j, wj) in w.iter().enumerate() { - Liw_h[j] = (&L_X[i] * *wj).evaluate(&h); + // inner = L_0(x) * z + \sum_k L_i(x) * z_j + let mut inner: Vec = vec![C::ScalarField::zero(); zs[0].len()]; + for (i, z) in zs.iter().enumerate() { + // Li_z_h = (Li(X)*zj)(h) = Li(h) * zj + let mut Liz_h: Vec = vec![C::ScalarField::zero(); z.len()]; + for (j, zj) in z.iter().enumerate() { + Liz_h[j] = (&L_X[i] * *zj).evaluate(&h); } for j in 0..inner.len() { - inner[j] += Liw_h[j]; + inner[j] += Liz_h[j]; } } let f_ev = eval_f(r1cs, &inner)?; @@ -187,23 +190,31 @@ where return Err(Error::ProtoGalaxy(ProtoGalaxyError::RemainderNotZero)); } - transcript.absorb_vec(&K_X.coeffs); + transcript.absorb(&K_X.coeffs); let gamma = transcript.get_challenge(); - let e_star = - F_alpha * L_X[0].evaluate(&gamma) + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma); - - let mut phi_star: C = instance.phi * 
L_X[0].evaluate(&gamma); + let L_X_evals = L_X + .iter() + .take(k + 1) + .map(|L| L.evaluate(&gamma)) + .collect::>(); + + let e_star = F_alpha * L_X_evals[0] + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma); + let mut w_star = vec_scalar_mul(&w.w, &L_X_evals[0]); + let mut r_w_star = w.r_w * L_X_evals[0]; + let mut phi_star = instance.phi * L_X_evals[0]; + let mut u_star = instance.u * L_X_evals[0]; + let mut x_star = vec_scalar_mul(&instance.x, &L_X_evals[0]); for i in 0..k { - phi_star += vec_instances[i].phi * L_X[i + 1].evaluate(&gamma); - } - let mut w_star: Vec = vec_scalar_mul(&w.w, &L_X[0].evaluate(&gamma)); - let mut r_w_star: C::ScalarField = w.r_w * L_X[0].evaluate(&gamma); - for i in 0..k { - let L_X_at_i1 = L_X[i + 1].evaluate(&gamma); - w_star = vec_add(&w_star, &vec_scalar_mul(&vec_w[i].w, &L_X_at_i1))?; - r_w_star += vec_w[i].r_w * L_X_at_i1; + w_star = vec_add(&w_star, &vec_scalar_mul(&vec_w[i].w, &L_X_evals[i + 1]))?; + r_w_star += vec_w[i].r_w * L_X_evals[i + 1]; + phi_star += vec_instances[i].phi * L_X_evals[i + 1]; + u_star += vec_instances[i].u * L_X_evals[i + 1]; + x_star = vec_add( + &x_star, + &vec_scalar_mul(&vec_instances[i].x, &L_X_evals[i + 1]), + )?; } Ok(( @@ -211,6 +222,8 @@ where betas: betas_star, phi: phi_star, e: e_star, + u: u_star, + x: x_star, }, Witness { w: w_star, @@ -223,7 +236,7 @@ where /// implements the non-interactive Verifier from the folding scheme described in section 4 pub fn verify( - transcript: &mut (impl Transcript + ProtoGalaxyTranscript), + transcript: &mut impl Transcript, r1cs: &R1CS, // running instance instance: &CommittedInstance, @@ -237,15 +250,13 @@ where let n = r1cs.A.n_cols; // absorb the committed instances - transcript.absorb_committed_instance(instance)?; - for ci in vec_instances.iter() { - transcript.absorb_committed_instance(ci)?; - } + transcript.absorb(instance); + transcript.absorb(&vec_instances); let delta = transcript.get_challenge(); let deltas = exponential_powers(delta, t); - transcript.absorb_vec(&F_coeffs); + transcript.absorb(&F_coeffs); let alpha = transcript.get_challenge(); let alphas = all_powers(alpha, n); @@ -266,16 +277,28 @@ where let K_X: DensePolynomial = DensePolynomial::::from_coefficients_vec(K_coeffs); - transcript.absorb_vec(&K_X.coeffs); + transcript.absorb(&K_X.coeffs); let gamma = transcript.get_challenge(); - let e_star = - F_alpha * L_X[0].evaluate(&gamma) + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma); + let L_X_evals = L_X + .iter() + .take(k + 1) + .map(|L| L.evaluate(&gamma)) + .collect::>(); + + let e_star = F_alpha * L_X_evals[0] + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma); - let mut phi_star: C = instance.phi * L_X[0].evaluate(&gamma); + let mut phi_star = instance.phi * L_X_evals[0]; + let mut u_star = instance.u * L_X_evals[0]; + let mut x_star = vec_scalar_mul(&instance.x, &L_X_evals[0]); for i in 0..k { - phi_star += vec_instances[i].phi * L_X[i + 1].evaluate(&gamma); + phi_star += vec_instances[i].phi * L_X_evals[i + 1]; + u_star += vec_instances[i].u * L_X_evals[i + 1]; + x_star = vec_add( + &x_star, + &vec_scalar_mul(&vec_instances[i].x, &L_X_evals[i + 1]), + )?; } // return the folded instance @@ -283,6 +306,8 @@ where betas: betas_star, phi: phi_star, e: e_star, + u: u_star, + x: x_star, }) } } @@ -356,7 +381,9 @@ fn calc_f_from_btree( } // lagrange_polys method from caulk: https://github.com/caulk-crypto/caulk/tree/8210b51fb8a9eef4335505d1695c44ddc7bf8170/src/multi/setup.rs#L300 -fn lagrange_polys(domain_n: GeneralEvaluationDomain) -> Vec> { +pub fn lagrange_polys( + 
domain_n: GeneralEvaluationDomain, +) -> Vec> { let mut lagrange_polynomials: Vec> = Vec::new(); for i in 0..domain_n.size() { let evals: Vec = cfg_into_iter!(0..domain_n.size()) @@ -369,43 +396,47 @@ fn lagrange_polys(domain_n: GeneralEvaluationDomain) -> Vec(r1cs: &R1CS, w: &[F]) -> Result, Error> { - let Az = mat_vec_mul(&r1cs.A, w)?; - let Bz = mat_vec_mul(&r1cs.B, w)?; - let Cz = mat_vec_mul(&r1cs.C, w)?; +fn eval_f(r1cs: &R1CS, z: &[F]) -> Result, Error> { + let Az = mat_vec_mul(&r1cs.A, z)?; + let Bz = mat_vec_mul(&r1cs.B, z)?; + let Cz = mat_vec_mul(&r1cs.C, z)?; let AzBz = hadamard(&Az, &Bz)?; vec_sub(&AzBz, &Cz) } #[cfg(test)] -mod tests { +pub mod tests { use super::*; + use ark_crypto_primitives::sponge::poseidon::PoseidonSponge; + use ark_crypto_primitives::sponge::CryptographicSponge; use ark_pallas::{Fr, Projective}; - use ark_std::UniformRand; + use ark_std::{rand::Rng, UniformRand}; - use crate::ccs::r1cs::tests::{get_test_r1cs, get_test_z}; + use crate::arith::r1cs::tests::{get_test_r1cs, get_test_z, get_test_z_split}; use crate::commitment::{pedersen::Pedersen, CommitmentScheme}; - use crate::transcript::poseidon::{poseidon_canonical_config, PoseidonTranscript}; + use crate::transcript::poseidon::poseidon_canonical_config; pub(crate) fn check_instance( r1cs: &R1CS, instance: &CommittedInstance, w: &Witness, ) -> Result<(), Error> { - if instance.betas.len() != log2(w.w.len()) as usize { + let z = [vec![instance.u], instance.x.clone(), w.w.clone()].concat(); + + if instance.betas.len() != log2(z.len()) as usize { return Err(Error::NotSameLength( "instance.betas.len()".to_string(), instance.betas.len(), - "log2(w.w.len())".to_string(), - log2(w.w.len()) as usize, + "log2(z.len())".to_string(), + log2(z.len()) as usize, )); } - let f_w = eval_f(r1cs, &w.w)?; // f(w) + let f_z = eval_f(r1cs, &z)?; // f(z) let mut r = C::ScalarField::zero(); - for (i, f_w_i) in f_w.iter().enumerate() { - r += pow_i(i, &instance.betas) * f_w_i; + for (i, f_z_i) in f_z.iter().enumerate() { + r += pow_i(i, &instance.betas) * f_z_i; } if instance.e == r { return Ok(()); @@ -430,8 +461,9 @@ mod tests { #[test] fn test_eval_f() { + let mut rng = ark_std::test_rng(); let r1cs = get_test_r1cs::(); - let mut z = get_test_z::(3); + let mut z = get_test_z::(rng.gen::() as usize); let f_w = eval_f(&r1cs, &z).unwrap(); assert!(is_zero_vec(&f_w)); @@ -443,7 +475,7 @@ mod tests { // k represents the number of instances to be fold, apart from the running instance #[allow(clippy::type_complexity)] - fn prepare_inputs( + pub fn prepare_inputs( k: usize, ) -> ( Witness, @@ -452,23 +484,19 @@ mod tests { Vec>, ) { let mut rng = ark_std::test_rng(); - let (pedersen_params, _) = Pedersen::::setup(&mut rng, 100).unwrap(); // 100 is wip, will get it from actual vec - let z = get_test_z::(3); - let mut zs: Vec> = Vec::new(); - for i in 0..k { - let z_i = get_test_z::(i + 4); - zs.push(z_i); - } + let (u, x, w) = get_test_z_split::(rng.gen::() as usize); - let n = z.len(); + let (pedersen_params, _) = Pedersen::::setup(&mut rng, w.len()).unwrap(); + + let n = 1 + x.len() + w.len(); let t = log2(n) as usize; let beta = Fr::rand(&mut rng); let betas = exponential_powers(beta, t); let witness = Witness:: { - w: z.clone(), + w, r_w: Fr::rand(&mut rng), }; let phi = Pedersen::::commit(&pedersen_params, &witness.w, &witness.r_w) @@ -477,14 +505,17 @@ mod tests { phi, betas: betas.clone(), e: Fr::zero(), + u, + x, }; // same for the other instances let mut witnesses: Vec> = Vec::new(); let mut instances: Vec> = Vec::new(); 
#[allow(clippy::needless_range_loop)] - for i in 0..k { + for _ in 0..k { + let (u_i, x_i, w_i) = get_test_z_split::(rng.gen::() as usize); let witness_i = Witness:: { - w: zs[i].clone(), + w: w_i, r_w: Fr::rand(&mut rng), }; let phi_i = Pedersen::::commit( @@ -495,8 +526,10 @@ mod tests { .unwrap(); let instance_i = CommittedInstance:: { phi: phi_i, - betas: betas.clone(), + betas: vec![], e: Fr::zero(), + u: u_i, + x: x_i, }; witnesses.push(witness_i); instances.push(instance_i); @@ -506,15 +539,15 @@ mod tests { } #[test] - fn test_fold_native_case() { + fn test_fold() { let k = 7; let (witness, instance, witnesses, instances) = prepare_inputs(k); let r1cs = get_test_r1cs::(); // init Prover & Verifier's transcript let poseidon_config = poseidon_canonical_config::(); - let mut transcript_p = PoseidonTranscript::::new(&poseidon_config); - let mut transcript_v = PoseidonTranscript::::new(&poseidon_config); + let mut transcript_p = PoseidonSponge::::new(&poseidon_config); + let mut transcript_v = PoseidonSponge::::new(&poseidon_config); let (folded_instance, folded_witness, F_coeffs, K_coeffs) = Folding::::prove( &mut transcript_p, @@ -553,8 +586,8 @@ mod tests { // init Prover & Verifier's transcript let poseidon_config = poseidon_canonical_config::(); - let mut transcript_p = PoseidonTranscript::::new(&poseidon_config); - let mut transcript_v = PoseidonTranscript::::new(&poseidon_config); + let mut transcript_p = PoseidonSponge::::new(&poseidon_config); + let mut transcript_v = PoseidonSponge::::new(&poseidon_config); let (mut running_witness, mut running_instance, _, _) = prepare_inputs(0); @@ -588,9 +621,8 @@ mod tests { .unwrap(); // check that prover & verifier folded instances are the same values - assert_eq!(folded_instance.phi, folded_instance_v.phi); - assert_eq!(folded_instance.betas, folded_instance_v.betas); - assert_eq!(folded_instance.e, folded_instance_v.e); + assert_eq!(folded_instance, folded_instance_v); + assert!(!folded_instance.e.is_zero()); // check that the folded instance satisfies the relation diff --git a/folding-schemes/src/folding/protogalaxy/mod.rs b/folding-schemes/src/folding/protogalaxy/mod.rs index f2f244f4..192f2274 100644 --- a/folding-schemes/src/folding/protogalaxy/mod.rs +++ b/folding-schemes/src/folding/protogalaxy/mod.rs @@ -1,17 +1,60 @@ +use std::borrow::Borrow; + /// Implements the scheme described in [ProtoGalaxy](https://eprint.iacr.org/2023/1106.pdf) use ark_ec::CurveGroup; use ark_ff::PrimeField; +use ark_r1cs_std::{ + alloc::{AllocVar, AllocationMode}, + fields::fp::FpVar, +}; +use ark_relations::r1cs::{Namespace, SynthesisError}; use thiserror::Error; +use super::circuits::nonnative::affine::NonNativeAffineVar; + +pub mod circuits; pub mod folding; pub mod traits; pub(crate) mod utils; -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct CommittedInstance { phi: C, betas: Vec, e: C::ScalarField, + u: C::ScalarField, + x: Vec, +} + +#[derive(Clone, Debug)] +pub struct CommittedInstanceVar { + phi: NonNativeAffineVar, + betas: Vec>, + e: FpVar, + u: FpVar, + x: Vec>, +} + +impl AllocVar, C::ScalarField> for CommittedInstanceVar { + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + f().and_then(|u| { + let cs = cs.into(); + + let u = u.borrow(); + + Ok(Self { + phi: NonNativeAffineVar::new_variable(cs.clone(), || Ok(u.phi), mode)?, + betas: Vec::new_variable(cs.clone(), || Ok(u.betas.clone()), mode)?, + e: FpVar::new_variable(cs.clone(), || Ok(u.e), mode)?, + u: 
FpVar::new_variable(cs.clone(), || Ok(u.u), mode)?,
+                x: Vec::new_variable(cs.clone(), || Ok(u.x.clone()), mode)?,
+            })
+        })
+    }
 }
 
 #[derive(Clone, Debug)]
diff --git a/folding-schemes/src/folding/protogalaxy/traits.rs b/folding-schemes/src/folding/protogalaxy/traits.rs
index ff943e12..ef45b317 100644
--- a/folding-schemes/src/folding/protogalaxy/traits.rs
+++ b/folding-schemes/src/folding/protogalaxy/traits.rs
@@ -1,23 +1,46 @@
-use ark_crypto_primitives::sponge::Absorb;
-use ark_ec::{CurveGroup, Group};
+use ark_crypto_primitives::sponge::{constraints::AbsorbGadget, Absorb};
+use ark_ec::CurveGroup;
+use ark_ff::PrimeField;
+use ark_r1cs_std::{fields::fp::FpVar, uint8::UInt8, ToConstraintFieldGadget};
+use ark_relations::r1cs::SynthesisError;
 
-use super::CommittedInstance;
-use crate::transcript::{poseidon::PoseidonTranscript, Transcript};
-use crate::Error;
+use super::{CommittedInstance, CommittedInstanceVar};
+use crate::transcript::AbsorbNonNative;
 
-/// ProtoGalaxyTranscript extends [`Transcript`] with the method to absorb ProtoGalaxy's
-/// CommittedInstance.
-pub trait ProtoGalaxyTranscript<C: CurveGroup>: Transcript<C> {
-    fn absorb_committed_instance(&mut self, ci: &CommittedInstance<C>) -> Result<(), Error> {
-        self.absorb_point(&ci.phi)?;
-        self.absorb_vec(&ci.betas);
-        self.absorb(&ci.e);
-        Ok(())
+// Implements the trait for absorbing ProtoGalaxy's CommittedInstance.
+impl<C: CurveGroup> Absorb for CommittedInstance<C>
+where
+    C::ScalarField: Absorb,
+{
+    fn to_sponge_bytes(&self, _dest: &mut Vec<u8>) {
+        unimplemented!()
+    }
+
+    fn to_sponge_field_elements<F: PrimeField>(&self, dest: &mut Vec<F>) {
+        self.phi
+            .to_native_sponge_field_elements_as_vec()
+            .to_sponge_field_elements(dest);
+        self.betas.to_sponge_field_elements(dest);
+        self.e.to_sponge_field_elements(dest);
+        self.u.to_sponge_field_elements(dest);
+        self.x.to_sponge_field_elements(dest);
     }
 }
 
-// Implements ProtoGalaxyTranscript for PoseidonTranscript
-impl<C: CurveGroup> ProtoGalaxyTranscript<C> for PoseidonTranscript<C> where
-    <C as Group>::ScalarField: Absorb
-{
+// Implements the trait for absorbing ProtoGalaxy's CommittedInstanceVar in-circuit.
+impl AbsorbGadget for CommittedInstanceVar { + fn to_sponge_bytes(&self) -> Result>, SynthesisError> { + unimplemented!() + } + + fn to_sponge_field_elements(&self) -> Result>, SynthesisError> { + Ok([ + self.phi.to_constraint_field()?, + self.betas.to_sponge_field_elements()?, + self.e.to_sponge_field_elements()?, + self.u.to_sponge_field_elements()?, + self.x.to_sponge_field_elements()?, + ] + .concat()) + } } diff --git a/folding-schemes/src/folding/protogalaxy/utils.rs b/folding-schemes/src/folding/protogalaxy/utils.rs index 4910279e..3ac31f11 100644 --- a/folding-schemes/src/folding/protogalaxy/utils.rs +++ b/folding-schemes/src/folding/protogalaxy/utils.rs @@ -1,6 +1,7 @@ use ark_ff::PrimeField; +use ark_r1cs_std::fields::{fp::FpVar, FieldVar}; -// returns (b, b^2, b^4, ..., b^{2^{t-1}}) +/// Returns (b, b^2, b^4, ..., b^{2^{t-1}}) pub fn exponential_powers(b: F, t: usize) -> Vec { let mut r = vec![F::zero(); t]; r[0] = b; @@ -9,6 +10,18 @@ pub fn exponential_powers(b: F, t: usize) -> Vec { } r } + +/// The in-circuit version of `exponential_powers` +pub fn exponential_powers_var(b: FpVar, t: usize) -> Vec> { + let mut r = vec![FpVar::zero(); t]; + r[0] = b; + for i in 1..t { + r[i] = &r[i - 1] * &r[i - 1]; + } + r +} + +/// Returns (a, a^2, a^3, ..., a^{n-1}) pub fn all_powers(a: F, n: usize) -> Vec { let mut r = vec![F::zero(); n]; for (i, r_i) in r.iter_mut().enumerate() { @@ -17,7 +30,20 @@ pub fn all_powers(a: F, n: usize) -> Vec { r } -// returns a vector containing βᵢ* = βᵢ + α ⋅ δᵢ +/// The in-circuit version of `all_powers` +pub fn all_powers_var(a: FpVar, n: usize) -> Vec> { + if n == 0 { + return vec![]; + } + let mut r = vec![FpVar::zero(); n]; + r[0] = FpVar::one(); + for i in 1..n { + r[i] = &r[i - 1] * &a; + } + r +} + +/// returns a vector containing βᵢ* = βᵢ + α ⋅ δᵢ pub fn betas_star(betas: &[F], deltas: &[F], alpha: F) -> Vec { betas .iter() @@ -30,3 +56,92 @@ pub fn betas_star(betas: &[F], deltas: &[F], alpha: F) -> Vec .map(|(beta_i, delta_i_alpha)| *beta_i + delta_i_alpha) .collect() } + +/// The in-circuit version of `betas_star` +pub fn betas_star_var( + betas: &[FpVar], + deltas: &[FpVar], + alpha: &FpVar, +) -> Vec> { + betas + .iter() + .zip(deltas) + .map(|(beta_i, delta_i)| beta_i + alpha * delta_i) + .collect::>>() +} + +#[cfg(test)] +mod tests { + use std::error::Error; + + use ark_bn254::Fr; + use ark_r1cs_std::{alloc::AllocVar, R1CSVar}; + use ark_relations::r1cs::ConstraintSystem; + use ark_std::{test_rng, UniformRand}; + + use super::*; + + #[test] + fn test_exponential_powers() -> Result<(), Box> { + let rng = &mut test_rng(); + + for t in 1..10 { + let cs = ConstraintSystem::::new_ref(); + + let b = Fr::rand(rng); + let b_var = FpVar::new_witness(cs.clone(), || Ok(b))?; + + let r = exponential_powers(b, t); + let r_var = exponential_powers_var(b_var, t); + + assert_eq!(r, r_var.value()?); + assert!(cs.is_satisfied()?); + } + + Ok(()) + } + + #[test] + fn test_all_powers() -> Result<(), Box> { + let rng = &mut test_rng(); + + for n in 1..10 { + let cs = ConstraintSystem::::new_ref(); + + let a = Fr::rand(rng); + let a_var = FpVar::new_witness(cs.clone(), || Ok(a))?; + + let r = all_powers(a, n); + let r_var = all_powers_var(a_var, n); + + assert_eq!(r, r_var.value()?); + assert!(cs.is_satisfied()?); + } + + Ok(()) + } + + #[test] + fn test_betas_star() -> Result<(), Box> { + let rng = &mut test_rng(); + + for t in 1..10 { + let cs = ConstraintSystem::::new_ref(); + + let betas = (0..t).map(|_| Fr::rand(rng)).collect::>(); + let deltas = 
(0..t).map(|_| Fr::rand(rng)).collect::>(); + let alpha = Fr::rand(rng); + + let betas_var = Vec::new_witness(cs.clone(), || Ok(betas.clone()))?; + let deltas_var = Vec::new_witness(cs.clone(), || Ok(deltas.clone()))?; + let alpha_var = FpVar::new_witness(cs.clone(), || Ok(alpha))?; + + let r = betas_star(&betas, &deltas, alpha); + let r_var = betas_star_var(&betas_var, &deltas_var, &alpha_var); + assert_eq!(r, r_var.value()?); + assert!(cs.is_satisfied()?); + } + + Ok(()) + } +} diff --git a/folding-schemes/src/frontend/circom/mod.rs b/folding-schemes/src/frontend/circom/mod.rs index 9259a94f..303a5de8 100644 --- a/folding-schemes/src/frontend/circom/mod.rs +++ b/folding-schemes/src/frontend/circom/mod.rs @@ -207,8 +207,7 @@ impl CircomFCircuit { pub mod tests { use super::*; use ark_bn254::Fr; - use ark_r1cs_std::alloc::AllocVar; - use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem}; + use ark_relations::r1cs::ConstraintSystem; // Tests the step_native function of CircomFCircuit. #[test] diff --git a/folding-schemes/src/frontend/circom/utils.rs b/folding-schemes/src/frontend/circom/utils.rs index 4bdab8b7..9ee7dfb0 100644 --- a/folding-schemes/src/frontend/circom/utils.rs +++ b/folding-schemes/src/frontend/circom/utils.rs @@ -110,7 +110,6 @@ mod tests { use ark_circom::circom::{CircomBuilder, CircomConfig}; use ark_circom::CircomCircuit; use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem}; - use std::path::PathBuf; //To generate .r1cs and .wasm files, run the below command in the terminal. //bash ./folding-schemes/src/frontend/circom/test_folder/compile.sh diff --git a/folding-schemes/src/frontend/mod.rs b/folding-schemes/src/frontend/mod.rs index 59f18eb1..e8afa3fa 100644 --- a/folding-schemes/src/frontend/mod.rs +++ b/folding-schemes/src/frontend/mod.rs @@ -5,6 +5,7 @@ use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError}; use ark_std::fmt::Debug; pub mod circom; +pub mod noname; /// FCircuit defines the trait of the circuit of the F function, which is the one being folded (ie. /// inside the agmented F' function). 
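For reference, the `FCircuit` trait named in the context line above is the user-facing hook for the folded step function: an implementor supplies the step both natively (`step_native`) and as R1CS constraints (`generate_step_constraints`), which is exactly what the new noname frontend below does. A minimal sketch of such an implementation for the step z_{i+1} = z_i^3 + z_i + 5 follows; it is not part of the diff, and the generic parameters (e.g. `F: PrimeField`) are assumptions reconstructed by hand, since the patch text above has its angle-bracketed type parameters stripped.

// Sketch only (not part of the patch): CubicSketchFCircuit is a hypothetical FCircuit
// implementation for the step z_{i+1} = z_i^3 + z_i + 5, with no external inputs.
use ark_ff::PrimeField;
use ark_r1cs_std::fields::{fp::FpVar, FieldVar};
use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError};
use core::marker::PhantomData;
use folding_schemes::{frontend::FCircuit, Error};

#[derive(Clone, Copy, Debug)]
pub struct CubicSketchFCircuit<F: PrimeField> {
    _f: PhantomData<F>,
}

impl<F: PrimeField> FCircuit<F> for CubicSketchFCircuit<F> {
    type Params = ();

    fn new(_params: Self::Params) -> Result<Self, Error> {
        Ok(Self { _f: PhantomData })
    }

    fn state_len(&self) -> usize {
        1
    }

    fn external_inputs_len(&self) -> usize {
        0
    }

    // Native evaluation of the step, used to compute the next IVC state.
    fn step_native(
        &self,
        _i: usize,
        z_i: Vec<F>,
        _external_inputs: Vec<F>,
    ) -> Result<Vec<F>, Error> {
        Ok(vec![z_i[0] * z_i[0] * z_i[0] + z_i[0] + F::from(5u32)])
    }

    // The same step expressed as constraints inside the augmented circuit.
    fn generate_step_constraints(
        &self,
        _cs: ConstraintSystemRef<F>,
        _i: usize,
        z_i: Vec<FpVar<F>>,
        _external_inputs: Vec<FpVar<F>>,
    ) -> Result<Vec<FpVar<F>>, SynthesisError> {
        let z = &z_i[0];
        Ok(vec![z * z * z + z + FpVar::constant(F::from(5u32))])
    }
}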
@@ -52,9 +53,7 @@ pub mod tests { use super::*; use ark_bn254::Fr; use ark_r1cs_std::{alloc::AllocVar, eq::EqGadget}; - use ark_relations::r1cs::{ - ConstraintSynthesizer, ConstraintSystem, ConstraintSystemRef, SynthesisError, - }; + use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem}; use core::marker::PhantomData; /// CubicFCircuit is a struct that implements the FCircuit trait, for the R1CS example circuit diff --git a/folding-schemes/src/frontend/noname/mod.rs b/folding-schemes/src/frontend/noname/mod.rs new file mode 100644 index 00000000..61ac1e7a --- /dev/null +++ b/folding-schemes/src/frontend/noname/mod.rs @@ -0,0 +1,201 @@ +use crate::Error; +use ark_noname::sonobe::NonameSonobeCircuit; +use ark_r1cs_std::alloc::AllocVar; +use ark_r1cs_std::fields::fp::FpVar; +use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError}; +use num_bigint::BigUint; +use std::marker::PhantomData; + +use self::utils::NonameInputs; + +use super::FCircuit; +use ark_ff::PrimeField; +use ark_noname::utils::compile_source_code; +use noname::backends::{r1cs::R1CS as R1CSNoname, BackendField}; +use noname::witness::CompiledCircuit; +pub mod utils; +#[derive(Debug, Clone)] +pub struct NonameFCircuit { + pub state_len: usize, + pub external_inputs_len: usize, + pub circuit: CompiledCircuit>, + _f: PhantomData, +} + +impl FCircuit for NonameFCircuit { + type Params = (String, usize, usize); + + fn new(params: Self::Params) -> Result { + let (code, state_len, external_inputs_len) = params; + let compiled_circuit = compile_source_code::(&code).map_err(|_| { + Error::Other("Encountered an error while compiling a noname circuit".to_owned()) + })?; + Ok(NonameFCircuit { + state_len, + external_inputs_len, + circuit: compiled_circuit, + _f: PhantomData, + }) + } + + fn state_len(&self) -> usize { + self.state_len + } + + fn external_inputs_len(&self) -> usize { + self.external_inputs_len + } + + fn step_native( + &self, + _i: usize, + z_i: Vec, + external_inputs: Vec, + ) -> Result, crate::Error> { + let wtns_external_inputs = + NonameInputs::from((&external_inputs, "external_inputs".to_string())); + let wtns_ivc_inputs = NonameInputs::from((&z_i, "ivc_inputs".to_string())); + + let noname_witness = self + .circuit + .generate_witness(wtns_ivc_inputs.0, wtns_external_inputs.0) + .map_err(|e| Error::WitnessCalculationError(e.to_string()))?; + + let z_i1_end_index = z_i.len() + 1; + let assigned_z_i1 = (1..z_i1_end_index) + .map(|idx| { + let value: BigUint = Into::into(noname_witness.witness[idx]); + F::from(value) + }) + .collect(); + + Ok(assigned_z_i1) + } + + fn generate_step_constraints( + &self, + cs: ConstraintSystemRef, + _i: usize, + z_i: Vec>, + external_inputs: Vec>, + ) -> Result>, SynthesisError> { + let wtns_external_inputs = + NonameInputs::from_fpvars((&external_inputs, "external_inputs".to_string()))?; + let wtns_ivc_inputs = NonameInputs::from_fpvars((&z_i, "ivc_inputs".to_string()))?; + let noname_witness = self + .circuit + .generate_witness(wtns_ivc_inputs.0, wtns_external_inputs.0) + .map_err(|_| SynthesisError::Unsatisfiable)?; + let z_i1_end_index = z_i.len() + 1; + let assigned_z_i1: Vec> = (1..z_i1_end_index) + .map(|idx| -> Result, SynthesisError> { + // the assigned zi1 is of the same size than the initial zi and is located in the + // output of the witness vector + // we prefer to assign z_i1 here since (1) we have to return it, (2) we cant return + // anything with the `generate_constraints` method used below + let value: BigUint = 
Into::into(noname_witness.witness[idx]); + let field_element = F::from(value); + FpVar::::new_witness(cs.clone(), || Ok(field_element)) + }) + .collect::>, SynthesisError>>()?; + + let noname_circuit = NonameSonobeCircuit { + compiled_circuit: self.circuit.clone(), + witness: noname_witness, + assigned_z_i: &z_i, + assigned_external_inputs: &external_inputs, + assigned_z_i1: &assigned_z_i1, + }; + noname_circuit.generate_constraints(cs.clone())?; + + Ok(assigned_z_i1) + } +} + +#[cfg(test)] +mod tests { + + use ark_bn254::Fr; + use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar, R1CSVar}; + use noname::backends::r1cs::R1csBn254Field; + + use crate::frontend::FCircuit; + + use super::NonameFCircuit; + use ark_relations::r1cs::ConstraintSystem; + + const NONAME_CIRCUIT_EXTERNAL_INPUTS: &str = + "fn main(pub ivc_inputs: [Field; 2], external_inputs: [Field; 2]) -> [Field; 2] { + let xx = external_inputs[0] + ivc_inputs[0]; + let yy = external_inputs[1] * ivc_inputs[1]; + assert_eq(yy, xx); + return [xx, yy]; +}"; + + const NONAME_CIRCUIT_NO_EXTERNAL_INPUTS: &str = + "fn main(pub ivc_inputs: [Field; 2]) -> [Field; 2] { + let out = ivc_inputs[0] * ivc_inputs[1]; + return [out, ivc_inputs[1]]; +}"; + + #[test] + fn test_step_native() { + let cs = ConstraintSystem::::new_ref(); + let params = (NONAME_CIRCUIT_EXTERNAL_INPUTS.to_owned(), 2, 2); + let circuit = NonameFCircuit::::new(params).unwrap(); + let inputs_public = vec![Fr::from(2), Fr::from(5)]; + let inputs_private = vec![Fr::from(8), Fr::from(2)]; + + let ivc_inputs_var = + Vec::>::new_witness(cs.clone(), || Ok(inputs_public.clone())).unwrap(); + let external_inputs_var = + Vec::>::new_witness(cs.clone(), || Ok(inputs_private.clone())).unwrap(); + + let z_i1 = circuit + .generate_step_constraints(cs.clone(), 0, ivc_inputs_var, external_inputs_var) + .unwrap(); + let z_i1_native = circuit + .step_native(0, inputs_public, inputs_private) + .unwrap(); + + assert_eq!(z_i1[0].value().unwrap(), z_i1_native[0]); + assert_eq!(z_i1[1].value().unwrap(), z_i1_native[1]); + } + + #[test] + fn test_step_constraints() { + let cs = ConstraintSystem::::new_ref(); + let params = (NONAME_CIRCUIT_EXTERNAL_INPUTS.to_owned(), 2, 2); + let circuit = NonameFCircuit::::new(params).unwrap(); + let inputs_public = vec![Fr::from(2), Fr::from(5)]; + let inputs_private = vec![Fr::from(8), Fr::from(2)]; + + let ivc_inputs_var = + Vec::>::new_witness(cs.clone(), || Ok(inputs_public)).unwrap(); + let external_inputs_var = + Vec::>::new_witness(cs.clone(), || Ok(inputs_private)).unwrap(); + + let z_i1 = circuit + .generate_step_constraints(cs.clone(), 0, ivc_inputs_var, external_inputs_var) + .unwrap(); + assert!(cs.is_satisfied().unwrap()); + assert_eq!(z_i1[0].value().unwrap(), Fr::from(10_u8)); + assert_eq!(z_i1[1].value().unwrap(), Fr::from(10_u8)); + } + + #[test] + fn test_generate_constraints_no_external_inputs() { + let cs = ConstraintSystem::::new_ref(); + let params = (NONAME_CIRCUIT_NO_EXTERNAL_INPUTS.to_owned(), 2, 0); + let inputs_public = vec![Fr::from(2), Fr::from(5)]; + + let ivc_inputs_var = + Vec::>::new_witness(cs.clone(), || Ok(inputs_public)).unwrap(); + + let f_circuit = NonameFCircuit::::new(params).unwrap(); + f_circuit + .generate_step_constraints(cs.clone(), 0, ivc_inputs_var, vec![]) + .unwrap(); + assert!(cs.is_satisfied().unwrap()); + } +} diff --git a/folding-schemes/src/frontend/noname/utils.rs b/folding-schemes/src/frontend/noname/utils.rs new file mode 100644 index 00000000..fdd92817 --- /dev/null +++ 
b/folding-schemes/src/frontend/noname/utils.rs @@ -0,0 +1,58 @@ +use std::collections::HashMap; + +use ark_ff::PrimeField; +use ark_r1cs_std::{fields::fp::FpVar, R1CSVar}; +use ark_relations::r1cs::SynthesisError; +use noname::inputs::JsonInputs; +use serde_json::json; + +pub struct NonameInputs(pub JsonInputs); + +impl From<(&Vec, String)> for NonameInputs { + fn from(value: (&Vec, String)) -> Self { + let (values, key) = value; + let mut inputs = HashMap::new(); + if values.is_empty() { + NonameInputs(JsonInputs(inputs)) + } else { + let field_elements: Vec = values + .iter() + .map(|value| { + if value.is_zero() { + "0".to_string() + } else { + value.to_string() + } + }) + .collect(); + inputs.insert(key, json!(field_elements)); + NonameInputs(JsonInputs(inputs)) + } + } +} + +impl NonameInputs { + pub fn from_fpvars( + value: (&Vec>, String), + ) -> Result { + let (values, key) = value; + let mut inputs = HashMap::new(); + if values.is_empty() { + Ok(NonameInputs(JsonInputs(inputs))) + } else { + let field_elements: Vec = values + .iter() + .map(|var| { + let value = var.value()?; + if value.is_zero() { + Ok("0".to_string()) + } else { + Ok(value.to_string()) + } + }) + .collect::, SynthesisError>>()?; + inputs.insert(key, json!(field_elements)); + Ok(NonameInputs(JsonInputs(inputs))) + } + } +} diff --git a/folding-schemes/src/lib.rs b/folding-schemes/src/lib.rs index 903e7b15..73b30e8d 100644 --- a/folding-schemes/src/lib.rs +++ b/folding-schemes/src/lib.rs @@ -1,7 +1,6 @@ #![allow(non_snake_case)] #![allow(non_upper_case_globals)] #![allow(non_camel_case_types)] -#![allow(clippy::upper_case_acronyms)] use ark_ec::{pairing::Pairing, CurveGroup}; use ark_ff::PrimeField; @@ -11,7 +10,7 @@ use thiserror::Error; use crate::frontend::FCircuit; -pub mod ccs; +pub mod arith; pub mod commitment; pub mod constants; pub mod folding; @@ -70,6 +69,8 @@ pub enum Error { NotEnoughSteps, #[error("Evaluation failed")] EvaluationFail, + #[error("{0} can not be zero")] + CantBeZero(String), // Commitment errors #[error("Pedersen parameters length is not sufficient (generators.len={0} < vector.len={1} unsatisfied)")] @@ -92,10 +93,16 @@ pub enum Error { NotSupported(String), #[error("max i-th step reached (usize limit reached)")] MaxStep, - #[error("Circom Witness calculation error: {0}")] + #[error("Witness calculation error: {0}")] WitnessCalculationError(String), #[error("BigInt to PrimeField conversion error: {0}")] BigIntConversionError(String), + #[error("Failed to serde: {0}")] + JSONSerdeError(String), + #[error("Multi instances folding not supported in this scheme")] + NoMultiInstances, + #[error("Missing 'other' instances, since this is a multi-instances folding scheme")] + MissingOtherInstances, } /// FoldingScheme defines trait that is implemented by the diverse folding schemes. 
It is defined @@ -110,23 +117,31 @@ where C2::BaseField: PrimeField, FC: FCircuit, { - type PreprocessorParam: Debug; - type ProverParam: Debug; - type VerifierParam: Debug; - type CommittedInstanceWithWitness: Debug; - type CFCommittedInstanceWithWitness: Debug; // CycleFold CommittedInstance & Witness + type PreprocessorParam: Debug + Clone; + type ProverParam: Debug + Clone; + type VerifierParam: Debug + Clone; + type RunningInstance: Debug; // contains the CommittedInstance + Witness + type IncomingInstance: Debug; // contains the CommittedInstance + Witness + type MultiCommittedInstanceWithWitness: Debug; // type used for the extra instances in the multi-instance folding setting + type CFInstance: Debug; // CycleFold CommittedInstance & Witness fn preprocess( + rng: impl RngCore, prep_param: &Self::PreprocessorParam, ) -> Result<(Self::ProverParam, Self::VerifierParam), Error>; fn init( - pp: &Self::ProverParam, + params: &(Self::ProverParam, Self::VerifierParam), step_circuit: FC, z_0: Vec, // initial state ) -> Result; - fn prove_step(&mut self, external_inputs: Vec) -> Result<(), Error>; + fn prove_step( + &mut self, + rng: impl RngCore, + external_inputs: Vec, + other_instances: Option, + ) -> Result<(), Error>; // returns the state at the current step fn state(&self) -> Vec; @@ -136,9 +151,9 @@ where fn instances( &self, ) -> ( - Self::CommittedInstanceWithWitness, - Self::CommittedInstanceWithWitness, - Self::CFCommittedInstanceWithWitness, + Self::RunningInstance, + Self::IncomingInstance, + Self::CFInstance, ); fn verify( @@ -147,12 +162,41 @@ where z_i: Vec, // last state // number of steps between the initial state and the last state num_steps: C1::ScalarField, - running_instance: Self::CommittedInstanceWithWitness, - incoming_instance: Self::CommittedInstanceWithWitness, - cyclefold_instance: Self::CFCommittedInstanceWithWitness, + running_instance: Self::RunningInstance, + incoming_instance: Self::IncomingInstance, + cyclefold_instance: Self::CFInstance, ) -> Result<(), Error>; } +/// Trait with auxiliary methods for multi-folding schemes (ie. HyperNova, ProtoGalaxy, etc), +/// allowing to create new instances for the multifold. +pub trait MultiFolding: Clone + Debug +where + C1: CurveGroup, + C2::BaseField: PrimeField, + FC: FCircuit, +{ + type RunningInstance: Debug; + type IncomingInstance: Debug; + type MultiInstance: Debug; + + /// Creates a new RunningInstance for the given state, to be folded in the multi-folding step. + fn new_running_instance( + &self, + rng: impl RngCore, + state: Vec, + external_inputs: Vec, + ) -> Result; + + /// Creates a new IncomingInstance for the given state, to be folded in the multi-folding step. 
+ fn new_incoming_instance( + &self, + rng: impl RngCore, + state: Vec, + external_inputs: Vec, + ) -> Result; +} + pub trait Decider< C1: CurveGroup, C2: CurveGroup, @@ -162,16 +206,22 @@ pub trait Decider< C1: CurveGroup, C2::BaseField: PrimeField, { + type PreprocessorParam: Debug; type ProverParam: Clone; type Proof; type VerifierParam; type PublicInput: Debug; - type CommittedInstanceWithWitness: Debug; type CommittedInstance: Clone + Debug; + fn preprocess( + rng: impl RngCore + CryptoRng, + prep_param: &Self::PreprocessorParam, + fs: FS, + ) -> Result<(Self::ProverParam, Self::VerifierParam), Error>; + fn prove( - pp: Self::ProverParam, rng: impl RngCore + CryptoRng, + pp: Self::ProverParam, folding_scheme: FS, ) -> Result; diff --git a/folding-schemes/src/transcript/mod.rs b/folding-schemes/src/transcript/mod.rs index 0e22a114..2c728d46 100644 --- a/folding-schemes/src/transcript/mod.rs +++ b/folding-schemes/src/transcript/mod.rs @@ -1,31 +1,103 @@ -use crate::Error; +use ark_crypto_primitives::sponge::{constraints::CryptographicSpongeVar, CryptographicSponge}; use ark_ec::CurveGroup; use ark_ff::PrimeField; -use ark_r1cs_std::{boolean::Boolean, fields::fp::FpVar}; -use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError}; -use ark_std::fmt::Debug; +use ark_r1cs_std::{ + boolean::Boolean, fields::fp::FpVar, groups::CurveVar, ToConstraintFieldGadget, +}; +use ark_relations::r1cs::SynthesisError; pub mod poseidon; -pub trait Transcript { - type TranscriptConfig: Debug; +/// An interface for objects that can be absorbed by a `Transcript`. +/// +/// Matches `Absorb` in `ark-crypto-primitives`. +pub trait AbsorbNonNative { + /// Converts the object into field elements that can be absorbed by a `Transcript`. + /// Append the list to `dest` + fn to_native_sponge_field_elements(&self, dest: &mut Vec); - fn new(config: &Self::TranscriptConfig) -> Self; - fn absorb(&mut self, v: &C::ScalarField); - fn absorb_vec(&mut self, v: &[C::ScalarField]); - fn absorb_point(&mut self, v: &C) -> Result<(), Error>; - fn get_challenge(&mut self) -> C::ScalarField; + /// Converts the object into field elements that can be absorbed by a `Transcript`. + /// Return the list as `Vec` + fn to_native_sponge_field_elements_as_vec(&self) -> Vec { + let mut result = Vec::new(); + self.to_native_sponge_field_elements(&mut result); + result + } +} + +/// An interface for objects that can be absorbed by a `TranscriptVar` whose constraint field +/// is `F`. +/// +/// Matches `AbsorbGadget` in `ark-crypto-primitives`. +pub trait AbsorbNonNativeGadget { + /// Converts the object into field elements that can be absorbed by a `TranscriptVar`. + fn to_native_sponge_field_elements(&self) -> Result>, SynthesisError>; +} + +pub trait Transcript: CryptographicSponge { + /// `absorb_point` is for absorbing points whose `BaseField` is the field of + /// the sponge, i.e., the type `C` of these points should satisfy + /// `C::BaseField = F`. + /// + /// If the sponge field `F` is `C::ScalarField`, call `absorb_nonnative` + /// instead. + fn absorb_point>(&mut self, v: &C); + /// `absorb_nonnative` is for structs that contain non-native (field or + /// group) elements, including: + /// + /// - A field element of type `T: PrimeField` that will be absorbed into a + /// sponge that operates in another field `F != T`. + /// - A group element of type `C: CurveGroup` that will be absorbed into a + /// sponge that operates in another field `F != C::BaseField`, e.g., + /// `F = C::ScalarField`. 
+ /// - A `CommittedInstance` on the secondary curve (used for CycleFold) that + /// will be absorbed into a sponge that operates in the (scalar field of + /// the) primary curve. + /// + /// Note that although a `CommittedInstance` for `AugmentedFCircuit` on + /// the primary curve also contains non-native elements, we still regard + /// it as native, because the sponge is on the same curve. + fn absorb_nonnative>(&mut self, v: &V); + + fn get_challenge(&mut self) -> F; /// get_challenge_nbits returns a field element of size nbits fn get_challenge_nbits(&mut self, nbits: usize) -> Vec; - fn get_challenges(&mut self, n: usize) -> Vec; + fn get_challenges(&mut self, n: usize) -> Vec; } -pub trait TranscriptVar { - type TranscriptVarConfig: Debug; +pub trait TranscriptVar: + CryptographicSpongeVar +{ + /// `absorb_point` is for absorbing points whose `BaseField` is the field of + /// the sponge, i.e., the type `C` of these points should satisfy + /// `C::BaseField = F`. + /// + /// If the sponge field `F` is `C::ScalarField`, call `absorb_nonnative` + /// instead. + fn absorb_point, GC: CurveVar + ToConstraintFieldGadget>( + &mut self, + v: &GC, + ) -> Result<(), SynthesisError>; + /// `absorb_nonnative` is for structs that contain non-native (field or + /// group) elements, including: + /// + /// - A field element of type `T: PrimeField` that will be absorbed into a + /// sponge that operates in another field `F != T`. + /// - A group element of type `C: CurveGroup` that will be absorbed into a + /// sponge that operates in another field `F != C::BaseField`, e.g., + /// `F = C::ScalarField`. + /// - A `CommittedInstance` on the secondary curve (used for CycleFold) that + /// will be absorbed into a sponge that operates in the (scalar field of + /// the) primary curve. + /// + /// Note that although a `CommittedInstance` for `AugmentedFCircuit` on + /// the primary curve also contains non-native elements, we still regard + /// it as native, because the sponge is on the same curve. + fn absorb_nonnative>( + &mut self, + v: &V, + ) -> Result<(), SynthesisError>; - fn new(cs: ConstraintSystemRef, poseidon_config: &Self::TranscriptVarConfig) -> Self; - fn absorb(&mut self, v: FpVar) -> Result<(), SynthesisError>; - fn absorb_vec(&mut self, v: &[FpVar]) -> Result<(), SynthesisError>; fn get_challenge(&mut self) -> Result, SynthesisError>; /// returns the bit representation of the challenge, we use its output in-circuit for the /// `GC.scalar_mul_le` method. 
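The doc comments above distinguish `absorb_point` (points whose base field is the sponge field) from `absorb_nonnative` (anything living over a different field, such as the CycleFold committed instances). A short usage sketch of the native side follows; it is not part of the diff, and the `Transcript<F>` / `PoseidonSponge<F>` generics are assumptions, since the type parameters are stripped in the patch text above.

// Sketch only (not part of the patch): driving the reworked Transcript API natively,
// mirroring the `absorb_nonnative` test added later in this patch.
use ark_bn254::{Fr, G1Projective as G1};
use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, CryptographicSponge};
use ark_std::UniformRand;
use folding_schemes::transcript::{poseidon::poseidon_canonical_config, Transcript};

fn transcript_usage_sketch() {
    let rng = &mut ark_std::test_rng();
    let config = poseidon_canonical_config::<Fr>();
    let mut transcript = PoseidonSponge::<Fr>::new(&config);

    // Field elements native to the sponge go through the sponge's own `absorb`.
    transcript.absorb(&Fr::rand(rng));

    // A bn254 G1 point lives over Fq, so from an Fr-sponge it is absorbed as non-native.
    let p = G1::rand(rng);
    transcript.absorb_nonnative(&p);

    // Challenges are squeezed (and re-absorbed by the implementation) via `get_challenge`.
    let _c: Fr = transcript.get_challenge();
}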
diff --git a/folding-schemes/src/transcript/poseidon.rs b/folding-schemes/src/transcript/poseidon.rs index 40169c37..0496a17a 100644 --- a/folding-schemes/src/transcript/poseidon.rs +++ b/folding-schemes/src/transcript/poseidon.rs @@ -1,182 +1,223 @@ -mod bn254; -mod grumpkin; - -use ark_bn254::Fr as Bn254_Fr; use ark_crypto_primitives::sponge::{ constraints::CryptographicSpongeVar, poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge}, Absorb, CryptographicSponge, }; -use ark_ec::{AffineRepr, CurveGroup, Group}; -use ark_ff::{BigInteger, Field, PrimeField}; -use ark_grumpkin::Fr as Grumpkin_Fr; -use ark_r1cs_std::{boolean::Boolean, fields::fp::FpVar}; -use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError}; -use ark_std::Zero; - -use crate::transcript::Transcript; -use crate::Error; - -use super::TranscriptVar; - -/// PoseidonTranscript implements the Transcript trait using the Poseidon hash -pub struct PoseidonTranscript -where - ::ScalarField: Absorb, -{ - sponge: PoseidonSponge, -} +use ark_ec::{AffineRepr, CurveGroup}; +use ark_ff::{BigInteger, PrimeField}; +use ark_r1cs_std::{ + boolean::Boolean, fields::fp::FpVar, groups::CurveVar, ToConstraintFieldGadget, +}; +use ark_relations::r1cs::SynthesisError; -impl Transcript for PoseidonTranscript -where - ::ScalarField: Absorb, -{ - type TranscriptConfig = PoseidonConfig; +use super::{AbsorbNonNative, AbsorbNonNativeGadget, Transcript, TranscriptVar}; - fn new(poseidon_config: &Self::TranscriptConfig) -> Self { - let sponge = PoseidonSponge::::new(poseidon_config); - Self { sponge } - } - fn absorb(&mut self, v: &C::ScalarField) { - self.sponge.absorb(&v); +impl Transcript for PoseidonSponge { + // Compatible with the in-circuit `TranscriptVar::absorb_point` + fn absorb_point>(&mut self, p: &C) { + let (x, y) = match p.into_affine().xy() { + Some((&x, &y)) => (x, y), + None => (C::BaseField::zero(), C::BaseField::zero()), + }; + self.absorb(&x); + self.absorb(&y); } - fn absorb_vec(&mut self, v: &[C::ScalarField]) { - self.sponge.absorb(&v); + fn absorb_nonnative>(&mut self, v: &V) { + self.absorb(&v.to_native_sponge_field_elements_as_vec()); } - fn absorb_point(&mut self, p: &C) -> Result<(), Error> { - self.sponge.absorb(&prepare_point(p)?); - Ok(()) - } - fn get_challenge(&mut self) -> C::ScalarField { - let c = self.sponge.squeeze_field_elements(1); - self.sponge.absorb(&c[0]); + fn get_challenge(&mut self) -> F { + let c = self.squeeze_field_elements(1); + self.absorb(&c[0]); c[0] } fn get_challenge_nbits(&mut self, nbits: usize) -> Vec { - self.sponge.squeeze_bits(nbits) + let bits = self.squeeze_bits(nbits); + self.absorb(&F::from(F::BigInt::from_bits_le(&bits))); + bits } - fn get_challenges(&mut self, n: usize) -> Vec { - let c = self.sponge.squeeze_field_elements(n); - self.sponge.absorb(&c); + fn get_challenges(&mut self, n: usize) -> Vec { + let c = self.squeeze_field_elements(n); + self.absorb(&c); c } } -// Returns the point coordinates in Fr, so it can be absorbed by the transcript. It does not work -// over bytes in order to have a logic that can be reproduced in-circuit. 
-fn prepare_point(p: &C) -> Result, Error> { - let affine = p.into_affine(); - let zero_point = (&C::BaseField::zero(), &C::BaseField::zero()); - let xy = affine.xy().unwrap_or(zero_point); - - let x_bi = - xy.0.to_base_prime_field_elements() - .next() - .expect("a") - .into_bigint(); - let y_bi = - xy.1.to_base_prime_field_elements() - .next() - .expect("a") - .into_bigint(); - Ok(vec![ - C::ScalarField::from_le_bytes_mod_order(x_bi.to_bytes_le().as_ref()), - C::ScalarField::from_le_bytes_mod_order(y_bi.to_bytes_le().as_ref()), - ]) -} - -/// PoseidonTranscriptVar implements the gadget compatible with PoseidonTranscript -pub struct PoseidonTranscriptVar { - sponge: PoseidonSpongeVar, -} -impl TranscriptVar for PoseidonTranscriptVar { - type TranscriptVarConfig = PoseidonConfig; - - fn new(cs: ConstraintSystemRef, poseidon_config: &Self::TranscriptVarConfig) -> Self { - let sponge = PoseidonSpongeVar::::new(cs, poseidon_config); - Self { sponge } - } - fn absorb(&mut self, v: FpVar) -> Result<(), SynthesisError> { - self.sponge.absorb(&v) +impl TranscriptVar> for PoseidonSpongeVar { + fn absorb_point< + C: CurveGroup, + GC: CurveVar + ToConstraintFieldGadget, + >( + &mut self, + v: &GC, + ) -> Result<(), SynthesisError> { + let mut vec = v.to_constraint_field()?; + // The last element in the vector tells whether the point is infinity, + // but we can in fact avoid absorbing it without loss of soundness. + // This is because the `to_constraint_field` method internally invokes + // [`ProjectiveVar::to_afine`](https://github.com/arkworks-rs/r1cs-std/blob/4020fbc22625621baa8125ede87abaeac3c1ca26/src/groups/curves/short_weierstrass/mod.rs#L160-L195), + // which guarantees that an infinity point is represented as `(0, 0)`, + // but the y-coordinate of a non-infinity point is never 0 (for why, see + // https://crypto.stackexchange.com/a/108242 ). + vec.pop(); + self.absorb(&vec) } - fn absorb_vec(&mut self, v: &[FpVar]) -> Result<(), SynthesisError> { - self.sponge.absorb(&v) + fn absorb_nonnative>( + &mut self, + v: &V, + ) -> Result<(), SynthesisError> { + self.absorb(&v.to_native_sponge_field_elements()?) } fn get_challenge(&mut self) -> Result, SynthesisError> { - let c = self.sponge.squeeze_field_elements(1)?; - self.sponge.absorb(&c[0])?; + let c = self.squeeze_field_elements(1)?; + self.absorb(&c[0])?; Ok(c[0].clone()) } /// returns the bit representation of the challenge, we use its output in-circuit for the /// `GC.scalar_mul_le` method. fn get_challenge_nbits(&mut self, nbits: usize) -> Result>, SynthesisError> { - self.sponge.squeeze_bits(nbits) + let bits = self.squeeze_bits(nbits)?; + self.absorb(&Boolean::le_bits_to_fp_var(&bits)?)?; + Ok(bits) } fn get_challenges(&mut self, n: usize) -> Result>, SynthesisError> { - let c = self.sponge.squeeze_field_elements(n)?; - self.sponge.absorb(&c)?; + let c = self.squeeze_field_elements(n)?; + self.absorb(&c)?; Ok(c) } } -/// This Poseidon configuration generator agrees with Circom's Poseidon(4) in the case of BN254's scalar field and -/// makes use of the constants generated by the reference implementation script in the case of Grumpkin's scalar field. 
+/// This Poseidon configuration generator agrees with Circom's Poseidon(4) in the case of BN254's scalar field pub fn poseidon_canonical_config() -> PoseidonConfig { - let field_modulus_bytes = F::MODULUS.to_bytes_be(); - - if field_modulus_bytes == Bn254_Fr::MODULUS.to_bytes_be() { - bn254::poseidon_config::() - } else if field_modulus_bytes == Grumpkin_Fr::MODULUS.to_bytes_be() { - grumpkin::poseidon_config::() - } else { - let full_rounds = 8; - let partial_rounds = 31; - let alpha = 5; - let rate = 2; - - let (ark, mds) = ark_crypto_primitives::sponge::poseidon::find_poseidon_ark_and_mds::( - F::MODULUS_BIT_SIZE as u64, - rate, - full_rounds, - partial_rounds, - 0, - ); + // 120 bit security target as in + // https://eprint.iacr.org/2019/458.pdf + // t = rate + 1 - PoseidonConfig::new( - full_rounds as usize, - partial_rounds as usize, - alpha, - mds, - ark, - rate, - 1, - ) - } + let full_rounds = 8; + let partial_rounds = 60; + let alpha = 5; + let rate = 4; + + let (ark, mds) = ark_crypto_primitives::sponge::poseidon::find_poseidon_ark_and_mds::( + F::MODULUS_BIT_SIZE as u64, + rate, + full_rounds, + partial_rounds, + 0, + ); + + PoseidonConfig::new( + full_rounds as usize, + partial_rounds as usize, + alpha, + mds, + ark, + rate, + 1, + ) } #[cfg(test)] pub mod tests { + use crate::folding::circuits::nonnative::affine::NonNativeAffineVar; + use super::*; - use ark_bn254::{constraints::GVar, Fq, Fr, G1Projective as G1}; - use ark_grumpkin::Projective; - use ark_r1cs_std::{alloc::AllocVar, groups::CurveVar, R1CSVar}; + use ark_bn254::{constraints::GVar, g1::Config, Fq, Fr, G1Projective as G1}; + use ark_ec::Group; + use ark_ff::UniformRand; + use ark_r1cs_std::{ + alloc::AllocVar, groups::curves::short_weierstrass::ProjectiveVar, R1CSVar, + }; use ark_relations::r1cs::ConstraintSystem; - use std::ops::Mul; + use ark_std::test_rng; + + // Test with value taken from https://github.com/iden3/circomlibjs/blob/43cc582b100fc3459cf78d903a6f538e5d7f38ee/test/poseidon.js#L32 + #[test] + fn check_against_circom_poseidon() { + use ark_bn254::Fr; + use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, CryptographicSponge}; + use std::str::FromStr; + + let config = poseidon_canonical_config::(); + let mut poseidon_sponge: PoseidonSponge<_> = CryptographicSponge::new(&config); + let v: Vec = vec!["1", "2", "3", "4"] + .into_iter() + .map(|x| Fr::from_str(x).unwrap()) + .collect(); + poseidon_sponge.absorb(&v); + poseidon_sponge.squeeze_field_elements::(1); + assert!( + poseidon_sponge.state[0] + == Fr::from_str( + "18821383157269793795438455681495246036402687001665670618754263018637548127333" + ) + .unwrap() + ); + } + + #[test] + fn test_transcript_and_transcriptvar_absorb_native_point() { + // use 'native' transcript + let config = poseidon_canonical_config::(); + let mut tr = PoseidonSponge::::new(&config); + let rng = &mut test_rng(); + + let p = G1::rand(rng); + tr.absorb_point(&p); + let c = tr.get_challenge(); + + // use 'gadget' transcript + let cs = ConstraintSystem::::new_ref(); + let mut tr_var = PoseidonSpongeVar::::new(cs.clone(), &config); + let p_var = ProjectiveVar::>::new_witness( + ConstraintSystem::::new_ref(), + || Ok(p), + ) + .unwrap(); + tr_var.absorb_point(&p_var).unwrap(); + let c_var = tr_var.get_challenge().unwrap(); + + // assert that native & gadget transcripts return the same challenge + assert_eq!(c, c_var.value().unwrap()); + } + + #[test] + fn test_transcript_and_transcriptvar_absorb_nonnative_point() { + // use 'native' transcript + let config = 
poseidon_canonical_config::(); + let mut tr = PoseidonSponge::::new(&config); + let rng = &mut test_rng(); + + let p = G1::rand(rng); + tr.absorb_nonnative(&p); + let c = tr.get_challenge(); + + // use 'gadget' transcript + let cs = ConstraintSystem::::new_ref(); + let mut tr_var = PoseidonSpongeVar::::new(cs.clone(), &config); + let p_var = + NonNativeAffineVar::::new_witness(ConstraintSystem::::new_ref(), || Ok(p)) + .unwrap(); + tr_var.absorb_nonnative(&p_var).unwrap(); + let c_var = tr_var.get_challenge().unwrap(); + + // assert that native & gadget transcripts return the same challenge + assert_eq!(c, c_var.value().unwrap()); + } #[test] fn test_transcript_and_transcriptvar_get_challenge() { // use 'native' transcript let config = poseidon_canonical_config::(); - let mut tr = PoseidonTranscript::::new(&config); + let mut tr = PoseidonSponge::::new(&config); tr.absorb(&Fr::from(42_u32)); let c = tr.get_challenge(); // use 'gadget' transcript let cs = ConstraintSystem::::new_ref(); - let mut tr_var = PoseidonTranscriptVar::::new(cs.clone(), &config); + let mut tr_var = PoseidonSpongeVar::::new(cs.clone(), &config); let v = FpVar::::new_witness(cs.clone(), || Ok(Fr::from(42_u32))).unwrap(); - tr_var.absorb(v).unwrap(); + tr_var.absorb(&v).unwrap(); let c_var = tr_var.get_challenge().unwrap(); // assert that native & gadget transcripts return the same challenge @@ -189,7 +230,7 @@ pub mod tests { // use 'native' transcript let config = poseidon_canonical_config::(); - let mut tr = PoseidonTranscript::::new(&config); + let mut tr = PoseidonSponge::::new(&config); tr.absorb(&Fq::from(42_u32)); // get challenge from native transcript @@ -197,9 +238,9 @@ pub mod tests { // use 'gadget' transcript let cs = ConstraintSystem::::new_ref(); - let mut tr_var = PoseidonTranscriptVar::::new(cs.clone(), &config); + let mut tr_var = PoseidonSpongeVar::::new(cs.clone(), &config); let v = FpVar::::new_witness(cs.clone(), || Ok(Fq::from(42_u32))).unwrap(); - tr_var.absorb(v).unwrap(); + tr_var.absorb(&v).unwrap(); // get challenge from circuit transcript let c_var = tr_var.get_challenge_nbits(nbits).unwrap(); @@ -212,7 +253,7 @@ pub mod tests { // native c*P let c_Fr = Fr::from_bigint(BigInteger::from_bits_le(&c_bits)).unwrap(); - let cP_native = P.mul(c_Fr); + let cP_native = P * c_Fr; // native c*P using mul_bits_be (notice the .rev to convert the LE to BE) let cP_native_bits = P.mul_bits_be(c_bits.into_iter().rev()); diff --git a/folding-schemes/src/utils/espresso/sum_check/mod.rs b/folding-schemes/src/utils/espresso/sum_check/mod.rs index 86842c44..efb8b501 100644 --- a/folding-schemes/src/utils/espresso/sum_check/mod.rs +++ b/folding-schemes/src/utils/espresso/sum_check/mod.rs @@ -13,7 +13,7 @@ use crate::{ transcript::Transcript, utils::virtual_polynomial::{VPAuxInfo, VirtualPolynomial}, }; -use ark_ec::CurveGroup; +use ark_crypto_primitives::sponge::Absorb; use ark_ff::PrimeField; use ark_poly::univariate::DensePolynomial; use ark_poly::{DenseMultilinearExtension, DenseUVPolynomial, Polynomial}; @@ -22,7 +22,6 @@ use std::{fmt::Debug, marker::PhantomData, sync::Arc}; use crate::utils::sum_check::structs::IOPProverMessage; use crate::utils::sum_check::structs::IOPVerifierState; -use ark_ff::Field; use espresso_subroutines::poly_iop::prelude::PolyIOPErrors; use structs::{IOPProof, IOPProverState}; @@ -31,7 +30,7 @@ pub mod structs; pub mod verifier; /// A generic sum-check trait over a curve group -pub trait SumCheck { +pub trait SumCheck { type VirtualPolynomial; type VPAuxInfo; type 
MultilinearExtension; @@ -40,27 +39,27 @@ pub trait SumCheck { type SumCheckSubClaim: Clone + Debug + Default + PartialEq; /// Extract sum from the proof - fn extract_sum(proof: &Self::SumCheckProof) -> C::ScalarField; + fn extract_sum(proof: &Self::SumCheckProof) -> F; /// Generate proof of the sum of polynomial over {0,1}^`num_vars` /// /// The polynomial is represented in the form of a VirtualPolynomial. fn prove( poly: &Self::VirtualPolynomial, - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, ) -> Result; /// Verify the claimed sum using the proof fn verify( - sum: C::ScalarField, + sum: F, proof: &Self::SumCheckProof, aux_info: &Self::VPAuxInfo, - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, ) -> Result; } /// Trait for sum check protocol prover side APIs. -pub trait SumCheckProver +pub trait SumCheckProver where Self: Sized, { @@ -77,12 +76,12 @@ where /// Main algorithm used is from section 3.2 of [XZZPS19](https://eprint.iacr.org/2019/317.pdf#subsection.3.2). fn prove_round_and_update_state( &mut self, - challenge: &Option, + challenge: &Option, ) -> Result; } /// Trait for sum check protocol verifier side APIs. -pub trait SumCheckVerifier { +pub trait SumCheckVerifier { type VPAuxInfo; type ProverMessage; type Challenge; @@ -100,7 +99,7 @@ pub trait SumCheckVerifier { fn verify_round_and_update_state( &mut self, prover_msg: &Self::ProverMessage, - transcript: &mut impl Transcript, + transcript: &mut impl Transcript, ) -> Result; /// This function verifies the deferred checks in the interactive version of @@ -113,7 +112,7 @@ pub trait SumCheckVerifier { /// Larger field size guarantees smaller soundness error. fn check_and_generate_subclaim( &self, - asserted_sum: &C::ScalarField, + asserted_sum: &F, ) -> Result; } @@ -129,42 +128,42 @@ pub struct SumCheckSubClaim { } #[derive(Clone, Debug, Default, Copy, PartialEq, Eq)] -pub struct IOPSumCheck> { +pub struct IOPSumCheck> { #[doc(hidden)] - phantom: PhantomData, + phantom: PhantomData, #[doc(hidden)] phantom2: PhantomData, } -impl> SumCheck for IOPSumCheck { - type SumCheckProof = IOPProof; - type VirtualPolynomial = VirtualPolynomial; - type VPAuxInfo = VPAuxInfo; - type MultilinearExtension = Arc>; - type SumCheckSubClaim = SumCheckSubClaim; +impl> SumCheck for IOPSumCheck { + type SumCheckProof = IOPProof; + type VirtualPolynomial = VirtualPolynomial; + type VPAuxInfo = VPAuxInfo; + type MultilinearExtension = Arc>; + type SumCheckSubClaim = SumCheckSubClaim; - fn extract_sum(proof: &Self::SumCheckProof) -> C::ScalarField { + fn extract_sum(proof: &Self::SumCheckProof) -> F { let start = start_timer!(|| "extract sum"); let poly = DensePolynomial::from_coefficients_vec(proof.proofs[0].coeffs.clone()); - let res = poly.evaluate(&C::ScalarField::ONE) + poly.evaluate(&C::ScalarField::ZERO); + let res = poly.evaluate(&F::ONE) + poly.evaluate(&F::ZERO); end_timer!(start); res } fn prove( - poly: &VirtualPolynomial, - transcript: &mut impl Transcript, - ) -> Result, PolyIOPErrors> { - transcript.absorb(&C::ScalarField::from(poly.aux_info.num_variables as u64)); - transcript.absorb(&C::ScalarField::from(poly.aux_info.max_degree as u64)); - let mut prover_state: IOPProverState = IOPProverState::prover_init(poly)?; - let mut challenge: Option = None; - let mut prover_msgs: Vec> = + poly: &VirtualPolynomial, + transcript: &mut impl Transcript, + ) -> Result, PolyIOPErrors> { + transcript.absorb(&F::from(poly.aux_info.num_variables as u64)); + 
transcript.absorb(&F::from(poly.aux_info.max_degree as u64)); + let mut prover_state: IOPProverState = IOPProverState::prover_init(poly)?; + let mut challenge: Option = None; + let mut prover_msgs: Vec> = Vec::with_capacity(poly.aux_info.num_variables); for _ in 0..poly.aux_info.num_variables { - let prover_msg: IOPProverMessage = + let prover_msg: IOPProverMessage = IOPProverState::prove_round_and_update_state(&mut prover_state, &challenge)?; - transcript.absorb_vec(&prover_msg.coeffs); + transcript.absorb(&prover_msg.coeffs); prover_msgs.push(prover_msg); challenge = Some(transcript.get_challenge()); } @@ -178,17 +177,17 @@ impl> SumCheck for IOPSumCheck { } fn verify( - claimed_sum: C::ScalarField, - proof: &IOPProof, - aux_info: &VPAuxInfo, - transcript: &mut impl Transcript, - ) -> Result, PolyIOPErrors> { - transcript.absorb(&C::ScalarField::from(aux_info.num_variables as u64)); - transcript.absorb(&C::ScalarField::from(aux_info.max_degree as u64)); + claimed_sum: F, + proof: &IOPProof, + aux_info: &VPAuxInfo, + transcript: &mut impl Transcript, + ) -> Result, PolyIOPErrors> { + transcript.absorb(&F::from(aux_info.num_variables as u64)); + transcript.absorb(&F::from(aux_info.max_degree as u64)); let mut verifier_state = IOPVerifierState::verifier_init(aux_info); for i in 0..aux_info.num_variables { let prover_msg = proof.proofs.get(i).expect("proof is incomplete"); - transcript.absorb_vec(&prover_msg.coeffs); + transcript.absorb(&prover_msg.coeffs); IOPVerifierState::verify_round_and_update_state( &mut verifier_state, prover_msg, @@ -204,16 +203,15 @@ impl> SumCheck for IOPSumCheck { pub mod tests { use std::sync::Arc; + use ark_crypto_primitives::sponge::poseidon::PoseidonSponge; + use ark_crypto_primitives::sponge::CryptographicSponge; use ark_ff::Field; use ark_pallas::Fr; - use ark_pallas::Projective; use ark_poly::DenseMultilinearExtension; use ark_poly::MultilinearExtension; use ark_std::test_rng; use crate::transcript::poseidon::poseidon_canonical_config; - use crate::transcript::poseidon::PoseidonTranscript; - use crate::transcript::Transcript; use crate::utils::sum_check::SumCheck; use crate::utils::virtual_polynomial::VirtualPolynomial; @@ -221,30 +219,39 @@ pub mod tests { #[test] pub fn sumcheck_poseidon() { + let n_vars = 5; + let mut rng = test_rng(); - let poly_mle = DenseMultilinearExtension::rand(5, &mut rng); + let poly_mle = DenseMultilinearExtension::rand(n_vars, &mut rng); let virtual_poly = VirtualPolynomial::new_from_mle(&Arc::new(poly_mle), Fr::ONE); + + sumcheck_poseidon_opt(virtual_poly); + + // test with zero poly + let poly_mle = DenseMultilinearExtension::from_evaluations_vec( + n_vars, + vec![Fr::ZERO; 2u32.pow(n_vars as u32) as usize], + ); + let virtual_poly = VirtualPolynomial::new_from_mle(&Arc::new(poly_mle), Fr::ONE); + sumcheck_poseidon_opt(virtual_poly); + } + + fn sumcheck_poseidon_opt(virtual_poly: VirtualPolynomial) { let poseidon_config = poseidon_canonical_config::(); // sum-check prove - let mut poseidon_transcript_prove: PoseidonTranscript = - PoseidonTranscript::::new(&poseidon_config); - let sum_check = IOPSumCheck::>::prove( - &virtual_poly, - &mut poseidon_transcript_prove, - ) - .unwrap(); + let mut transcript_p: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); + let sum_check = + IOPSumCheck::>::prove(&virtual_poly, &mut transcript_p).unwrap(); // sum-check verify - let claimed_sum = - IOPSumCheck::>::extract_sum(&sum_check); - let mut poseidon_transcript_verify: PoseidonTranscript = - 
PoseidonTranscript::::new(&poseidon_config); - let res_verify = IOPSumCheck::>::verify( + let claimed_sum = IOPSumCheck::>::extract_sum(&sum_check); + let mut transcript_v: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); + let res_verify = IOPSumCheck::>::verify( claimed_sum, &sum_check, &virtual_poly.aux_info, - &mut poseidon_transcript_verify, + &mut transcript_v, ); assert!(res_verify.is_ok()); diff --git a/folding-schemes/src/utils/espresso/sum_check/prover.rs b/folding-schemes/src/utils/espresso/sum_check/prover.rs index 46d2e0c3..24440d24 100644 --- a/folding-schemes/src/utils/espresso/sum_check/prover.rs +++ b/folding-schemes/src/utils/espresso/sum_check/prover.rs @@ -14,11 +14,9 @@ use crate::utils::{ lagrange_poly::compute_lagrange_interpolated_poly, multilinear_polynomial::fix_variables, virtual_polynomial::VirtualPolynomial, }; -use ark_ec::CurveGroup; -use ark_ff::Field; use ark_ff::{batch_inversion, PrimeField}; use ark_poly::DenseMultilinearExtension; -use ark_std::{cfg_into_iter, end_timer, start_timer, vec::Vec}; +use ark_std::{cfg_into_iter, end_timer, start_timer}; use rayon::prelude::{IntoParallelIterator, IntoParallelRefIterator}; use std::sync::Arc; @@ -28,9 +26,9 @@ use espresso_subroutines::poly_iop::prelude::PolyIOPErrors; // #[cfg(feature = "parallel")] use rayon::iter::{IntoParallelRefMutIterator, ParallelIterator}; -impl SumCheckProver for IOPProverState { - type VirtualPolynomial = VirtualPolynomial; - type ProverMessage = IOPProverMessage; +impl SumCheckProver for IOPProverState { + type VirtualPolynomial = VirtualPolynomial; + type ProverMessage = IOPProverMessage; /// Initialize the prover state to argue for the sum of the input polynomial /// over {0,1}^`num_vars`. @@ -49,9 +47,7 @@ impl SumCheckProver for IOPProverState { poly: polynomial.clone(), extrapolation_aux: (1..polynomial.aux_info.max_degree) .map(|degree| { - let points = (0..1 + degree as u64) - .map(C::ScalarField::from) - .collect::>(); + let points = (0..1 + degree as u64).map(F::from).collect::>(); let weights = barycentric_weights(&points); (points, weights) }) @@ -65,7 +61,7 @@ impl SumCheckProver for IOPProverState { /// Main algorithm used is from section 3.2 of [XZZPS19](https://eprint.iacr.org/2019/317.pdf#subsection.3.2). fn prove_round_and_update_state( &mut self, - challenge: &Option, + challenge: &Option, ) -> Result { // let start = // start_timer!(|| format!("sum check prove {}-th round and update state", @@ -90,7 +86,7 @@ impl SumCheckProver for IOPProverState { // g(r_1, ..., r_{m-1}, x_m ... x_n) // // eval g over r_m, and mutate g to g(r_1, ... r_m,, x_{m+1}... x_n) - let mut flattened_ml_extensions: Vec> = self + let mut flattened_ml_extensions: Vec> = self .poly .flattened_ml_extensions .par_iter() @@ -124,7 +120,7 @@ impl SumCheckProver for IOPProverState { self.round += 1; let products_list = self.poly.products.clone(); - let mut products_sum = vec![C::ScalarField::ZERO; self.poly.aux_info.max_degree + 1]; + let mut products_sum = vec![F::ZERO; self.poly.aux_info.max_degree + 1]; // Step 2: generate sum for the partial evaluated polynomial: // f(r_1, ... r_m,, x_{m+1}... 
x_n) @@ -134,8 +130,8 @@ impl SumCheckProver for IOPProverState { .fold( || { ( - vec![(C::ScalarField::ZERO, C::ScalarField::ZERO); products.len()], - vec![C::ScalarField::ZERO; products.len() + 1], + vec![(F::ZERO, F::ZERO); products.len()], + vec![F::ZERO; products.len() + 1], ) }, |(mut buf, mut acc), b| { @@ -146,17 +142,17 @@ impl SumCheckProver for IOPProverState { *eval = table[b << 1]; *step = table[(b << 1) + 1] - table[b << 1]; }); - acc[0] += buf.iter().map(|(eval, _)| eval).product::(); + acc[0] += buf.iter().map(|(eval, _)| eval).product::(); acc[1..].iter_mut().for_each(|acc| { buf.iter_mut().for_each(|(eval, step)| *eval += step as &_); - *acc += buf.iter().map(|(eval, _)| eval).product::(); + *acc += buf.iter().map(|(eval, _)| eval).product::(); }); (buf, acc) }, ) .map(|(_, partial)| partial) .reduce( - || vec![C::ScalarField::ZERO; products.len() + 1], + || vec![F::ZERO; products.len() + 1], |mut sum, partial| { sum.iter_mut() .zip(partial.iter()) @@ -168,7 +164,7 @@ impl SumCheckProver for IOPProverState { let extraploation = cfg_into_iter!(0..self.poly.aux_info.max_degree - products.len()) .map(|i| { let (points, weights) = &self.extrapolation_aux[products.len() - 1]; - let at = C::ScalarField::from((products.len() + 1 + i) as u64); + let at = F::from((products.len() + 1 + i) as u64); extrapolate(points, weights, &sum, &at) }) .collect::>(); @@ -184,7 +180,7 @@ impl SumCheckProver for IOPProverState { .map(|x| Arc::new(x.clone())) .collect(); - let prover_poly = compute_lagrange_interpolated_poly::(&products_sum); + let prover_poly = compute_lagrange_interpolated_poly::(&products_sum); Ok(IOPProverMessage { coeffs: prover_poly.coeffs, }) diff --git a/folding-schemes/src/utils/espresso/sum_check/structs.rs b/folding-schemes/src/utils/espresso/sum_check/structs.rs index b34ed81d..de487d93 100644 --- a/folding-schemes/src/utils/espresso/sum_check/structs.rs +++ b/folding-schemes/src/utils/espresso/sum_check/structs.rs @@ -10,7 +10,6 @@ //! This module defines structs that are shared by all sub protocols. use crate::utils::virtual_polynomial::VirtualPolynomial; -use ark_ec::CurveGroup; use ark_ff::PrimeField; use ark_serialize::CanonicalSerialize; @@ -33,28 +32,28 @@ pub struct IOPProverMessage { /// Prover State of a PolyIOP. #[derive(Debug)] -pub struct IOPProverState { +pub struct IOPProverState { /// sampled randomness given by the verifier - pub challenges: Vec, + pub challenges: Vec, /// the current round number pub(crate) round: usize, /// pointer to the virtual polynomial - pub(crate) poly: VirtualPolynomial, + pub(crate) poly: VirtualPolynomial, /// points with precomputed barycentric weights for extrapolating smaller /// degree uni-polys to `max_degree + 1` evaluations. 
 #[allow(clippy::type_complexity)]
-    pub(crate) extrapolation_aux: Vec<(Vec<C::ScalarField>, Vec<C::ScalarField>)>,
+    pub(crate) extrapolation_aux: Vec<(Vec<F>, Vec<F>)>,
 }
 
 /// Verifier State of a PolyIOP, generic over a curve group
 #[derive(Debug)]
-pub struct IOPVerifierState<C: CurveGroup> {
+pub struct IOPVerifierState<F: PrimeField> {
     pub(crate) round: usize,
     pub(crate) num_vars: usize,
     pub(crate) finished: bool,
     /// a list storing the univariate polynomial in evaluation form sent by the
     /// prover at each round
-    pub(crate) polynomials_received: Vec<Vec<C::ScalarField>>,
+    pub(crate) polynomials_received: Vec<Vec<F>>,
     /// a list storing the randomness sampled by the verifier at each round
-    pub(crate) challenges: Vec<C::ScalarField>,
+    pub(crate) challenges: Vec<F>,
 }
diff --git a/folding-schemes/src/utils/espresso/sum_check/verifier.rs b/folding-schemes/src/utils/espresso/sum_check/verifier.rs
index 55fafb2d..074eec18 100644
--- a/folding-schemes/src/utils/espresso/sum_check/verifier.rs
+++ b/folding-schemes/src/utils/espresso/sum_check/verifier.rs
@@ -14,7 +14,7 @@ use super::{
     SumCheckSubClaim, SumCheckVerifier,
 };
 use crate::{transcript::Transcript, utils::virtual_polynomial::VPAuxInfo};
-use ark_ec::CurveGroup;
+use ark_crypto_primitives::sponge::Absorb;
 use ark_ff::PrimeField;
 use ark_poly::Polynomial;
 use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial};
@@ -24,11 +24,11 @@ use espresso_subroutines::poly_iop::prelude::PolyIOPErrors;
 #[cfg(feature = "parallel")]
 use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator};
 
-impl<C: CurveGroup> SumCheckVerifier<C> for IOPVerifierState<C> {
-    type VPAuxInfo = VPAuxInfo<C::ScalarField>;
-    type ProverMessage = IOPProverMessage<C::ScalarField>;
-    type Challenge = C::ScalarField;
-    type SumCheckSubClaim = SumCheckSubClaim<C::ScalarField>;
+impl<F: PrimeField + Absorb> SumCheckVerifier<F> for IOPVerifierState<F> {
+    type VPAuxInfo = VPAuxInfo<F>;
+    type ProverMessage = IOPProverMessage<F>;
+    type Challenge = F;
+    type SumCheckSubClaim = SumCheckSubClaim<F>;
 
     /// Initialize the verifier's state.
fn verifier_init(index_info: &Self::VPAuxInfo) -> Self { @@ -46,9 +46,9 @@ impl SumCheckVerifier for IOPVerifierState { fn verify_round_and_update_state( &mut self, - prover_msg: & as SumCheckVerifier>::ProverMessage, - transcript: &mut impl Transcript, - ) -> Result< as SumCheckVerifier>::Challenge, PolyIOPErrors> { + prover_msg: & as SumCheckVerifier>::ProverMessage, + transcript: &mut impl Transcript, + ) -> Result< as SumCheckVerifier>::Challenge, PolyIOPErrors> { let start = start_timer!(|| format!("sum check verify {}-th round and update state", self.round)); @@ -83,7 +83,7 @@ impl SumCheckVerifier for IOPVerifierState { fn check_and_generate_subclaim( &self, - asserted_sum: &C::ScalarField, + asserted_sum: &F, ) -> Result { let start = start_timer!(|| "sum check check and generate subclaim"); if !self.finished { @@ -136,8 +136,12 @@ impl SumCheckVerifier for IOPVerifierState { .take(self.num_vars) { let poly = DensePolynomial::from_coefficients_slice(coeffs); - let eval_at_one: C::ScalarField = poly.iter().sum(); - let eval_at_zero: C::ScalarField = poly.coeffs[0]; + let eval_at_one: F = poly.iter().sum(); + let eval_at_zero: F = if poly.coeffs.is_empty() { + F::zero() + } else { + poly.coeffs[0] + }; let eval = eval_at_one + eval_at_zero; // the deferred check during the interactive phase: diff --git a/folding-schemes/src/utils/espresso/virtual_polynomial.rs b/folding-schemes/src/utils/espresso/virtual_polynomial.rs index 24f0b023..acc843b9 100644 --- a/folding-schemes/src/utils/espresso/virtual_polynomial.rs +++ b/folding-schemes/src/utils/espresso/virtual_polynomial.rs @@ -18,8 +18,6 @@ use rayon::prelude::*; use std::{cmp::max, collections::HashMap, marker::PhantomData, ops::Add, sync::Arc}; use thiserror::Error; -use ark_std::string::String; - //-- aritherrors /// A `enum` specifying the possible failure modes of the arithmetics. 
#[derive(Error, Debug)] diff --git a/folding-schemes/src/utils/lagrange_poly.rs b/folding-schemes/src/utils/lagrange_poly.rs index f3dfaee2..22a38e1b 100644 --- a/folding-schemes/src/utils/lagrange_poly.rs +++ b/folding-schemes/src/utils/lagrange_poly.rs @@ -52,7 +52,7 @@ mod tests { use crate::utils::lagrange_poly::compute_lagrange_interpolated_poly; use ark_pallas::Fr; use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial}; - use ark_std::{vec::Vec, UniformRand}; + use ark_std::UniformRand; use espresso_subroutines::poly_iop::prelude::PolyIOPErrors; #[test] diff --git a/folding-schemes/src/utils/mle.rs b/folding-schemes/src/utils/mle.rs index a044eca0..68c4aea8 100644 --- a/folding-schemes/src/utils/mle.rs +++ b/folding-schemes/src/utils/mle.rs @@ -104,7 +104,7 @@ pub fn dense_vec_to_mle(n_vars: usize, v: &[F]) -> SparseMultilin mod tests { use super::*; use crate::{ - ccs::tests::get_test_z, + arith::ccs::tests::get_test_z, utils::multilinear_polynomial::fix_variables, utils::multilinear_polynomial::tests::fix_last_variables, utils::{hypercube::BooleanHypercube, vec::tests::to_F_matrix}, diff --git a/folding-schemes/src/utils/mod.rs b/folding-schemes/src/utils/mod.rs index 53618049..ca017d54 100644 --- a/folding-schemes/src/utils/mod.rs +++ b/folding-schemes/src/utils/mod.rs @@ -1,4 +1,13 @@ +use ark_crypto_primitives::sponge::poseidon::PoseidonConfig; +use ark_ec::{AffineRepr, CurveGroup}; use ark_ff::PrimeField; +use ark_serialize::CanonicalSerialize; +use ark_std::Zero; +use sha3::{Digest, Sha3_256}; + +use crate::arith::Arith; +use crate::commitment::CommitmentScheme; +use crate::Error; pub mod gadgets; pub mod hypercube; @@ -21,3 +30,73 @@ pub fn powers_of(x: F, n: usize) -> Vec { } c } + +/// returns the coordinates of a commitment point. 
This is compatible with the arkworks +/// GC.to_constraint_field()[..2] +pub fn get_cm_coordinates(cm: &C) -> Vec { + let zero = (&C::BaseField::zero(), &C::BaseField::zero()); + let cm = cm.into_affine(); + let (cm_x, cm_y) = cm.xy().unwrap_or(zero); + vec![*cm_x, *cm_y] +} + +/// returns the hash of the given public parameters of the Folding Scheme +pub fn pp_hash( + arith: &impl Arith, + cf_arith: &impl Arith, + cs_vp: &CS1::VerifierParams, + cf_cs_vp: &CS2::VerifierParams, + poseidon_config: &PoseidonConfig, +) -> Result +where + C1: CurveGroup, + C2: CurveGroup, + CS1: CommitmentScheme, + CS2: CommitmentScheme, +{ + let mut hasher = Sha3_256::new(); + + // Fr & Fq modulus bit size + hasher.update(C1::ScalarField::MODULUS_BIT_SIZE.to_le_bytes()); + hasher.update(C2::ScalarField::MODULUS_BIT_SIZE.to_le_bytes()); + // AugmentedFCircuit Arith params + hasher.update(arith.params_to_le_bytes()); + // CycleFold Circuit Arith params + hasher.update(cf_arith.params_to_le_bytes()); + // cs_vp & cf_cs_vp (commitments setup) + let mut cs_vp_bytes = Vec::new(); + cs_vp.serialize_uncompressed(&mut cs_vp_bytes)?; + hasher.update(cs_vp_bytes); + let mut cf_cs_vp_bytes = Vec::new(); + cf_cs_vp.serialize_uncompressed(&mut cf_cs_vp_bytes)?; + hasher.update(cf_cs_vp_bytes); + // poseidon params + let mut poseidon_config_bytes = Vec::new(); + poseidon_config + .full_rounds + .serialize_uncompressed(&mut poseidon_config_bytes)?; + poseidon_config + .partial_rounds + .serialize_uncompressed(&mut poseidon_config_bytes)?; + poseidon_config + .alpha + .serialize_uncompressed(&mut poseidon_config_bytes)?; + poseidon_config + .ark + .serialize_uncompressed(&mut poseidon_config_bytes)?; + poseidon_config + .mds + .serialize_uncompressed(&mut poseidon_config_bytes)?; + poseidon_config + .rate + .serialize_uncompressed(&mut poseidon_config_bytes)?; + poseidon_config + .capacity + .serialize_uncompressed(&mut poseidon_config_bytes)?; + hasher.update(poseidon_config_bytes); + + let public_params_hash = hasher.finalize(); + Ok(C1::ScalarField::from_le_bytes_mod_order( + &public_params_hash, + )) +} diff --git a/solidity-verifiers/Cargo.toml b/solidity-verifiers/Cargo.toml index f3ad089b..0252c147 100644 --- a/solidity-verifiers/Cargo.toml +++ b/solidity-verifiers/Cargo.toml @@ -29,6 +29,7 @@ ark-bn254 = {version="0.4.0", features=["r1cs"]} ark-grumpkin = {version="0.4.0", features=["r1cs"]} rand = "0.8.5" folding-schemes = { path = "../folding-schemes/", features=["light-test"]} +noname = { git = "https://github.com/dmpierre/noname" } [features] default = ["parallel"] @@ -47,3 +48,7 @@ path = "../examples/full_flow.rs" [[example]] name = "circom_full_flow" path = "../examples/circom_full_flow.rs" + +[[example]] +name = "noname_full_flow" +path = "../examples/noname_full_flow.rs" diff --git a/solidity-verifiers/src/verifiers/g16.rs b/solidity-verifiers/src/verifiers/g16.rs index 0063e142..eda66fa3 100644 --- a/solidity-verifiers/src/verifiers/g16.rs +++ b/solidity-verifiers/src/verifiers/g16.rs @@ -3,7 +3,7 @@ use crate::utils::encoding::{G1Repr, G2Repr}; use crate::utils::HeaderInclusion; use crate::{ProtocolVerifierKey, GPL3_SDPX_IDENTIFIER}; use ark_bn254::Bn254; -use ark_groth16::VerifyingKey; +use ark_groth16::VerifyingKey as ArkVerifyingKey; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use askama::Template; @@ -48,10 +48,10 @@ impl From for Groth16Verifier { // Ideally this would be linked to the `Decider` trait in FoldingSchemes. 
// For now, this is the easiest as NovaCycleFold isn't clear target from where we can get all it's needed arguments. #[derive(CanonicalDeserialize, CanonicalSerialize, Clone, PartialEq, Debug)] -pub struct Groth16VerifierKey(pub(crate) VerifyingKey); +pub struct Groth16VerifierKey(pub(crate) ArkVerifyingKey); -impl From> for Groth16VerifierKey { - fn from(value: VerifyingKey) -> Self { +impl From> for Groth16VerifierKey { + fn from(value: ArkVerifyingKey) -> Self { Self(value) } } @@ -95,7 +95,7 @@ mod tests { #[test] fn groth16_vk_serde_roundtrip() { - let (_, _, _, vk, _) = setup(DEFAULT_SETUP_LEN); + let (_, _, _, _, vk, _) = setup(DEFAULT_SETUP_LEN); let g16_vk = Groth16VerifierKey::from(vk); let mut bytes = vec![]; @@ -109,7 +109,7 @@ mod tests { #[test] fn test_groth16_verifier_accepts_and_rejects_proofs() { let mut rng = ark_std::rand::rngs::StdRng::seed_from_u64(test_rng().next_u64()); - let (_, _, g16_pk, g16_vk, circuit) = setup(DEFAULT_SETUP_LEN); + let (_, _, _, g16_pk, g16_vk, circuit) = setup(DEFAULT_SETUP_LEN); let g16_vk = Groth16VerifierKey::from(g16_vk); let proof = Groth16::::prove(&g16_pk, circuit, &mut rng).unwrap(); diff --git a/solidity-verifiers/src/verifiers/kzg.rs b/solidity-verifiers/src/verifiers/kzg.rs index a630235c..9dc156d5 100644 --- a/solidity-verifiers/src/verifiers/kzg.rs +++ b/solidity-verifiers/src/verifiers/kzg.rs @@ -78,7 +78,8 @@ mod tests { utils::HeaderInclusion, ProtocolVerifierKey, }; - use ark_bn254::{Bn254, Fr, G1Projective as G1}; + use ark_bn254::{Bn254, Fr}; + use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, CryptographicSponge}; use ark_ec::{AffineRepr, CurveGroup}; use ark_ff::{BigInteger, PrimeField}; use ark_std::rand::{RngCore, SeedableRng}; @@ -89,10 +90,7 @@ mod tests { use folding_schemes::{ commitment::{kzg::KZG, CommitmentScheme}, - transcript::{ - poseidon::{poseidon_canonical_config, PoseidonTranscript}, - Transcript, - }, + transcript::{poseidon::poseidon_canonical_config, Transcript}, }; use super::KZG10Verifier; @@ -102,7 +100,7 @@ mod tests { #[test] fn kzg_vk_serde_roundtrip() { - let (pk, vk, _, _, _) = setup(DEFAULT_SETUP_LEN); + let (_, pk, vk, _, _, _) = setup(DEFAULT_SETUP_LEN); let kzg_vk = KZG10VerifierKey::from((vk, pk.powers_of_g[0..3].to_vec())); let mut bytes = vec![]; @@ -115,7 +113,7 @@ mod tests { #[test] fn kzg_verifier_compiles() { - let (kzg_pk, kzg_vk, _, _, _) = setup(DEFAULT_SETUP_LEN); + let (_, kzg_pk, kzg_vk, _, _, _) = setup(DEFAULT_SETUP_LEN); let kzg_vk = KZG10VerifierKey::from((kzg_vk.clone(), kzg_pk.powers_of_g[0..3].to_vec())); let res = HeaderInclusion::::builder() @@ -133,10 +131,10 @@ mod tests { fn kzg_verifier_accepts_and_rejects_proofs() { let mut rng = ark_std::rand::rngs::StdRng::seed_from_u64(test_rng().next_u64()); let poseidon_config = poseidon_canonical_config::(); - let transcript_p = &mut PoseidonTranscript::::new(&poseidon_config); - let transcript_v = &mut PoseidonTranscript::::new(&poseidon_config); + let transcript_p = &mut PoseidonSponge::::new(&poseidon_config); + let transcript_v = &mut PoseidonSponge::::new(&poseidon_config); - let (kzg_pk, kzg_vk, _, _, _) = setup(DEFAULT_SETUP_LEN); + let (_, kzg_pk, kzg_vk, _, _, _) = setup(DEFAULT_SETUP_LEN); let kzg_vk = KZG10VerifierKey::from((kzg_vk.clone(), kzg_pk.powers_of_g[0..3].to_vec())); let v: Vec = std::iter::repeat_with(|| Fr::rand(&mut rng)) @@ -159,7 +157,7 @@ mod tests { let (x_proof, y_proof) = proof_affine.xy().unwrap(); let y = proof.eval.into_bigint().to_bytes_be(); - 
transcript_v.absorb_point(&cm).unwrap(); + transcript_v.absorb_nonnative(&cm); let x = transcript_v.get_challenge(); let x = x.into_bigint().to_bytes_be(); diff --git a/solidity-verifiers/src/verifiers/mod.rs b/solidity-verifiers/src/verifiers/mod.rs index db283a4f..8a3bf0d6 100644 --- a/solidity-verifiers/src/verifiers/mod.rs +++ b/solidity-verifiers/src/verifiers/mod.rs @@ -97,6 +97,7 @@ pub mod tests { pub fn setup<'a>( n: usize, ) -> ( + Fr, // public params hash KZGProverKey<'a, G1>, KZGVerifierKey, ark_groth16::ProvingKey, @@ -115,6 +116,7 @@ pub mod tests { let (kzg_pk, kzg_vk): (KZGProverKey, KZGVerifierKey) = KZG::::setup(&mut rng, n).unwrap(); - (kzg_pk, kzg_vk, g16_pk, g16_vk, circuit) + let pp_hash = Fr::from(42u32); // only for test + (pp_hash, kzg_pk, kzg_vk, g16_pk, g16_vk, circuit) } } diff --git a/solidity-verifiers/src/verifiers/nova_cyclefold.rs b/solidity-verifiers/src/verifiers/nova_cyclefold.rs index 55c94993..afe7686a 100644 --- a/solidity-verifiers/src/verifiers/nova_cyclefold.rs +++ b/solidity-verifiers/src/verifiers/nova_cyclefold.rs @@ -1,9 +1,10 @@ #![allow(non_snake_case)] #![allow(non_camel_case_types)] +#![allow(clippy::upper_case_acronyms)] -use ark_bn254::{Bn254, Fq, G1Affine}; -use ark_groth16::VerifyingKey; -use ark_poly_commit::kzg10::VerifierKey; +use ark_bn254::{Bn254, Fq, Fr, G1Affine}; +use ark_groth16::VerifyingKey as ArkG16VerifierKey; +use ark_poly_commit::kzg10::VerifierKey as ArkKZG10VerifierKey; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use askama::Template; @@ -27,6 +28,7 @@ pub fn get_decider_template_for_cyclefold_decider( #[derive(Template, Default)] #[template(path = "nova_cyclefold_decider.askama.sol", ext = "sol")] pub struct NovaCycleFoldDecider { + pp_hash: Fr, // public params hash groth16_verifier: Groth16Verifier, kzg10_verifier: KZG10Verifier, // z_len denotes the FCircuit state (z_i) length @@ -42,6 +44,7 @@ impl From for NovaCycleFoldDecider { let public_inputs_len = groth16_verifier.gamma_abc_len; let bits_per_limb = NonNativeUintVar::::bits_per_limb(); Self { + pp_hash: value.pp_hash, groth16_verifier, kzg10_verifier: KZG10Verifier::from(value.kzg_vk), z_len: value.z_len, @@ -54,6 +57,7 @@ impl From for NovaCycleFoldDecider { #[derive(CanonicalDeserialize, CanonicalSerialize, PartialEq, Debug, Clone)] pub struct NovaCycleFoldVerifierKey { + pp_hash: Fr, g16_vk: Groth16VerifierKey, kzg_vk: KZG10VerifierKey, z_len: usize, @@ -73,39 +77,54 @@ impl ProtocolVerifierKey for NovaCycleFoldVerifierKey { } } -impl From<(Groth16VerifierKey, KZG10VerifierKey, usize)> for NovaCycleFoldVerifierKey { - fn from(value: (Groth16VerifierKey, KZG10VerifierKey, usize)) -> Self { +impl From<(Fr, Groth16VerifierKey, KZG10VerifierKey, usize)> for NovaCycleFoldVerifierKey { + fn from(value: (Fr, Groth16VerifierKey, KZG10VerifierKey, usize)) -> Self { Self { - g16_vk: value.0, - kzg_vk: value.1, - z_len: value.2, + pp_hash: value.0, + g16_vk: value.1, + kzg_vk: value.2, + z_len: value.3, } } } // implements From assuming that the 'batchCheck' method from the KZG10 template will not be used // in the NovaCycleFoldDecider verifier contract -impl From<(VerifyingKey, VerifierKey, usize)> for NovaCycleFoldVerifierKey { - fn from(value: (VerifyingKey, VerifierKey, usize)) -> Self { - let g16_vk = Groth16VerifierKey::from(value.0); +impl + From<( + (Fr, ArkG16VerifierKey, ArkKZG10VerifierKey), + usize, + )> for NovaCycleFoldVerifierKey +{ + fn from( + value: ( + (Fr, ArkG16VerifierKey, ArkKZG10VerifierKey), + usize, + ), + ) -> Self { + 
let decider_vp = value.0; + let g16_vk = Groth16VerifierKey::from(decider_vp.1); // pass `Vec::new()` since batchCheck will not be used - let kzg_vk = KZG10VerifierKey::from((value.1, Vec::new())); + let kzg_vk = KZG10VerifierKey::from((decider_vp.2, Vec::new())); Self { + pp_hash: decider_vp.0, g16_vk, kzg_vk, - z_len: value.2, + z_len: value.1, } } } impl NovaCycleFoldVerifierKey { pub fn new( - vkey_g16: VerifyingKey, - vkey_kzg: VerifierKey, + pp_hash: Fr, + vkey_g16: ArkG16VerifierKey, + vkey_kzg: ArkKZG10VerifierKey, crs_points: Vec, z_len: usize, ) -> Self { Self { + pp_hash, g16_vk: Groth16VerifierKey::from(vkey_g16), kzg_vk: KZG10VerifierKey::from((vkey_kzg, crs_points)), z_len, @@ -116,12 +135,9 @@ impl NovaCycleFoldVerifierKey { #[cfg(test)] mod tests { use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as G1}; - use ark_crypto_primitives::snark::SNARK; use ark_ff::PrimeField; - use ark_groth16::VerifyingKey as G16VerifierKey; - use ark_groth16::{Groth16, ProvingKey}; + use ark_groth16::Groth16; use ark_grumpkin::{constraints::GVar as GVar2, Projective as G2}; - use ark_poly_commit::kzg10::VerifierKey as KZGVerifierKey; use ark_r1cs_std::alloc::AllocVar; use ark_r1cs_std::fields::fp::FpVar; use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError}; @@ -131,15 +147,10 @@ mod tests { use std::time::Instant; use folding_schemes::{ - commitment::{ - kzg::{ProverKey as KZGProverKey, KZG}, - pedersen::Pedersen, - CommitmentScheme, - }, + commitment::{kzg::KZG, pedersen::Pedersen}, folding::nova::{ decider_eth::{prepare_calldata, Decider as DeciderEth}, - decider_eth_circuit::DeciderEthCircuit, - get_cs_params_len, Nova, ProverParams, + Nova, PreprocessorParam, }, frontend::FCircuit, transcript::poseidon::poseidon_canonical_config, @@ -155,6 +166,24 @@ mod tests { NovaCycleFoldVerifierKey, ProtocolVerifierKey, }; + type NOVA = Nova, Pedersen>; + type DECIDER = DeciderEth< + G1, + GVar, + G2, + GVar2, + FC, + KZG<'static, Bn254>, + Pedersen, + Groth16, + NOVA, + >; + + type FS_PP = as FoldingScheme>::ProverParam; + type FS_VP = as FoldingScheme>::VerifierParam; + type DECIDER_PP = as Decider>>::ProverParam; + type DECIDER_VP = as Decider>>::VerifierParam; + /// Test circuit to be folded #[derive(Clone, Copy, Debug)] pub struct CubicFCircuit { @@ -255,10 +284,10 @@ mod tests { #[test] fn nova_cyclefold_vk_serde_roundtrip() { - let (_, kzg_vk, _, g16_vk, _) = setup(DEFAULT_SETUP_LEN); + let (pp_hash, _, kzg_vk, _, g16_vk, _) = setup(DEFAULT_SETUP_LEN); let mut bytes = vec![]; - let nova_cyclefold_vk = NovaCycleFoldVerifierKey::from((g16_vk, kzg_vk, 1)); + let nova_cyclefold_vk = NovaCycleFoldVerifierKey::from(((pp_hash, g16_vk, kzg_vk), 1)); nova_cyclefold_vk .serialize_protocol_verifier_key(&mut bytes) @@ -271,8 +300,8 @@ mod tests { #[test] fn nova_cyclefold_decider_template_renders() { - let (_, kzg_vk, _, g16_vk, _) = setup(DEFAULT_SETUP_LEN); - let nova_cyclefold_vk = NovaCycleFoldVerifierKey::from((g16_vk, kzg_vk, 1)); + let (pp_hash, _, kzg_vk, _, g16_vk, _) = setup(DEFAULT_SETUP_LEN); + let nova_cyclefold_vk = NovaCycleFoldVerifierKey::from(((pp_hash, g16_vk, kzg_vk), 1)); let decider_solidity_code = HeaderInclusion::::builder() .template(nova_cyclefold_vk) @@ -281,59 +310,29 @@ mod tests { save_solidity("NovaDecider.sol", &decider_solidity_code.render().unwrap()); } - #[allow(clippy::type_complexity)] - fn init_test_prover_params>() -> ( - ProverParams, Pedersen>, - KZGVerifierKey, - ) { - let mut rng = ark_std::test_rng(); - let poseidon_config = 
poseidon_canonical_config::(); - let f_circuit = FC::new(()).unwrap(); - let (cs_len, cf_cs_len) = - get_cs_params_len::(&poseidon_config, f_circuit).unwrap(); - let (kzg_pk, kzg_vk): (KZGProverKey, KZGVerifierKey) = - KZG::::setup(&mut rng, cs_len).unwrap(); - let (cf_pedersen_params, _) = Pedersen::::setup(&mut rng, cf_cs_len).unwrap(); - let fs_prover_params = ProverParams::, Pedersen> { - poseidon_config: poseidon_config.clone(), - cs_params: kzg_pk.clone(), - cf_cs_params: cf_pedersen_params, - }; - (fs_prover_params, kzg_vk) - } - /// Initializes Nova parameters and DeciderEth parameters. Only for test purposes. #[allow(clippy::type_complexity)] - fn init_params>() -> ( - ProverParams, Pedersen>, - KZGVerifierKey, - ProvingKey, - G16VerifierKey, - ) { + fn init_params>( + ) -> ((FS_PP, FS_VP), (DECIDER_PP, DECIDER_VP)) { let mut rng = rand::rngs::OsRng; - let start = Instant::now(); - let (fs_prover_params, kzg_vk) = init_test_prover_params::(); - println!("generated Nova folding params: {:?}", start.elapsed()); - let f_circuit = FC::new(()).unwrap(); - - pub type NOVA_FCircuit = - Nova, Pedersen>; - let z_0 = vec![Fr::zero(); f_circuit.state_len()]; - let nova = NOVA_FCircuit::init(&fs_prover_params, f_circuit, z_0.clone()).unwrap(); + let poseidon_config = poseidon_canonical_config::(); - let decider_circuit = - DeciderEthCircuit::, Pedersen>::from_nova::( - nova.clone(), - ) - .unwrap(); - let start = Instant::now(); - let (g16_pk, g16_vk) = - Groth16::::circuit_specific_setup(decider_circuit.clone(), &mut rng).unwrap(); - println!( - "generated G16 (Decider circuit) params: {:?}", - start.elapsed() + let f_circuit = FC::new(()).unwrap(); + let prep_param = PreprocessorParam::, Pedersen>::new( + poseidon_config, + f_circuit.clone(), ); - (fs_prover_params, kzg_vk, g16_pk, g16_vk) + let nova_params = NOVA::preprocess(&mut rng, &prep_param).unwrap(); + let nova = NOVA::init( + &nova_params, + f_circuit.clone(), + vec![Fr::zero(); f_circuit.state_len()].clone(), + ) + .unwrap(); + let decider_params = + DECIDER::preprocess(&mut rng, &nova_params.clone(), nova.clone()).unwrap(); + + (nova_params, decider_params) } /// This function allows to define which FCircuit to use for the test, and how many prove_step @@ -345,52 +344,31 @@ mod tests { /// - modifies the z_0 and checks that it does not pass the EVM check #[allow(clippy::type_complexity)] fn nova_cyclefold_solidity_verifier_opt>( - params: ( - ProverParams, Pedersen>, - KZGVerifierKey, - ProvingKey, - G16VerifierKey, - ), + fs_params: (FS_PP, FS_VP), + decider_params: (DECIDER_PP, DECIDER_VP), z_0: Vec, n_steps: usize, ) { - let (fs_prover_params, kzg_vk, g16_pk, g16_vk) = params.clone(); - - pub type NOVA_FCircuit = - Nova, Pedersen>; - pub type DECIDERETH_FCircuit = DeciderEth< - G1, - GVar, - G2, - GVar2, - FC, - KZG<'static, Bn254>, - Pedersen, - Groth16, - NOVA_FCircuit, - >; + let (decider_pp, decider_vp) = decider_params; + let f_circuit = FC::new(()).unwrap(); let nova_cyclefold_vk = - NovaCycleFoldVerifierKey::from((g16_vk.clone(), kzg_vk.clone(), f_circuit.state_len())); + NovaCycleFoldVerifierKey::from((decider_vp.clone(), f_circuit.state_len())); + + let mut rng = rand::rngs::OsRng; - let mut nova = NOVA_FCircuit::init(&fs_prover_params, f_circuit, z_0).unwrap(); + let mut nova = NOVA::::init(&fs_params, f_circuit, z_0).unwrap(); for _ in 0..n_steps { - nova.prove_step(vec![]).unwrap(); + nova.prove_step(&mut rng, vec![], None).unwrap(); } - let rng = rand::rngs::OsRng; let start = Instant::now(); - let proof = 
DECIDERETH_FCircuit::prove( - (g16_pk, fs_prover_params.cs_params.clone()), - rng, - nova.clone(), - ) - .unwrap(); + let proof = DECIDER::::prove(rng, decider_pp, nova.clone()).unwrap(); println!("generated Decider proof: {:?}", start.elapsed()); - let verified = DECIDERETH_FCircuit::::verify( - (g16_vk, kzg_vk), + let verified = DECIDER::::verify( + decider_vp, nova.i, nova.z_0.clone(), nova.z_i.clone(), @@ -447,12 +425,22 @@ mod tests { #[test] fn nova_cyclefold_solidity_verifier() { - let params = init_params::>(); + let (nova_params, decider_params) = init_params::>(); let z_0 = vec![Fr::from(3_u32)]; - nova_cyclefold_solidity_verifier_opt::>(params.clone(), z_0.clone(), 2); - nova_cyclefold_solidity_verifier_opt::>(params.clone(), z_0.clone(), 3); + nova_cyclefold_solidity_verifier_opt::>( + nova_params.clone(), + decider_params.clone(), + z_0.clone(), + 2, + ); + nova_cyclefold_solidity_verifier_opt::>( + nova_params, + decider_params, + z_0, + 3, + ); - let params = init_params::>(); + let (nova_params, decider_params) = init_params::>(); let z_0 = vec![ Fr::from(1_u32), Fr::from(1_u32), @@ -461,12 +449,14 @@ mod tests { Fr::from(1_u32), ]; nova_cyclefold_solidity_verifier_opt::>( - params.clone(), + nova_params.clone(), + decider_params.clone(), z_0.clone(), 2, ); nova_cyclefold_solidity_verifier_opt::>( - params.clone(), + nova_params, + decider_params, z_0.clone(), 3, ); diff --git a/solidity-verifiers/templates/nova_cyclefold_decider.askama.sol b/solidity-verifiers/templates/nova_cyclefold_decider.askama.sol index 7f15957b..a82893a1 100644 --- a/solidity-verifiers/templates/nova_cyclefold_decider.askama.sol +++ b/solidity-verifiers/templates/nova_cyclefold_decider.askama.sol @@ -78,10 +78,11 @@ contract NovaDecider is Groth16Verifier, KZG10Verifier { // from gamma_abc_len, we subtract 1. 
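The template below now leads the Groth16 public inputs with the public-parameters hash at public_inputs[0], shifting every later input by one position. A rough sketch of the resulting index layout, mirroring the template's arithmetic; the concrete z_len and num_limbs values here are illustrative only:

#![allow(non_snake_case)]

fn main() {
    let z_len = 1usize; // FCircuit state length (illustrative value)
    let num_limbs = 5usize; // limbs per non-native coordinate (illustrative value)

    let pp_hash_idx = 0; // public params hash
    let i_idx = 1; // step counter i
    let z_start = 2; // z_0 followed by z_i, 2 * z_len elements
    let u_idx = z_len * 2 + 2;
    let x0_idx = z_len * 2 + 3;
    let x1_idx = z_len * 2 + 4;
    let cmE_limbs_start = z_len * 2 + 5; // x limbs, then y limbs
    let cmW_limbs_start = cmE_limbs_start + num_limbs * 2;
    let challenges_start = z_len * 2 + 5 + num_limbs * 4; // the 4 challenge/evaluation values
    let cmT_limbs_start = challenges_start + 4;

    println!(
        "pp_hash={} i={} z={} u={} x0={} x1={} cmE={} cmW={} challenges={} cmT={}",
        pp_hash_idx, i_idx, z_start, u_idx, x0_idx, x1_idx,
        cmE_limbs_start, cmW_limbs_start, challenges_start, cmT_limbs_start
    );
}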
uint256[{{ public_inputs_len - 1 }}] memory public_inputs; - public_inputs[0] = i_z0_zi[0]; + public_inputs[0] = {{pp_hash}}; + public_inputs[1] = i_z0_zi[0]; for (uint i = 0; i < {{ z_len * 2 }}; i++) { - public_inputs[1 + i] = i_z0_zi[1 + i]; + public_inputs[2 + i] = i_z0_zi[1 + i]; } { @@ -91,9 +92,9 @@ contract NovaDecider is Groth16Verifier, KZG10Verifier { uint256 x0 = rlc(U_i_x_u_i_cmW[0], U_i_u_u_i_u_r[2], u_i_x_cmT[0]); uint256 x1 = rlc(U_i_x_u_i_cmW[1], U_i_u_u_i_u_r[2], u_i_x_cmT[1]); - public_inputs[{{ z_len * 2 + 1 }}] = u; - public_inputs[{{ z_len * 2 + 2 }}] = x0; - public_inputs[{{ z_len * 2 + 3 }}] = x1; + public_inputs[{{ z_len * 2 + 2 }}] = u; + public_inputs[{{ z_len * 2 + 3 }}] = x0; + public_inputs[{{ z_len * 2 + 4 }}] = x1; } { @@ -106,8 +107,8 @@ contract NovaDecider is Groth16Verifier, KZG10Verifier { uint256[{{num_limbs}}] memory cmE_y_limbs = LimbsDecomposition.decompose(cmE[1]); for (uint8 k = 0; k < {{num_limbs}}; k++) { - public_inputs[{{ z_len * 2 + 4 }} + k] = cmE_x_limbs[k]; - public_inputs[{{ z_len * 2 + 4 + num_limbs }} + k] = cmE_y_limbs[k]; + public_inputs[{{ z_len * 2 + 5 }} + k] = cmE_x_limbs[k]; + public_inputs[{{ z_len * 2 + 5 + num_limbs }} + k] = cmE_y_limbs[k]; } } @@ -124,8 +125,8 @@ contract NovaDecider is Groth16Verifier, KZG10Verifier { uint256[{{num_limbs}}] memory cmW_y_limbs = LimbsDecomposition.decompose(cmW[1]); for (uint8 k = 0; k < {{num_limbs}}; k++) { - public_inputs[{{ z_len * 2 + 4 + num_limbs * 2 }} + k] = cmW_x_limbs[k]; - public_inputs[{{ z_len * 2 + 4 + num_limbs * 3 }} + k] = cmW_y_limbs[k]; + public_inputs[{{ z_len * 2 + 5 + num_limbs * 2 }} + k] = cmW_x_limbs[k]; + public_inputs[{{ z_len * 2 + 5 + num_limbs * 3 }} + k] = cmW_y_limbs[k]; } } @@ -134,10 +135,10 @@ contract NovaDecider is Groth16Verifier, KZG10Verifier { { // add challenges - public_inputs[{{ z_len * 2 + 4 + num_limbs * 4 }}] = challenge_W_challenge_E_kzg_evals[0]; - public_inputs[{{ z_len * 2 + 4 + num_limbs * 4 + 1 }}] = challenge_W_challenge_E_kzg_evals[1]; - public_inputs[{{ z_len * 2 + 4 + num_limbs * 4 + 2 }}] = challenge_W_challenge_E_kzg_evals[2]; - public_inputs[{{ z_len * 2 + 4 + num_limbs * 4 + 3 }}] = challenge_W_challenge_E_kzg_evals[3]; + public_inputs[{{ z_len * 2 + 5 + num_limbs * 4 }}] = challenge_W_challenge_E_kzg_evals[0]; + public_inputs[{{ z_len * 2 + 5 + num_limbs * 4 + 1 }}] = challenge_W_challenge_E_kzg_evals[1]; + public_inputs[{{ z_len * 2 + 5 + num_limbs * 4 + 2 }}] = challenge_W_challenge_E_kzg_evals[2]; + public_inputs[{{ z_len * 2 + 5 + num_limbs * 4 + 3 }}] = challenge_W_challenge_E_kzg_evals[3]; uint256[{{num_limbs}}] memory cmT_x_limbs; uint256[{{num_limbs}}] memory cmT_y_limbs; @@ -146,8 +147,8 @@ contract NovaDecider is Groth16Verifier, KZG10Verifier { cmT_y_limbs = LimbsDecomposition.decompose(u_i_x_cmT[3]); for (uint8 k = 0; k < {{num_limbs}}; k++) { - public_inputs[{{ z_len * 2 + 4 + num_limbs * 4 }} + 4 + k] = cmT_x_limbs[k]; - public_inputs[{{ z_len * 2 + 4 + num_limbs * 5}} + 4 + k] = cmT_y_limbs[k]; + public_inputs[{{ z_len * 2 + 5 + num_limbs * 4 }} + 4 + k] = cmT_x_limbs[k]; + public_inputs[{{ z_len * 2 + 5 + num_limbs * 5}} + 4 + k] = cmT_y_limbs[k]; } // last element of the groth16 proof's public inputs is `r` From eb8c6799f7a5c03b57a755929e70f040d9390984 Mon Sep 17 00:00:00 2001 From: Nick Dimitriou Date: Fri, 26 Jul 2024 11:22:54 +0300 Subject: [PATCH 2/4] Cleaning up --- examples/bench_utils.rs | 248 -------- examples/mova.rs | 135 ----- examples/nova.rs | 106 ---- folding-schemes/src/folding/mova/mod.rs | 711 
----------------------- folding-schemes/src/folding/mova/nifs.rs | 356 ------------ 5 files changed, 1556 deletions(-) delete mode 100644 examples/bench_utils.rs delete mode 100644 examples/mova.rs delete mode 100644 examples/nova.rs diff --git a/examples/bench_utils.rs b/examples/bench_utils.rs deleted file mode 100644 index aeba01dc..00000000 --- a/examples/bench_utils.rs +++ /dev/null @@ -1,248 +0,0 @@ -use ark_ff::{BigInteger, PrimeField}; -use folding_schemes::ccs::r1cs::R1CS; -use folding_schemes::utils::vec::{dense_matrix_to_sparse, SparseMatrix}; -use folding_schemes::Error; -use num_bigint::BigUint; -use num_traits::One; -use rand::Rng; - -// would be best to move this to other file -// pub fn get_test_r1cs() -> R1CS { -// // R1CS for: x^3 + x + 5 = y (example from article -// // https://www.vitalik.ca/general/2016/12/10/qap.html ) -// let A = to_F_matrix::(vec![ -// vec![0, 1, 0, 0, 0, 0], -// vec![0, 0, 0, 1, 0, 0], -// vec![0, 1, 0, 0, 1, 0], -// vec![5, 0, 0, 0, 0, 1], -// ]); -// let B = to_F_matrix::(vec![ -// vec![0, 1, 0, 0, 0, 0], -// vec![0, 1, 0, 0, 0, 0], -// vec![1, 0, 0, 0, 0, 0], -// vec![1, 0, 0, 0, 0, 0], -// ]); -// let C = to_F_matrix::(vec![ -// vec![0, 0, 0, 1, 0, 0], -// vec![0, 0, 0, 0, 1, 0], -// vec![0, 0, 0, 0, 0, 1], -// vec![0, 0, 1, 0, 0, 0], -// ]); -// -// R1CS:: { l: 1, A, B, C } -// } -const POWER: usize = 16; -fn create_large_diagonal_matrix() -> SparseMatrix { - let size = 1 << POWER; - let mut coeffs: Vec> = Vec::with_capacity(size); - - // Populate the diagonal elements - for i in 0..size { - // Each row has one non-zero entry at (i, i) with a value of 2 - coeffs.push(vec![(F::from(1u64), i)]); - } - - // Instantiate SparseMatrix directly - SparseMatrix { - n_rows: size, - n_cols: size, - coeffs - } -} - -pub fn get_test_r1cs() -> R1CS { - // Define matrices A, B, and C as specified - let A = create_large_diagonal_matrix::(); - let B = create_large_diagonal_matrix::(); - let C = create_large_diagonal_matrix::(); - - // let A = create_large_diagonal_matrix_2::(); - // let B = create_large_diagonal_matrix_2::(); - // let C = create_large_diagonal_matrix_2::(); - - // println!("A: {:?} {:?}", A.coeffs, A.n_rows); - // println!("B: {:?} {:?}", B.coeffs, B.n_rows); - // println!("C: {:?} {:?}", C.coeffs, C.n_rows); - - // Return the R1CS structure - R1CS { l: 1, A, B, C } -} - -// pub fn get_test_z(input: BigUint) -> Vec { -// let one = BigUint::one(); -// let five = &one + &one + &one + &one + &one; -// to_F_vec_2(vec![ -// one, // 1 -// input.clone(), // io -// &input * &input * &input + &input + &five, // x^3 + x + 5 -// &input * &input, // x^2 -// &input * &input * &input, // x^3 -// &input * &input * &input + &input, -// &input * &input * &input + &input, // x^3 + x -// &input * &input * &input + &input, // x^3 + x -// // x^3 + x -// ]) -// } - -pub fn get_test_z(input: BigUint) -> Vec { - let one = BigUint::one(); - let five = &one + &one + &one + &one + &one; - let size = 1 << POWER; // Calculate size only once - let mut z_vec = Vec::with_capacity(size); // Preallocate memory for efficiency - - // Add initial elements - z_vec.push(one); // 1 - z_vec.push(input.clone()); // input - - // Pre-compute input cubed since it does not change in the loop - let input_cubed = &input * &input * &input; - - // Fill the rest of the vector - z_vec.extend(std::iter::repeat(input_cubed).take(size - 2)); - to_F_vec_2(z_vec) -} - -pub fn get_test_z_albert(input: BigUint) -> Vec { - // let one = BigUint::one(); - // let five = &one + &one + &one + &one + &one; - 
// let size = 1 << POWER; // Calculate size only once - // let mut z_vec = Vec::with_capacity(size); // Preallocate memory for // efficiency -// - // // Add initial elements - // // z_vec.push(one); // 1 - // z_vec.push(input.clone()); // input -// - // // Pre-compute input cubed since it does not change in the loop - // let input_cubed = &input * &input * &input; -// - // // Fill the rest of the vector - // // z_vec.extend(std::iter::repeat(input_cubed).take(size - 2)); - // z_vec.extend((0..size - 1).map(|i| &input_cubed * (i as u32) + i*i*i*i + i*i + i*i*i)); - - - let z_vec = create_random_biguints(1< Vec { - let mut rng = rand::thread_rng(); - - (0..count) - .map(|_| { - let bit_size = rng.gen_range(1..=max_bits); - let mut bytes = vec![0u8; (bit_size as usize + 7) / 8]; - rng.fill(&mut bytes[..]); - BigUint::from_bytes_le(&bytes) % (BigUint::from(1u32) << bit_size) - }) - .collect() -} - -pub fn to_F_matrix(M: Vec>) -> SparseMatrix { - dense_matrix_to_sparse(to_F_dense_matrix(M)) -} -pub fn to_F_dense_matrix(M: Vec>) -> Vec> { - M.iter() - .map(|m| m.iter().map(|r| F::from(*r as u64)).collect()) - .collect() -} -pub fn to_F_vec(z: Vec) -> Vec { - let mut result = Vec::with_capacity(z.len()); // Pre-allocate space for efficiency - for bigint in z { - // Convert each BigUint to F::BigInt - // match F::try_from(bigint) { - match num_bigint_to_ark_bigint::(&bigint) { - Ok(f_bigint) => { - // Attempt to convert F::BigInt to the prime field element - if let Some(field_element) = F::from_bigint(f_bigint) { - result.push(field_element); - } else { - // Handle the case where the conversion is not possible - eprintln!( - "Conversion to field element failed for bigint: {:?}", - f_bigint - ); - continue; // Optionally skip or handle differently - } - // result.push(f_bigint); - } - Err(e) => { - // Handle errors from bigint conversion - eprintln!("Error converting bigint: {:?}", e); - continue; // Optionally skip or handle differently - } - } - } - result -} -pub fn to_F_vec_2(z: Vec) -> Vec { - let mut result = Vec::with_capacity(z.len()); // Pre-allocate space for efficiency - for bigint in z { - // Convert each BigUint to F::BigInt - match F::try_from(bigint) { - // match num_bigint_to_ark_bigint::(&bigint) { - Ok(f_bigint) => { - // // Attempt to convert F::BigInt to the prime field element - // if let Some(field_element) = F::from_bigint(f_bigint) { - // result.push(field_element); - // } else { - // // Handle the case where the conversion is not possible - // eprintln!("Conversion to field element failed for bigint: {:?}", f_bigint); - // continue; // Optionally skip or handle differently - // } - result.push(f_bigint); - } - Err(e) => { - // Handle errors from bigint conversion - eprintln!("Error converting bigint: {:?}", e); - continue; // Optionally skip or handle differently - } - } - } - result -} - -pub fn num_bigint_to_ark_bigint(value: &BigUint) -> Result { - F::BigInt::try_from(value.clone()).map_err(|_| { - Error::BigIntConversionError("Failed to convert to PrimeField::BigInt".to_string()) - }) -} - -// pub const BIG_NUM: BigUint = BigUint::one() << 80; -// pub const FOUR: BigUint = BigUint::one() + BigUint::one() + BigUint::one() + BigUint::one(); - -fn create_large_diagonal_matrix_2() -> SparseMatrix { - let size = 1 << POWER; // 2^16 - let mut matrix = vec![vec![0; size]; size]; - - for i in 0..size { - matrix[i][i] = 2; // Set diagonal elements to 2 - } - - // println!("{:?}", matrix); - to_F_matrix(matrix) -} -// -// pub fn get_test_r1cs() -> R1CS { -// // Define matrices A, 
B, and C as specified -// let A = create_large_diagonal_matrix::(); -// let B = create_large_diagonal_matrix::(); -// let C = create_large_diagonal_matrix::(); -// println!("A: {:?} {:?}", A.n_cols, A.n_rows); -// println!("B: {:?} {:?}", B.n_cols, B.n_rows); -// println!("C: {:?} {:?}", C.n_cols, C.n_rows); -// -// // Return the R1CS structure -// R1CS:: { l: 1, A, B, C } -// } -// -// pub fn to_F_matrix(M: Vec>) -> SparseMatrix { -// dense_matrix_to_sparse(to_F_dense_matrix(M)) -// } -// pub fn to_F_dense_matrix(M: Vec>) -> Vec> { -// M.iter() -// .map(|m| m.iter().map(|r| F::from(*r as u64)).collect()) -// .collect() -// } \ No newline at end of file diff --git a/examples/mova.rs b/examples/mova.rs deleted file mode 100644 index 7bf76369..00000000 --- a/examples/mova.rs +++ /dev/null @@ -1,135 +0,0 @@ -use ark_ff::PrimeField; -use ark_pallas::{Fr, Projective}; -use ark_std::log2; -use ark_std::UniformRand; -use folding_schemes::ccs::r1cs::R1CS; -use folding_schemes::commitment::pedersen::Pedersen; -use folding_schemes::commitment::CommitmentScheme; -use folding_schemes::folding::mova::homogenization::{PointVsLineHomogenization, SumCheckHomogenization}; -use folding_schemes::folding::mova::nifs::NIFS; -use folding_schemes::folding::mova::Witness; -use folding_schemes::transcript::poseidon::{poseidon_canonical_config, PoseidonTranscript}; -use folding_schemes::transcript::Transcript; -use num_bigint::{ BigUint, RandBigInt}; -use rand::Rng; -use std::mem::size_of_val; -use std::time::Instant; - -use crate::bench_utils::{get_test_r1cs, get_test_z_albert}; -use ark_ff::BigInteger; -use folding_schemes::folding::mova::traits::MovaR1CS; -use num_traits::{One, Zero}; - -mod bench_utils; - -fn main() { - println!("starting"); - let size = 2usize.pow(16); - - - - // define r1cs and parameters - let r1cs: R1CS = get_test_r1cs(); - let mut rng = ark_std::test_rng(); - let (pedersen_params, _) = Pedersen::::setup(&mut rng, r1cs.A.n_cols).unwrap(); - let poseidon_config = poseidon_canonical_config::(); - let mut transcript_p = PoseidonTranscript::::new(&poseidon_config); - - // let big_number: BigUint = One::one() ; // This creates a 250-bit number. 
- - // // INSTANCE 1 - let big_num: BigUint = BigUint::one() << 100; - - println!("Big_Num {:?}", big_num); - - let z_2: Vec = get_test_z_albert(big_num.clone()); - - // println!("Z Instance {:?}", z_2); - - let (w_1, x_1) = r1cs.split_z(&z_2); - - let mut witness_1 = Witness::::new(w_1.clone(), r1cs.A.n_rows); - let vector = vec![1; size]; - - witness_1.E = vector.into_iter().map(|x| Fr::from(x)).collect(); - - // witness_1.E = vec![1, 2, 3, 4].into_iter().map(|x| Fr::from(x)).collect(); - - - - // generate a random evaluation point for MLE - let size_rE_1 = log2(witness_1.E.len()); - let rE_1: Vec<_> = (0..size_rE_1).map(|_| Fr::rand(&mut rng)).collect(); - - let committed_instance_1 = witness_1 - .commit::>(&pedersen_params, x_1, rE_1) - .unwrap(); - - // INSTANCE 2 - let four = BigUint::one() + BigUint::one() + BigUint::one() + BigUint::one(); - - let z_2 = get_test_z_albert(four); - let (w_2, x_2) = r1cs.split_z(&z_2); - let mut witness_2 = Witness::::new(w_2.clone(), r1cs.A.n_rows); - // - // // - // witness_2.E = vec![5, 6, 7, 8].into_iter().map(|x| Fr::from(x)).collect(); - - let vector = vec![2; size]; - // - witness_2.E = vector.into_iter().map(|x| Fr::from(x)).collect(); - - let size_rE_2 = log2(witness_2.E.len()); - let rE_2: Vec<_> = (0..size_rE_2).map(|_| Fr::rand(&mut rng)).collect(); - - let mut committed_instance_2 = witness_2 - .commit::>(&pedersen_params, x_2, rE_2) - .unwrap(); - - let start = Instant::now(); - // NIFS.P - let result = NIFS::< - Projective, - Pedersen, - PoseidonTranscript, - PointVsLineHomogenization> - >::prove( - &pedersen_params, - &r1cs, - &mut transcript_p, - &committed_instance_1, - &committed_instance_2, - &witness_1, - &witness_2, - ) - .unwrap(); - - println!( - "Mova prove time (point-vs-line variant) {:?}", - start.elapsed() - ); - println!("Mova bytes used {:?}", size_of_val(&result)); - - //NIFS.V - let poseidon_config = poseidon_canonical_config::(); - let mut transcript_p = PoseidonTranscript::::new(&poseidon_config); - let (proof, instance_witness) = result; - - let folded_committed_instance = NIFS::< - Projective, - Pedersen, - PoseidonTranscript, - PointVsLineHomogenization>, - >::verify( - &mut transcript_p, - &committed_instance_1, - &committed_instance_2, - &proof, - ) - .unwrap(); - let check = r1cs.check_relaxed_instance_relation(&instance_witness.w, &folded_committed_instance); - match check { - Ok(_) => println!("The relation check was successful."), - Err(e) => println!("The relation check failed: {:?}", e), - } -} diff --git a/examples/nova.rs b/examples/nova.rs deleted file mode 100644 index 05790e7a..00000000 --- a/examples/nova.rs +++ /dev/null @@ -1,106 +0,0 @@ -use crate::bench_utils::{get_test_r1cs, get_test_z_albert}; -use ark_ff::{ BigInteger, Field, PrimeField}; -use ark_pallas::{Fr, Projective}; -use ark_std::{log2, UniformRand}; -use folding_schemes::commitment::pedersen::Pedersen; -use folding_schemes::commitment::CommitmentScheme; -use folding_schemes::folding::nova::nifs::NIFS; -use folding_schemes::folding::nova::traits::NovaR1CS; -use folding_schemes::folding::nova::Witness; -use folding_schemes::transcript::poseidon::{poseidon_canonical_config, PoseidonTranscript}; -use folding_schemes::transcript::Transcript; -use folding_schemes::utils::sum_check::{ SumCheck}; -use num_bigint::BigUint; -use num_traits::{One, Zero}; -use rand::Rng; -use std::mem::size_of_val; -use std::time::Instant; - -mod bench_utils; - -fn main() { - println!("starting"); - - let big_num = BigUint::one() << 100; - let r1cs = 
get_test_r1cs(); - - let z = get_test_z_albert(big_num); - - let (w, x) = r1cs.split_z(&z); - - let mut rng = ark_std::test_rng(); - let (pedersen_params, _) = Pedersen::::setup(&mut rng, r1cs.A.n_cols).unwrap(); - - let running_instance_w = Witness::::new(w.clone(), r1cs.A.n_rows); - let running_committed_instance = running_instance_w - .commit::>(&pedersen_params, x) - .unwrap(); - - let four = BigUint::one() + BigUint::one() + BigUint::one() + BigUint::one(); - let incoming_instance_z = get_test_z_albert(four); - let (w, x) = r1cs.split_z(&incoming_instance_z); - let incoming_instance_w = Witness::::new(w.clone(), r1cs.A.n_rows); - let incoming_committed_instance = incoming_instance_w - .commit::>(&pedersen_params, x) - .unwrap(); - - let poseidon_config = poseidon_canonical_config::(); - let mut transcript_p = PoseidonTranscript::::new(&poseidon_config); - // let vector = vec![1; size]; - // // - // witness_1.E = vector.into_iter().map(|x| Fr::from(x)).collect(); - // - // let vector = vec![2; size]; - // // - // witness_2.E = vector.into_iter().map(|x| Fr::from(x)).collect(); - // NIFS.P - let start = Instant::now(); - - let (T, cmT) = NIFS::>::compute_cmT( - &pedersen_params, - &r1cs, - &running_instance_w, - &running_committed_instance, - &incoming_instance_w, - &incoming_committed_instance, - ) - .unwrap(); - - match transcript_p.absorb_point(&cmT) { - Ok(_) => { - // - } - Err(e) => { - println!("Absorbed failed: {:?}", e); - } - } - - let r = transcript_p.get_challenge(); - let result = NIFS::>::fold_instances( - r, - &running_instance_w, - &running_committed_instance, - &incoming_instance_w, - &incoming_committed_instance, - &T, - cmT, - ) - .unwrap(); - - println!("Nova prove time {:?}", start.elapsed()); - println!("Nova bytes used {:?}", size_of_val(&result)); - - let (folded_w, _) = result; - - let folded_committed_instance = NIFS::>::verify( - r, - &running_committed_instance, - &incoming_committed_instance, - &cmT, - ); - let check = r1cs.check_relaxed_instance_relation(&folded_w, &folded_committed_instance); - match check { - Ok(_) => println!("The relation check was successful."), - Err(e) => println!("The relation check failed: {:?}", e), - } -} diff --git a/folding-schemes/src/folding/mova/mod.rs b/folding-schemes/src/folding/mova/mod.rs index 0e1b4f71..796cfa10 100644 --- a/folding-schemes/src/folding/mova/mod.rs +++ b/folding-schemes/src/folding/mova/mod.rs @@ -167,714 +167,3 @@ where } } -// #[derive(Debug, Clone)] -// pub struct ProverParams -// where -// C1: CurveGroup, -// C2: CurveGroup, -// CS1: CommitmentScheme, -// CS2: CommitmentScheme, -// { -// pub poseidon_config: PoseidonConfig, -// pub cs_params: CS1::ProverParams, -// pub cf_cs_params: CS2::ProverParams, -// } - -// #[derive(Debug, Clone)] -// pub struct VerifierParams { -// pub poseidon_config: PoseidonConfig, -// pub r1cs: R1CS, -// pub cf_r1cs: R1CS, -// } - -// /// Implements Nova+CycleFold's IVC, described in [Nova](https://eprint.iacr.org/2021/370.pdf) and -// /// [CycleFold](https://eprint.iacr.org/2023/1192.pdf), following the FoldingScheme trait -// #[derive(Clone, Debug)] -// pub struct Nova -// where -// C1: CurveGroup, -// GC1: CurveVar> + ToConstraintFieldGadget>, -// C2: CurveGroup, -// GC2: CurveVar>, -// FC: FCircuit, -// CS1: CommitmentScheme, -// CS2: CommitmentScheme, -// { -// _gc1: PhantomData, -// _c2: PhantomData, -// _gc2: PhantomData, -// /// R1CS of the Augmented Function circuit -// pub r1cs: R1CS, -// /// R1CS of the CycleFold circuit -// pub cf_r1cs: R1CS, -// pub 
poseidon_config: PoseidonConfig, -// /// CommitmentScheme::ProverParams over C1 -// pub cs_params: CS1::ProverParams, -// /// CycleFold CommitmentScheme::ProverParams, over C2 -// pub cf_cs_params: CS2::ProverParams, -// /// F circuit, the circuit that is being folded -// pub F: FC, -// pub i: C1::ScalarField, -// /// initial state -// pub z_0: Vec, -// /// current i-th state -// pub z_i: Vec, -// /// Nova instances -// pub w_i: Witness, -// pub u_i: CommittedInstance, -// pub W_i: Witness, -// pub U_i: CommittedInstance, - -// /// CycleFold running instance -// pub cf_W_i: Witness, -// pub cf_U_i: CommittedInstance, -// } - -// impl FoldingScheme -// for Nova -// where -// C1: CurveGroup, -// GC1: CurveVar> + ToConstraintFieldGadget>, -// C2: CurveGroup, -// GC2: CurveVar> + ToConstraintFieldGadget>, -// FC: FCircuit, -// CS1: CommitmentScheme, -// CS2: CommitmentScheme, -// ::BaseField: PrimeField, -// ::BaseField: PrimeField, -// ::ScalarField: Absorb, -// ::ScalarField: Absorb, -// C1: CurveGroup, -// for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>, -// for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>, -// { -// type PreprocessorParam = (Self::ProverParam, FC); -// type ProverParam = ProverParams; -// type VerifierParam = VerifierParams; -// type CommittedInstanceWithWitness = (CommittedInstance, Witness); -// type CFCommittedInstanceWithWitness = (CommittedInstance, Witness); - -// fn preprocess( -// prep_param: &Self::PreprocessorParam, -// ) -> Result<(Self::ProverParam, Self::VerifierParam), Error> { -// let (prover_params, F_circuit) = prep_param; - -// let (r1cs, cf_r1cs) = -// get_r1cs::(&prover_params.poseidon_config, F_circuit.clone())?; - -// let verifier_params = VerifierParams:: { -// poseidon_config: prover_params.poseidon_config.clone(), -// r1cs, -// cf_r1cs, -// }; -// Ok((prover_params.clone(), verifier_params)) -// } - -// /// Initializes the Nova+CycleFold's IVC for the given parameters and initial state `z_0`. -// fn init(pp: &Self::ProverParam, F: FC, z_0: Vec) -> Result { -// // prepare the circuit to obtain its R1CS -// let cs = ConstraintSystem::::new_ref(); -// let cs2 = ConstraintSystem::::new_ref(); - -// let augmented_F_circuit = -// AugmentedFCircuit::::empty(&pp.poseidon_config, F.clone()); -// let cf_circuit = CycleFoldCircuit::::empty(); - -// augmented_F_circuit.generate_constraints(cs.clone())?; -// cs.finalize(); -// let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; -// let r1cs = extract_r1cs::(&cs); - -// cf_circuit.generate_constraints(cs2.clone())?; -// cs2.finalize(); -// let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?; -// let cf_r1cs = extract_r1cs::(&cs2); - -// // setup the dummy instances -// let (w_dummy, u_dummy) = r1cs.dummy_instance(); -// let (cf_w_dummy, cf_u_dummy) = cf_r1cs.dummy_instance(); - -// // W_dummy=W_0 is a 'dummy witness', all zeroes, but with the size corresponding to the -// // R1CS that we're working with. 
-// Ok(Self { -// _gc1: PhantomData, -// _c2: PhantomData, -// _gc2: PhantomData, -// r1cs, -// cf_r1cs, -// poseidon_config: pp.poseidon_config.clone(), -// cs_params: pp.cs_params.clone(), -// cf_cs_params: pp.cf_cs_params.clone(), -// F, -// i: C1::ScalarField::zero(), -// z_0: z_0.clone(), -// z_i: z_0, -// w_i: w_dummy.clone(), -// u_i: u_dummy.clone(), -// W_i: w_dummy, -// U_i: u_dummy, -// // cyclefold running instance -// cf_W_i: cf_w_dummy.clone(), -// cf_U_i: cf_u_dummy.clone(), -// }) -// } - -// /// Implements IVC.P of Nova+CycleFold -// fn prove_step(&mut self, external_inputs: Vec) -> Result<(), Error> { -// let augmented_F_circuit: AugmentedFCircuit; - -// if self.z_i.len() != self.F.state_len() { -// return Err(Error::NotSameLength( -// "z_i.len()".to_string(), -// self.z_i.len(), -// "F.state_len()".to_string(), -// self.F.state_len(), -// )); -// } -// if external_inputs.len() != self.F.external_inputs_len() { -// return Err(Error::NotSameLength( -// "F.external_inputs_len()".to_string(), -// self.F.external_inputs_len(), -// "external_inputs.len()".to_string(), -// external_inputs.len(), -// )); -// } - -// if self.i > C1::ScalarField::from_le_bytes_mod_order(&usize::MAX.to_le_bytes()) { -// return Err(Error::MaxStep); -// } -// let mut i_bytes: [u8; 8] = [0; 8]; -// i_bytes.copy_from_slice(&self.i.into_bigint().to_bytes_le()[..8]); -// let i_usize: usize = usize::from_le_bytes(i_bytes); - -// let z_i1 = self -// .F -// .step_native(i_usize, self.z_i.clone(), external_inputs.clone())?; - -// // compute T and cmT for AugmentedFCircuit -// let (T, cmT) = self.compute_cmT()?; - -// // r_bits is the r used to the RLC of the F' instances -// let r_bits = ChallengeGadget::::get_challenge_native( -// &self.poseidon_config, -// self.U_i.clone(), -// self.u_i.clone(), -// cmT, -// )?; -// let r_Fr = C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits)) -// .ok_or(Error::OutOfBounds)?; -// let r_Fq = C1::BaseField::from_bigint(BigInteger::from_bits_le(&r_bits)) -// .ok_or(Error::OutOfBounds)?; - -// // fold Nova instances -// let (W_i1, U_i1): (Witness, CommittedInstance) = NIFS::::fold_instances( -// r_Fr, &self.W_i, &self.U_i, &self.w_i, &self.u_i, &T, cmT, -// )?; - -// // folded instance output (public input, x) -// // u_{i+1}.x[0] = H(i+1, z_0, z_{i+1}, U_{i+1}) -// let u_i1_x = U_i1.hash( -// &self.poseidon_config, -// self.i + C1::ScalarField::one(), -// self.z_0.clone(), -// z_i1.clone(), -// )?; -// // u_{i+1}.x[1] = H(cf_U_{i+1}) -// let cf_u_i1_x: C1::ScalarField; - -// if self.i == C1::ScalarField::zero() { -// cf_u_i1_x = self.cf_U_i.hash_cyclefold(&self.poseidon_config)?; -// // base case -// augmented_F_circuit = AugmentedFCircuit:: { -// _gc2: PhantomData, -// poseidon_config: self.poseidon_config.clone(), -// i: Some(C1::ScalarField::zero()), // = i=0 -// i_usize: Some(0), -// z_0: Some(self.z_0.clone()), // = z_i -// z_i: Some(self.z_i.clone()), -// external_inputs: Some(external_inputs.clone()), -// u_i_cmW: Some(self.u_i.cmW), // = dummy -// U_i: Some(self.U_i.clone()), // = dummy -// U_i1_cmE: Some(U_i1.cmE), -// U_i1_cmW: Some(U_i1.cmW), -// cmT: Some(cmT), -// F: self.F.clone(), -// x: Some(u_i1_x), -// cf1_u_i_cmW: None, -// cf2_u_i_cmW: None, -// cf_U_i: None, -// cf1_cmT: None, -// cf2_cmT: None, -// cf_x: Some(cf_u_i1_x), -// }; - -// #[cfg(test)] -// NIFS::::verify_folded_instance(r_Fr, &self.U_i, &self.u_i, &U_i1, &cmT)?; -// } else { -// // CycleFold part: -// // get the vector used as public inputs 'x' in the CycleFold circuit -// // 
cyclefold circuit for cmW -// let cfW_u_i_x = [ -// vec![r_Fq], -// get_cm_coordinates(&self.U_i.cmW), -// get_cm_coordinates(&self.u_i.cmW), -// get_cm_coordinates(&U_i1.cmW), -// ] -// .concat(); -// // cyclefold circuit for cmE -// let cfE_u_i_x = [ -// vec![r_Fq], -// get_cm_coordinates(&self.U_i.cmE), -// get_cm_coordinates(&cmT), -// get_cm_coordinates(&U_i1.cmE), -// ] -// .concat(); - -// let cfW_circuit = CycleFoldCircuit:: { -// _gc: PhantomData, -// r_bits: Some(r_bits.clone()), -// p1: Some(self.U_i.clone().cmW), -// p2: Some(self.u_i.clone().cmW), -// x: Some(cfW_u_i_x.clone()), -// }; -// let cfE_circuit = CycleFoldCircuit:: { -// _gc: PhantomData, -// r_bits: Some(r_bits.clone()), -// p1: Some(self.U_i.clone().cmE), -// p2: Some(cmT), -// x: Some(cfE_u_i_x.clone()), -// }; - -// // fold self.cf_U_i + cfW_U -> folded running with cfW -// let (_cfW_w_i, cfW_u_i, cfW_W_i1, cfW_U_i1, cfW_cmT, _) = self.fold_cyclefold_circuit( -// self.cf_W_i.clone(), // CycleFold running instance witness -// self.cf_U_i.clone(), // CycleFold running instance -// cfW_u_i_x, -// cfW_circuit, -// )?; -// // fold [the output from folding self.cf_U_i + cfW_U] + cfE_U = folded_running_with_cfW + cfE -// let (_cfE_w_i, cfE_u_i, cf_W_i1, cf_U_i1, cf_cmT, _) = -// self.fold_cyclefold_circuit(cfW_W_i1, cfW_U_i1.clone(), cfE_u_i_x, cfE_circuit)?; - -// cf_u_i1_x = cf_U_i1.hash_cyclefold(&self.poseidon_config)?; - -// augmented_F_circuit = AugmentedFCircuit:: { -// _gc2: PhantomData, -// poseidon_config: self.poseidon_config.clone(), -// i: Some(self.i), -// i_usize: Some(i_usize), -// z_0: Some(self.z_0.clone()), -// z_i: Some(self.z_i.clone()), -// external_inputs: Some(external_inputs.clone()), -// u_i_cmW: Some(self.u_i.cmW), -// U_i: Some(self.U_i.clone()), -// U_i1_cmE: Some(U_i1.cmE), -// U_i1_cmW: Some(U_i1.cmW), -// cmT: Some(cmT), -// F: self.F.clone(), -// x: Some(u_i1_x), -// // cyclefold values -// cf1_u_i_cmW: Some(cfW_u_i.cmW), -// cf2_u_i_cmW: Some(cfE_u_i.cmW), -// cf_U_i: Some(self.cf_U_i.clone()), -// cf1_cmT: Some(cfW_cmT), -// cf2_cmT: Some(cf_cmT), -// cf_x: Some(cf_u_i1_x), -// }; - -// self.cf_W_i = cf_W_i1; -// self.cf_U_i = cf_U_i1; - -// #[cfg(test)] -// { -// self.cf_r1cs.check_instance_relation(&_cfW_w_i, &cfW_u_i)?; -// self.cf_r1cs.check_instance_relation(&_cfE_w_i, &cfE_u_i)?; -// self.cf_r1cs -// .check_relaxed_instance_relation(&self.cf_W_i, &self.cf_U_i)?; -// } -// } - -// let cs = ConstraintSystem::::new_ref(); - -// augmented_F_circuit.generate_constraints(cs.clone())?; - -// #[cfg(test)] -// assert!(cs.is_satisfied().unwrap()); - -// let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; -// let (w_i1, x_i1) = extract_w_x::(&cs); -// if x_i1[0] != u_i1_x || x_i1[1] != cf_u_i1_x { -// return Err(Error::NotEqual); -// } - -// #[cfg(test)] -// if x_i1.len() != 2 { -// return Err(Error::NotExpectedLength(x_i1.len(), 2)); -// } - -// // set values for next iteration -// self.i += C1::ScalarField::one(); -// self.z_i = z_i1; -// self.w_i = Witness::::new(w_i1, self.r1cs.A.n_rows); -// self.u_i = self.w_i.commit::(&self.cs_params, x_i1)?; -// self.W_i = W_i1; -// self.U_i = U_i1; - -// #[cfg(test)] -// { -// self.r1cs.check_instance_relation(&self.w_i, &self.u_i)?; -// self.r1cs -// .check_relaxed_instance_relation(&self.W_i, &self.U_i)?; -// } - -// Ok(()) -// } - -// fn state(&self) -> Vec { -// self.z_i.clone() -// } -// fn instances( -// &self, -// ) -> ( -// Self::CommittedInstanceWithWitness, -// Self::CommittedInstanceWithWitness, -// 
Self::CFCommittedInstanceWithWitness, -// ) { -// ( -// (self.U_i.clone(), self.W_i.clone()), -// (self.u_i.clone(), self.w_i.clone()), -// (self.cf_U_i.clone(), self.cf_W_i.clone()), -// ) -// } - -// /// Implements IVC.V of Nova+CycleFold -// fn verify( -// vp: Self::VerifierParam, -// z_0: Vec, // initial state -// z_i: Vec, // last state -// num_steps: C1::ScalarField, -// running_instance: Self::CommittedInstanceWithWitness, -// incoming_instance: Self::CommittedInstanceWithWitness, -// cyclefold_instance: Self::CFCommittedInstanceWithWitness, -// ) -> Result<(), Error> { -// let (U_i, W_i) = running_instance; -// let (u_i, w_i) = incoming_instance; -// let (cf_U_i, cf_W_i) = cyclefold_instance; - -// if u_i.x.len() != 2 || U_i.x.len() != 2 { -// return Err(Error::IVCVerificationFail); -// } - -// // check that u_i's output points to the running instance -// // u_i.X[0] == H(i, z_0, z_i, U_i) -// let expected_u_i_x = U_i.hash(&vp.poseidon_config, num_steps, z_0, z_i.clone())?; -// if expected_u_i_x != u_i.x[0] { -// return Err(Error::IVCVerificationFail); -// } -// // u_i.X[1] == H(cf_U_i) -// let expected_cf_u_i_x = cf_U_i.hash_cyclefold(&vp.poseidon_config)?; -// if expected_cf_u_i_x != u_i.x[1] { -// return Err(Error::IVCVerificationFail); -// } - -// // check u_i.cmE==0, u_i.u==1 (=u_i is a un-relaxed instance) -// if !u_i.cmE.is_zero() || !u_i.u.is_one() { -// return Err(Error::IVCVerificationFail); -// } - -// // check R1CS satisfiability -// vp.r1cs.check_instance_relation(&w_i, &u_i)?; -// // check RelaxedR1CS satisfiability -// vp.r1cs.check_relaxed_instance_relation(&W_i, &U_i)?; - -// // check CycleFold RelaxedR1CS satisfiability -// vp.cf_r1cs -// .check_relaxed_instance_relation(&cf_W_i, &cf_U_i)?; - -// Ok(()) -// } -// } - -// impl Nova -// where -// C1: CurveGroup, -// GC1: CurveVar> + ToConstraintFieldGadget>, -// C2: CurveGroup, -// GC2: CurveVar>, -// FC: FCircuit, -// CS1: CommitmentScheme, -// CS2: CommitmentScheme, -// ::BaseField: PrimeField, -// ::ScalarField: Absorb, -// ::ScalarField: Absorb, -// C1: CurveGroup, -// { -// // computes T and cmT for the AugmentedFCircuit -// fn compute_cmT(&self) -> Result<(Vec, C1), Error> { -// NIFS::::compute_cmT( -// &self.cs_params, -// &self.r1cs, -// &self.w_i, -// &self.u_i, -// &self.W_i, -// &self.U_i, -// ) -// } -// // computes T* and cmT* for the CycleFoldCircuit -// fn compute_cf_cmT( -// &self, -// cf_w_i: &Witness, -// cf_u_i: &CommittedInstance, -// cf_W_i: &Witness, -// cf_U_i: &CommittedInstance, -// ) -> Result<(Vec, C2), Error> { -// NIFS::::compute_cyclefold_cmT( -// &self.cf_cs_params, -// &self.cf_r1cs, -// cf_w_i, -// cf_u_i, -// cf_W_i, -// cf_U_i, -// ) -// } -// } - -// impl Nova -// where -// C1: CurveGroup, -// GC1: CurveVar> + ToConstraintFieldGadget>, -// C2: CurveGroup, -// GC2: CurveVar> + ToConstraintFieldGadget>, -// FC: FCircuit, -// CS1: CommitmentScheme, -// CS2: CommitmentScheme, -// ::BaseField: PrimeField, -// ::BaseField: PrimeField, -// ::ScalarField: Absorb, -// ::ScalarField: Absorb, -// C1: CurveGroup, -// for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>, -// for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>, -// { -// // folds the given cyclefold circuit and its instances -// #[allow(clippy::type_complexity)] -// fn fold_cyclefold_circuit( -// &self, -// cf_W_i: Witness, // witness of the running instance -// cf_U_i: CommittedInstance, // running instance -// cf_u_i_x: Vec, -// cf_circuit: CycleFoldCircuit, -// ) -> Result< -// ( -// Witness, -// CommittedInstance, // u_i -// Witness, // 
W_i1 -// CommittedInstance, // U_i1 -// C2, // cmT -// C2::ScalarField, // r_Fq -// ), -// Error, -// > { -// let cs2 = ConstraintSystem::::new_ref(); -// cf_circuit.generate_constraints(cs2.clone())?; - -// let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?; -// let (cf_w_i, cf_x_i) = extract_w_x::(&cs2); -// if cf_x_i != cf_u_i_x { -// return Err(Error::NotEqual); -// } - -// #[cfg(test)] -// if cf_x_i.len() != CF_IO_LEN { -// return Err(Error::NotExpectedLength(cf_x_i.len(), CF_IO_LEN)); -// } - -// // fold cyclefold instances -// let cf_w_i = Witness::::new(cf_w_i.clone(), self.cf_r1cs.A.n_rows); -// let cf_u_i: CommittedInstance = -// cf_w_i.commit::(&self.cf_cs_params, cf_x_i.clone())?; - -// // compute T* and cmT* for CycleFoldCircuit -// let (cf_T, cf_cmT) = self.compute_cf_cmT(&cf_w_i, &cf_u_i, &cf_W_i, &cf_U_i)?; - -// let cf_r_bits = CycleFoldChallengeGadget::::get_challenge_native( -// &self.poseidon_config, -// cf_U_i.clone(), -// cf_u_i.clone(), -// cf_cmT, -// )?; -// let cf_r_Fq = C1::BaseField::from_bigint(BigInteger::from_bits_le(&cf_r_bits)) -// .ok_or(Error::OutOfBounds)?; - -// let (cf_W_i1, cf_U_i1) = NIFS::::fold_instances( -// cf_r_Fq, &cf_W_i, &cf_U_i, &cf_w_i, &cf_u_i, &cf_T, cf_cmT, -// )?; -// Ok((cf_w_i, cf_u_i, cf_W_i1, cf_U_i1, cf_cmT, cf_r_Fq)) -// } -// } - -// /// helper method to get the r1cs from the ConstraintSynthesizer -// pub fn get_r1cs_from_cs( -// circuit: impl ConstraintSynthesizer, -// ) -> Result, Error> { -// let cs = ConstraintSystem::::new_ref(); -// circuit.generate_constraints(cs.clone())?; -// cs.finalize(); -// let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; -// let r1cs = extract_r1cs::(&cs); -// Ok(r1cs) -// } - -// /// helper method to get the R1CS for both the AugmentedFCircuit and the CycleFold circuit -// #[allow(clippy::type_complexity)] -// pub fn get_r1cs( -// poseidon_config: &PoseidonConfig, -// F_circuit: FC, -// ) -> Result<(R1CS, R1CS), Error> -// where -// C1: CurveGroup, -// GC1: CurveVar> + ToConstraintFieldGadget>, -// C2: CurveGroup, -// GC2: CurveVar> + ToConstraintFieldGadget>, -// FC: FCircuit, -// ::BaseField: PrimeField, -// ::BaseField: PrimeField, -// ::ScalarField: Absorb, -// ::ScalarField: Absorb, -// C1: CurveGroup, -// for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>, -// for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>, -// { -// let augmented_F_circuit = -// AugmentedFCircuit::::empty(poseidon_config, F_circuit); -// let cf_circuit = CycleFoldCircuit::::empty(); -// let r1cs = get_r1cs_from_cs::(augmented_F_circuit)?; -// let cf_r1cs = get_r1cs_from_cs::(cf_circuit)?; -// Ok((r1cs, cf_r1cs)) -// } - -// /// helper method to get the pedersen params length for both the AugmentedFCircuit and the -// /// CycleFold circuit -// pub fn get_cs_params_len( -// poseidon_config: &PoseidonConfig, -// F_circuit: FC, -// ) -> Result<(usize, usize), Error> -// where -// C1: CurveGroup, -// GC1: CurveVar> + ToConstraintFieldGadget>, -// C2: CurveGroup, -// GC2: CurveVar> + ToConstraintFieldGadget>, -// FC: FCircuit, -// ::BaseField: PrimeField, -// ::BaseField: PrimeField, -// ::ScalarField: Absorb, -// ::ScalarField: Absorb, -// C1: CurveGroup, -// for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>, -// for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>, -// { -// let (r1cs, cf_r1cs) = get_r1cs::(poseidon_config, F_circuit)?; -// Ok((r1cs.A.n_rows, cf_r1cs.A.n_rows)) -// } - -// /// returns the coordinates of a commitment point. 
This is compatible with the arkworks -// /// GC.to_constraint_field()[..2] -// pub(crate) fn get_cm_coordinates(cm: &C) -> Vec { -// let zero = (&C::BaseField::zero(), &C::BaseField::zero()); -// let cm = cm.into_affine(); -// let (cm_x, cm_y) = cm.xy().unwrap_or(zero); -// vec![*cm_x, *cm_y] -// } - -// #[cfg(test)] -// pub mod tests { -// use super::*; -// use crate::commitment::kzg::{ProverKey as KZGProverKey, KZG}; -// use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective}; -// use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; -// use ark_poly_commit::kzg10::VerifierKey as KZGVerifierKey; - -// use crate::commitment::pedersen::Pedersen; -// use crate::frontend::tests::CubicFCircuit; -// use crate::transcript::poseidon::poseidon_canonical_config; - -// /// This test tests the Nova+CycleFold IVC, and by consequence it is also testing the -// /// AugmentedFCircuit -// #[test] -// fn test_ivc() { -// let mut rng = ark_std::test_rng(); -// let poseidon_config = poseidon_canonical_config::(); - -// let F_circuit = CubicFCircuit::::new(()).unwrap(); - -// let (cs_len, cf_cs_len) = -// get_cs_params_len::>( -// &poseidon_config, -// F_circuit, -// ) -// .unwrap(); -// let (kzg_pk, _): (KZGProverKey, KZGVerifierKey) = -// KZG::::setup(&mut rng, cs_len).unwrap(); -// let (pedersen_params, _) = Pedersen::::setup(&mut rng, cs_len).unwrap(); -// let (cf_pedersen_params, _) = Pedersen::::setup(&mut rng, cf_cs_len).unwrap(); - -// // run the test using Pedersen commitments on both sides of the curve cycle -// test_ivc_opt::, Pedersen>( -// poseidon_config.clone(), -// pedersen_params, -// cf_pedersen_params.clone(), -// F_circuit, -// ); -// // run the test using KZG for the commitments on the main curve, and Pedersen for the -// // commitments on the secondary curve -// test_ivc_opt::, Pedersen>( -// poseidon_config, -// kzg_pk, -// cf_pedersen_params, -// F_circuit, -// ); -// } - -// // test_ivc allowing to choose the CommitmentSchemes -// fn test_ivc_opt, CS2: CommitmentScheme>( -// poseidon_config: PoseidonConfig, -// cs_params: CS1::ProverParams, -// cf_cs_params: CS2::ProverParams, -// F_circuit: CubicFCircuit, -// ) { -// type NOVA = -// Nova, CS1, CS2>; - -// let prover_params = ProverParams:: { -// poseidon_config: poseidon_config.clone(), -// cs_params, -// cf_cs_params, -// }; - -// let z_0 = vec![Fr::from(3_u32)]; -// let mut nova = NOVA::init(&prover_params, F_circuit, z_0.clone()).unwrap(); - -// let num_steps: usize = 3; -// for _ in 0..num_steps { -// nova.prove_step(vec![]).unwrap(); -// } -// assert_eq!(Fr::from(num_steps as u32), nova.i); - -// let verifier_params = VerifierParams:: { -// poseidon_config, -// r1cs: nova.clone().r1cs, -// cf_r1cs: nova.clone().cf_r1cs, -// }; -// let (running_instance, incoming_instance, cyclefold_instance) = nova.instances(); -// NOVA::::verify( -// verifier_params, -// z_0, -// nova.z_i, -// nova.i, -// running_instance, -// incoming_instance, -// cyclefold_instance, -// ) -// .unwrap(); -// } -// } diff --git a/folding-schemes/src/folding/mova/nifs.rs b/folding-schemes/src/folding/mova/nifs.rs index ead664bc..39afc037 100644 --- a/folding-schemes/src/folding/mova/nifs.rs +++ b/folding-schemes/src/folding/mova/nifs.rs @@ -135,48 +135,6 @@ NIFS let cmT = CS::commit(cs_prover_params, &T, &C::ScalarField::zero())?; Ok((T, cmT)) } - // pub fn compute_cyclefold_cmT( - // cs_prover_params: &CS::ProverParams, - // r1cs: &R1CS, // R1CS over C2.Fr=C1.Fq (here C=C2) - // w1: &Witness, - // ci1: 
&CommittedInstance, - // w2: &Witness, - // ci2: &CommittedInstance, - // ) -> Result<(Vec, C), Error> - // where - // ::BaseField: ark_ff::PrimeField, - // { - // let z1: Vec = [vec![ci1.u], ci1.x.to_vec(), w1.W.to_vec()].concat(); - // let z2: Vec = [vec![ci2.u], ci2.x.to_vec(), w2.W.to_vec()].concat(); - - // // compute cross terms - // let T = Self::compute_T(r1cs, ci1.u, ci2.u, &z1, &z2)?; - // // use r_T=0 since we don't need hiding property for cm(T) - // let cmT = CS::commit(cs_prover_params, &T, &C::ScalarField::zero())?; - // Ok((T, cmT)) - // } - - /// fold_instances is part of the NIFS.P logic described in - /// [Nova](https://eprint.iacr.org/2021/370.pdf)'s section 4. It returns the folded Committed - /// Instances and the Witness. - // pub fn fold_instances( - // r: C::ScalarField, - // w1: &Witness, - // ci1: &CommittedInstance, - // w2: &Witness, - // ci2: &CommittedInstance, - // T: &[C::ScalarField], - // cmT: C, - // ) -> Result<(Witness, CommittedInstance), Error> { - // // fold witness - // // use r_T=0 since we don't need hiding property for cm(T) - // let w3 = NIFS::::fold_witness(r, w1, w2, T, C::ScalarField::zero())?; - - // // fold committed instances - // let ci3 = NIFS::::fold_committed_instance(r, ci1, ci2, &cmT); - - // Ok((w3, ci3)) - // } #[allow(clippy::type_complexity)] pub fn prove( @@ -345,318 +303,4 @@ NIFS x: ci.x.clone(), }) } - - // /// Verify committed folded instance (ci) relations. Notice that this method does not open the - // /// commitments, but just checks that the given committed instances (ci1, ci2) when folded - // /// result in the folded committed instance (ci3) values. - // pub fn verify_folded_instance( - // r: C::ScalarField, - // ci1: &CommittedInstance, - // ci2: &CommittedInstance, - // ci3: &CommittedInstance, - // cmT: &C, - // ) -> Result<(), Error> { - // let expected = Self::fold_committed_instance(r, ci1, ci2, cmT); - // if ci3.cmE != expected.cmE - // || ci3.u != expected.u - // || ci3.cmW != expected.cmW - // || ci3.x != expected.x - // { - // return Err(Error::NotSatisfied); - // } - // Ok(()) - // } - - // pub fn prove_commitments( - // tr: &mut impl Transcript, - // cs_prover_params: &CS::ProverParams, - // w: &Witness, - // ci: &CommittedInstance, - // T: Vec, - // cmT: &C, - // ) -> Result<[CS::Proof; 3], Error> { - // let cmE_proof = CS::prove(cs_prover_params, tr, &ci.cmE, &w.E, &w.rE, None)?; - // let cmW_proof = CS::prove(cs_prover_params, tr, &ci.cmW, &w.W, &w.rW, None)?; - // let cmT_proof = CS::prove(cs_prover_params, tr, cmT, &T, &C::ScalarField::zero(), None)?; // cm(T) is committed with rT=0 - // Ok([cmE_proof, cmW_proof, cmT_proof]) - // } -} - -#[cfg(test)] -pub mod tests { - // use super::*; - // use ark_crypto_primitives::sponge::poseidon::PoseidonConfig; - // use ark_ff::{BigInteger, PrimeField}; - // use ark_pallas::{Fr, Projective}; - // use ark_std::{ops::Mul, UniformRand}; - - // use crate::ccs::r1cs::tests::{get_test_r1cs, get_test_z}; - // use crate::commitment::pedersen::{Params as PedersenParams, Pedersen}; - // use crate::folding::nova::circuits::ChallengeGadget; - // use crate::folding::nova::traits::NovaR1CS; - // use crate::transcript::poseidon::{poseidon_canonical_config, PoseidonTranscript}; - - // #[allow(clippy::type_complexity)] - // pub(crate) fn prepare_simple_fold_inputs() -> ( - // PedersenParams, - // PoseidonConfig, - // R1CS, - // Witness, // w1 - // CommittedInstance, // ci1 - // Witness, // w2 - // CommittedInstance, // ci2 - // Witness, // w3 - // CommittedInstance, // ci3 - // 
Vec, // T - // C, // cmT - // Vec, // r_bits - // C::ScalarField, // r_Fr - // ) - // where - // C: CurveGroup, - // ::BaseField: PrimeField, - // C::ScalarField: Absorb, - // { - // let r1cs = get_test_r1cs(); - // let z1 = get_test_z(3); - // let z2 = get_test_z(4); - // let (w1, x1) = r1cs.split_z(&z1); - // let (w2, x2) = r1cs.split_z(&z2); - - // let w1 = Witness::::new(w1.clone(), r1cs.A.n_rows); - // let w2 = Witness::::new(w2.clone(), r1cs.A.n_rows); - - // let mut rng = ark_std::test_rng(); - // let (pedersen_params, _) = Pedersen::::setup(&mut rng, r1cs.A.n_cols).unwrap(); - - // // compute committed instances - // let ci1 = w1 - // .commit::>(&pedersen_params, x1.clone()) - // .unwrap(); - // let ci2 = w2 - // .commit::>(&pedersen_params, x2.clone()) - // .unwrap(); - - // // NIFS.P - // let (T, cmT) = - // NIFS::>::compute_cmT(&pedersen_params, &r1cs, &w1, &ci1, &w2, &ci2) - // .unwrap(); - - // let poseidon_config = poseidon_canonical_config::(); - - // let r_bits = ChallengeGadget::::get_challenge_native( - // &poseidon_config, - // ci1.clone(), - // ci2.clone(), - // cmT, - // ) - // .unwrap(); - // let r_Fr = C::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap(); - - // let (w3, ci3) = - // NIFS::>::fold_instances(r_Fr, &w1, &ci1, &w2, &ci2, &T, cmT).unwrap(); - - // ( - // pedersen_params, - // poseidon_config, - // r1cs, - // w1, - // ci1, - // w2, - // ci2, - // w3, - // ci3, - // T, - // cmT, - // r_bits, - // r_Fr, - // ) - // } - - // // fold 2 dummy instances and check that the folded instance holds the relaxed R1CS relation - // #[test] - // fn test_nifs_fold_dummy() { - // let r1cs = get_test_r1cs::(); - // let z1 = get_test_z(3); - // let (w1, x1) = r1cs.split_z(&z1); - - // let mut rng = ark_std::test_rng(); - // let (pedersen_params, _) = Pedersen::::setup(&mut rng, r1cs.A.n_cols).unwrap(); - - // // dummy instance, witness and public inputs zeroes - // let w_dummy = Witness::::new(vec![Fr::zero(); w1.len()], r1cs.A.n_rows); - // let mut u_dummy = w_dummy - // .commit::>(&pedersen_params, vec![Fr::zero(); x1.len()]) - // .unwrap(); - // u_dummy.u = Fr::zero(); - - // let w_i = w_dummy.clone(); - // let u_i = u_dummy.clone(); - // let W_i = w_dummy.clone(); - // let U_i = u_dummy.clone(); - // r1cs.check_relaxed_instance_relation(&w_i, &u_i).unwrap(); - // r1cs.check_relaxed_instance_relation(&W_i, &U_i).unwrap(); - - // let r_Fr = Fr::from(3_u32); - - // let (T, cmT) = NIFS::>::compute_cmT( - // &pedersen_params, - // &r1cs, - // &w_i, - // &u_i, - // &W_i, - // &U_i, - // ) - // .unwrap(); - // let (W_i1, U_i1) = NIFS::>::fold_instances( - // r_Fr, &w_i, &u_i, &W_i, &U_i, &T, cmT, - // ) - // .unwrap(); - // r1cs.check_relaxed_instance_relation(&W_i1, &U_i1).unwrap(); - // } - - // // fold 2 instances into one - // #[test] - // fn test_nifs_one_fold() { - // let (pedersen_params, poseidon_config, r1cs, w1, ci1, w2, ci2, w3, ci3, T, cmT, _, r) = - // prepare_simple_fold_inputs(); - - // // NIFS.V - // let ci3_v = NIFS::>::verify(r, &ci1, &ci2, &cmT); - // assert_eq!(ci3_v, ci3); - - // // check that relations hold for the 2 inputted instances and the folded one - // r1cs.check_relaxed_instance_relation(&w1, &ci1).unwrap(); - // r1cs.check_relaxed_instance_relation(&w2, &ci2).unwrap(); - // r1cs.check_relaxed_instance_relation(&w3, &ci3).unwrap(); - - // // check that folded commitments from folded instance (ci) are equal to folding the - // // use folded rE, rW to commit w3 - // let ci3_expected = w3 - // .commit::>(&pedersen_params, 
ci3.x.clone()) - // .unwrap(); - // assert_eq!(ci3_expected.cmE, ci3.cmE); - // assert_eq!(ci3_expected.cmW, ci3.cmW); - - // // next equalities should hold since we started from two cmE of zero-vector E's - // assert_eq!(ci3.cmE, cmT.mul(r)); - // assert_eq!(w3.E, vec_scalar_mul(&T, &r)); - - // // NIFS.Verify_Folded_Instance: - // NIFS::>::verify_folded_instance(r, &ci1, &ci2, &ci3, &cmT) - // .unwrap(); - - // // init Prover's transcript - // let mut transcript_p = PoseidonTranscript::::new(&poseidon_config); - // // init Verifier's transcript - // let mut transcript_v = PoseidonTranscript::::new(&poseidon_config); - - // // prove the ci3.cmE, ci3.cmW, cmT commitments - // let cm_proofs = NIFS::>::prove_commitments( - // &mut transcript_p, - // &pedersen_params, - // &w3, - // &ci3, - // T, - // &cmT, - // ) - // .unwrap(); - - // // verify the ci3.cmE, ci3.cmW, cmT commitments - // assert_eq!(cm_proofs.len(), 3); - // Pedersen::::verify( - // &pedersen_params, - // &mut transcript_v, - // &ci3.cmE, - // &cm_proofs[0].clone(), - // ) - // .unwrap(); - // Pedersen::::verify( - // &pedersen_params, - // &mut transcript_v, - // &ci3.cmW, - // &cm_proofs[1].clone(), - // ) - // .unwrap(); - // Pedersen::::verify( - // &pedersen_params, - // &mut transcript_v, - // &cmT, - // &cm_proofs[2].clone(), - // ) - // .unwrap(); - // } - - // #[test] - // fn test_nifs_fold_loop() { - // let r1cs = get_test_r1cs(); - // let z = get_test_z(3); - // let (w, x) = r1cs.split_z(&z); - - // let mut rng = ark_std::test_rng(); - // let (pedersen_params, _) = Pedersen::::setup(&mut rng, r1cs.A.n_cols).unwrap(); - - // // prepare the running instance - // let mut running_instance_w = Witness::::new(w.clone(), r1cs.A.n_rows); - // let mut running_committed_instance = running_instance_w - // .commit::>(&pedersen_params, x) - // .unwrap(); - - // r1cs.check_relaxed_instance_relation(&running_instance_w, &running_committed_instance) - // .unwrap(); - - // let num_iters = 10; - // for i in 0..num_iters { - // // prepare the incoming instance - // let incoming_instance_z = get_test_z(i + 4); - // let (w, x) = r1cs.split_z(&incoming_instance_z); - // let incoming_instance_w = Witness::::new(w.clone(), r1cs.A.n_rows); - // let incoming_committed_instance = incoming_instance_w - // .commit::>(&pedersen_params, x) - // .unwrap(); - // r1cs.check_relaxed_instance_relation( - // &incoming_instance_w, - // &incoming_committed_instance, - // ) - // .unwrap(); - - // let r = Fr::rand(&mut rng); // folding challenge would come from the RO - - // // NIFS.P - // let (T, cmT) = NIFS::>::compute_cmT( - // &pedersen_params, - // &r1cs, - // &running_instance_w, - // &running_committed_instance, - // &incoming_instance_w, - // &incoming_committed_instance, - // ) - // .unwrap(); - // let (folded_w, _) = NIFS::>::fold_instances( - // r, - // &running_instance_w, - // &running_committed_instance, - // &incoming_instance_w, - // &incoming_committed_instance, - // &T, - // cmT, - // ) - // .unwrap(); - - // // NIFS.V - // let folded_committed_instance = NIFS::>::verify( - // r, - // &running_committed_instance, - // &incoming_committed_instance, - // &cmT, - // ); - - // r1cs.check_relaxed_instance_relation(&folded_w, &folded_committed_instance) - // .unwrap(); - - // // set running_instance for next loop iteration - // running_instance_w = folded_w; - // running_committed_instance = folded_committed_instance; - // } - // } } From 64db0cb2f559fbbbfbe55be958dce954a572f28b Mon Sep 17 00:00:00 2001 From: Nick Dimitriou Date: Fri, 26 Jul 
2024 11:32:46 +0300 Subject: [PATCH 3/4] Fixes after big merge with main --- README.md | 8 ++++++++ .../analyse_proving_time.py | 0 .../bench_utils.rs | 2 +- .../hypernova.rs | 7 +------ .../{multiple_inputs => folding_benchmarks}/mova.rs | 0 .../{multiple_inputs => folding_benchmarks}/nova.rs | 0 examples/multiple_inputs/mova_prove_times.csv | 3 --- examples/multiple_inputs/nova_prove_times.csv | 3 --- folding-schemes/Cargo.toml | 10 +++++----- 9 files changed, 15 insertions(+), 18 deletions(-) rename examples/{multiple_inputs => folding_benchmarks}/analyse_proving_time.py (100%) rename examples/{multiple_inputs => folding_benchmarks}/bench_utils.rs (97%) rename examples/{multiple_inputs => folding_benchmarks}/hypernova.rs (94%) rename examples/{multiple_inputs => folding_benchmarks}/mova.rs (100%) rename examples/{multiple_inputs => folding_benchmarks}/nova.rs (100%) delete mode 100644 examples/multiple_inputs/mova_prove_times.csv delete mode 100644 examples/multiple_inputs/nova_prove_times.csv diff --git a/README.md b/README.md index 0e592dd8..9420865f 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,11 @@ +# Benchmarks + +To run the benchmarks use + +`cargo run --release --example ` + +**Folding options:** mova, nova, hp + # sonobe Experimental folding schemes library implemented jointly by [0xPARC](https://0xparc.org/) and [PSE](https://pse.dev). diff --git a/examples/multiple_inputs/analyse_proving_time.py b/examples/folding_benchmarks/analyse_proving_time.py similarity index 100% rename from examples/multiple_inputs/analyse_proving_time.py rename to examples/folding_benchmarks/analyse_proving_time.py diff --git a/examples/multiple_inputs/bench_utils.rs b/examples/folding_benchmarks/bench_utils.rs similarity index 97% rename from examples/multiple_inputs/bench_utils.rs rename to examples/folding_benchmarks/bench_utils.rs index 368f100d..f6c05861 100644 --- a/examples/multiple_inputs/bench_utils.rs +++ b/examples/folding_benchmarks/bench_utils.rs @@ -83,7 +83,7 @@ pub fn to_F_vec(z: Vec) -> Vec { } pub fn write_to_csv(pows: &[usize], prove_times: &[Duration], file_path: String) -> Result<(), Box> { - let path = env::current_dir()?.join("examples/multiple_inputs").join(file_path); + let path = env::current_dir()?.join("").join(file_path); let mut writer = Writer::from_path(path)?; writer.write_record(&["pow", "prove_time"])?; diff --git a/examples/multiple_inputs/hypernova.rs b/examples/folding_benchmarks/hypernova.rs similarity index 94% rename from examples/multiple_inputs/hypernova.rs rename to examples/folding_benchmarks/hypernova.rs index f0f7f86d..83725439 100644 --- a/examples/multiple_inputs/hypernova.rs +++ b/examples/folding_benchmarks/hypernova.rs @@ -24,18 +24,13 @@ mod bench_utils; fn hypernova_benchmarks(power: usize, prove_times: &mut Vec) { let size = 1 << power; - // let r1cs = get_test_r1cs_2(); let r1cs: R1CS = get_test_r1cs(power); let mut rng = ark_std::test_rng(); let ccs = CCS::::from_r1cs(r1cs); let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); - // Generate a satisfying witness let z_1 = get_test_z(power); - // let z_1 = get_test_z_2(3); - // Generate another satisfying witness let z_2 = get_test_z(power); - // let z_2 = get_test_z_2(4); let (running_instance, w1) = ccs @@ -69,7 +64,7 @@ fn hypernova_benchmarks(power: usize, prove_times: &mut Vec) { let prove_time = start.elapsed(); prove_times.push(prove_time); println!( - "Mova prove time {:?}", + "Hypernova prove time {:?}", prove_time ); diff --git 
a/examples/multiple_inputs/mova.rs b/examples/folding_benchmarks/mova.rs similarity index 100% rename from examples/multiple_inputs/mova.rs rename to examples/folding_benchmarks/mova.rs diff --git a/examples/multiple_inputs/nova.rs b/examples/folding_benchmarks/nova.rs similarity index 100% rename from examples/multiple_inputs/nova.rs rename to examples/folding_benchmarks/nova.rs diff --git a/examples/multiple_inputs/mova_prove_times.csv b/examples/multiple_inputs/mova_prove_times.csv deleted file mode 100644 index 35480cfd..00000000 --- a/examples/multiple_inputs/mova_prove_times.csv +++ /dev/null @@ -1,3 +0,0 @@ -pow,prove_time -16,47384 -20,806294 diff --git a/examples/multiple_inputs/nova_prove_times.csv b/examples/multiple_inputs/nova_prove_times.csv deleted file mode 100644 index 7864a568..00000000 --- a/examples/multiple_inputs/nova_prove_times.csv +++ /dev/null @@ -1,3 +0,0 @@ -pow,prove_time -16,408332 -20,5748832 diff --git a/folding-schemes/Cargo.toml b/folding-schemes/Cargo.toml index 3e9ca003..752bac23 100644 --- a/folding-schemes/Cargo.toml +++ b/folding-schemes/Cargo.toml @@ -70,12 +70,12 @@ path = "../examples/external_inputs.rs" [[example]] name = "hp" -path = "../examples/multiple_inputs/hypernova.rs" +path = "../examples/folding_benchmarks/hypernova.rs" [[example]] -name = "nova-multi-inputs" -path = "../examples/multiple_inputs/nova.rs" +name = "nova" +path = "../examples/folding_benchmarks/nova.rs" [[example]] -name = "mova-multi-inputs" -path = "../examples/multiple_inputs/mova.rs" +name = "mova" +path = "../examples/folding_benchmarks/mova.rs" From 9e20a08e53561bd8b7fed33ccd32a1d16642bbb8 Mon Sep 17 00:00:00 2001 From: Nick Dimitriou Date: Fri, 26 Jul 2024 18:13:52 +0300 Subject: [PATCH 4/4] Removed homogenization and added pointvsline --- examples/folding_benchmarks/bench_utils.rs | 16 +++-- examples/folding_benchmarks/hypernova.rs | 36 +++++------- examples/folding_benchmarks/mova.rs | 33 +++++------ examples/folding_benchmarks/nova.rs | 39 ++++++------- folding-schemes/src/folding/mova/mod.rs | 2 +- folding-schemes/src/folding/mova/nifs.rs | 58 ++++++------------- .../{homogenization.rs => pointvsline.rs} | 48 ++++----------- 7 files changed, 87 insertions(+), 145 deletions(-) rename folding-schemes/src/folding/mova/{homogenization.rs => pointvsline.rs} (86%) diff --git a/examples/folding_benchmarks/bench_utils.rs b/examples/folding_benchmarks/bench_utils.rs index f6c05861..c066fcc2 100644 --- a/examples/folding_benchmarks/bench_utils.rs +++ b/examples/folding_benchmarks/bench_utils.rs @@ -83,16 +83,20 @@ pub fn to_F_vec(z: Vec) -> Vec { } pub fn write_to_csv(pows: &[usize], prove_times: &[Duration], file_path: String) -> Result<(), Box> { - let path = env::current_dir()?.join("").join(file_path); + let path = env::current_dir()?.join("examples/folding_benchmarks").join(file_path); let mut writer = Writer::from_path(path)?; writer.write_record(&["pow", "prove_time"])?; - for (pow, prove_time) in pows.iter().zip(prove_times) { - writer.write_record(&[ - pow.to_string(), - prove_time.as_micros().to_string(), - ])?; + let mut pows_cycle = pows.iter().cycle(); + + for prove_time in prove_times { + if let Some(pow) = pows_cycle.next() { + writer.write_record(&[ + pow.to_string(), + prove_time.as_micros().to_string(), + ])?; + } } writer.flush()?; diff --git a/examples/folding_benchmarks/hypernova.rs b/examples/folding_benchmarks/hypernova.rs index 83725439..dadc84e6 100644 --- a/examples/folding_benchmarks/hypernova.rs +++ b/examples/folding_benchmarks/hypernova.rs @@ 
-44,7 +44,6 @@ fn hypernova_benchmarks(power: usize, prove_times: &mut Vec) { let poseidon_config = poseidon_canonical_config::(); let mut transcript_p: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); - transcript_p.absorb(&Fr::from_le_bytes_mod_order(b"init init")); let start = Instant::now(); @@ -90,32 +89,29 @@ fn hypernova_benchmarks(power: usize, prove_times: &mut Vec) { fn main() { - println!("starting"); - + // let pows: Vec = (10..24).collect(); let pows: Vec = vec![16, 20]; - println!("{:?}", pows); + let iter = 10; + let mut prove_times: Vec = Vec::with_capacity(pows.len() * iter); + for i in 0..iter { + println!("starting {:}", i); - let mut prove_times: Vec = Vec::with_capacity(pows.len()); - for pow in &pows { - println!("{}", pow); - hypernova_benchmarks(*pow, &mut prove_times); - } - println!("Powers {:?}", pows); + println!("{:?}", pows); - println!("Prove times {:?}", prove_times); - println!( - "| {0: <10} | {1: <10} |", - "2^pow", "prove time" - ); - println!("| {0: <10} | {1: <10} |", "2^pow", "prove time"); - for (pow, prove_time) in pows.iter().zip(prove_times.iter()) { - println!("| {0: <10} | {1:?} |", pow, prove_time); - } - if let Err(e) = write_to_csv(&pows, &prove_times, String::from("hypernova_prove_times.csv")) { + for pow in &pows { + println!("{}", pow); + hypernova_benchmarks(*pow, &mut prove_times); + } + + println!("Powers {:?}", pows); + println!("Prove times {:?}", prove_times); + + } + if let Err(e) = write_to_csv(&pows, &prove_times, format!("hypernova_prove_times.csv")) { eprintln!("Failed to write to CSV: {}", e); } else { println!("CSV file has been successfully written."); diff --git a/examples/folding_benchmarks/mova.rs b/examples/folding_benchmarks/mova.rs index 87ccc33c..46530cb2 100644 --- a/examples/folding_benchmarks/mova.rs +++ b/examples/folding_benchmarks/mova.rs @@ -4,7 +4,6 @@ use ark_std::log2; use ark_std::UniformRand; use folding_schemes::commitment::pedersen::Pedersen; use folding_schemes::commitment::CommitmentScheme; -use folding_schemes::folding::mova::homogenization::{Homogenization, PointVsLineHomogenization}; use folding_schemes::folding::mova::nifs::NIFS; use folding_schemes::folding::mova::Witness; use folding_schemes::transcript::poseidon::{poseidon_canonical_config}; @@ -78,7 +77,6 @@ fn mova_benchmark(power: usize, prove_times: &mut Vec) { Projective, Pedersen, PoseidonSponge, - PointVsLineHomogenization> >::prove( &pedersen_params, &r1cs, @@ -107,7 +105,6 @@ fn mova_benchmark(power: usize, prove_times: &mut Vec) { Projective, Pedersen, PoseidonSponge, - PointVsLineHomogenization> >::verify( &mut transcript_v, &committed_instance_1, @@ -125,30 +122,28 @@ fn mova_benchmark(power: usize, prove_times: &mut Vec) { fn main() { - - - println!("starting"); - // let pows: Vec = (10..24).collect(); let pows: Vec = vec![16, 20]; + let iter = 10; + let mut prove_times: Vec = Vec::with_capacity(pows.len() * iter); + for i in 0..iter { + println!("starting {:}", i); - println!("{:?}", pows); - let mut prove_times: Vec = Vec::with_capacity(pows.len()); - for pow in &pows { - println!("{}", pow); - mova_benchmark(*pow, &mut prove_times); - } + println!("{:?}", pows); - println!("Powers {:?}", pows); - println!("Prove times {:?}", prove_times); - println!("| {0: <10} | {1: <10} |", "2^pow", "prove time"); - for (pow, prove_time) in pows.iter().zip(prove_times.iter()) { - println!("| {0: <10} | {1:?} |", pow, prove_time); - } + for pow in &pows { + println!("{}", pow); + mova_benchmark(*pow, &mut prove_times); + } + + 
println!("Powers {:?}", pows); + println!("Prove times {:?}", prove_times); + + } if let Err(e) = write_to_csv(&pows, &prove_times, format!("mova_prove_times.csv")) { eprintln!("Failed to write to CSV: {}", e); } else { diff --git a/examples/folding_benchmarks/nova.rs b/examples/folding_benchmarks/nova.rs index 630d5688..c7b640e4 100644 --- a/examples/folding_benchmarks/nova.rs +++ b/examples/folding_benchmarks/nova.rs @@ -45,8 +45,8 @@ fn nova_benchmark(power: usize, prove_times: &mut Vec) { .commit::>(&pedersen_params, x) .unwrap(); - let poseidon_config = poseidon_canonical_config::(); - let mut transcript_p = PoseidonTranscript::::new(&poseidon_config); + let poseidon_config = poseidon_canonical_config::(); + let mut transcript_p: PoseidonSponge = PoseidonSponge::::new(&poseidon_config); let vector = vec![1; size]; // witness_1.E = vector.into_iter().map(|x| Fr::from(x)).collect(); @@ -78,7 +78,7 @@ fn nova_benchmark(power: usize, prove_times: &mut Vec) { let elapsed = start.elapsed(); println!("Time before starting folding {:?}", elapsed); - let result = NIFS::>::fold_instances( + let result = NIFS::>::fold_instances( r, &witness_1, &running_committed_instance, @@ -114,33 +114,28 @@ fn nova_benchmark(power: usize, prove_times: &mut Vec) { fn main() { - println!("starting"); - // let pows: Vec = (10..24).collect(); let pows: Vec = vec![16, 20]; - println!("{:?}", pows); + let iter = 10; + let mut prove_times: Vec = Vec::with_capacity(pows.len() * iter); + for i in 0..iter { + println!("starting {:}", i); - let mut prove_times: Vec = Vec::with_capacity(pows.len()); - for pow in &pows { - println!("{}", pow); - nova_benchmark(*pow, &mut prove_times); - } - println!("Powers {:?}", pows); + println!("{:?}", pows); - println!("Prove times {:?}", prove_times); - println!( - "| {0: <10} | {1: <10} |", - "2^pow", "prove time" - ); - println!("| {0: <10} | {1: <10} |", "2^pow", "prove time"); - for (pow, prove_time) in pows.iter().zip(prove_times.iter()) { - println!("| {0: <10} | {1:?} |", pow, prove_time); - } - if let Err(e) = write_to_csv(&pows, &prove_times, String::from("nova_prove_times.csv")) { + for pow in &pows { + println!("{}", pow); + nova_benchmark(*pow, &mut prove_times); + } + + println!("Powers {:?}", pows); + println!("Prove times {:?}", prove_times); + } + if let Err(e) = write_to_csv(&pows, &prove_times, format!("nova_prove_times.csv")) { eprintln!("Failed to write to CSV: {}", e); } else { println!("CSV file has been successfully written."); diff --git a/folding-schemes/src/folding/mova/mod.rs b/folding-schemes/src/folding/mova/mod.rs index 796cfa10..2e433636 100644 --- a/folding-schemes/src/folding/mova/mod.rs +++ b/folding-schemes/src/folding/mova/mod.rs @@ -22,7 +22,7 @@ use crate::transcript::{AbsorbNonNative, Transcript}; use crate::utils::mle::dense_vec_to_dense_mle; -pub mod homogenization; +pub mod pointvsline; pub mod nifs; pub mod traits; pub mod utils; diff --git a/folding-schemes/src/folding/mova/nifs.rs b/folding-schemes/src/folding/mova/nifs.rs index 39afc037..072a3ba3 100644 --- a/folding-schemes/src/folding/mova/nifs.rs +++ b/folding-schemes/src/folding/mova/nifs.rs @@ -7,8 +7,6 @@ use ark_std::{log2, Zero}; use std::marker::PhantomData; use std::time::Instant; -use super::homogenization::{HomogeneousEvaluationClaim, Homogenization}; - use super::{CommittedInstance, InstanceWitness, Witness}; use crate::arith::r1cs::R1CS; use crate::commitment::CommitmentScheme; @@ -19,11 +17,11 @@ use crate::utils::mle::dense_vec_to_dense_mle; use 
crate::utils::vec::{hadamard, mat_vec_mul, vec_add, vec_scalar_mul, vec_sub}; use crate::Error; +use crate::folding::mova::pointvsline::{PointVsLine, PointvsLineEvaluationClaim, PointVsLineProof}; /// Proof defines a multifolding proof -#[derive(Clone, Debug)] -pub struct Proof, H: Homogenization> { - pub hg_proof: H::Proof, +pub struct Proof { + pub hg_proof: PointVsLineProof, pub mleE1_prime: C::ScalarField, pub mleE2_prime: C::ScalarField, pub mleT: C::ScalarField, @@ -31,15 +29,14 @@ pub struct Proof, H: Homogenization /// Implements the Non-Interactive Folding Scheme described in section 4 of /// [Nova](https://eprint.iacr.org/2021/370.pdf) -pub struct NIFS, T: Transcript, H: Homogenization> { +pub struct NIFS, T: Transcript, > { _c: PhantomData, _cp: PhantomData, _ct: PhantomData, - _ch: PhantomData, } -impl, T: Transcript, H: Homogenization> -NIFS +impl, T: Transcript, > +NIFS where ::ScalarField: Absorb, { @@ -86,7 +83,7 @@ NIFS Ok(Witness:: { E, W, rW }) } - pub fn fold_homogenized_committed_instance( + pub fn fold_committed_instance( rho: C::ScalarField, ci1: &CommittedInstance, // U_i ci2: &CommittedInstance, // u_i @@ -115,27 +112,6 @@ NIFS }) } - /// NIFS.P is the consecutive combination of compute_cmT with fold_instances - - /// compute_cmT is part of the NIFS.P logic - pub fn compute_cmT( - cs_prover_params: &CS::ProverParams, - r1cs: &R1CS, - w1: &Witness, - ci1: &CommittedInstance, - w2: &Witness, - ci2: &CommittedInstance, - ) -> Result<(Vec, C), Error> { - let z1: Vec = [vec![ci1.u], ci1.x.to_vec(), w1.W.to_vec()].concat(); - let z2: Vec = [vec![ci2.u], ci2.x.to_vec(), w2.W.to_vec()].concat(); - - // compute cross terms - let T = Self::compute_T(r1cs, ci1.u, ci2.u, &z1, &z2)?; - // use r_T=0 since we don't need hiding property for cm(T) - let cmT = CS::commit(cs_prover_params, &T, &C::ScalarField::zero())?; - Ok((T, cmT)) - } - #[allow(clippy::type_complexity)] pub fn prove( _cs_prover_params: &CS::ProverParams, @@ -145,18 +121,18 @@ NIFS ci2: &CommittedInstance, w1: &Witness, w2: &Witness, - ) -> Result<(Proof, InstanceWitness), Error> { + ) -> Result<(Proof, InstanceWitness), Error> { let start = Instant::now(); let elapsed = start.elapsed(); println!("Time before homogenization point-vs-line {:?}", elapsed); let ( hg_proof, - HomogeneousEvaluationClaim { + PointvsLineEvaluationClaim { mleE1_prime, mleE2_prime, rE_prime, }, - ) = H::prove(transcript, ci1, ci2, w1, w2)?; + ) = PointVsLine::::prove(transcript, ci1, ci2, w1, w2)?; let elapsed = start.elapsed(); println!("Time after homogenization point-vs-line {:?}", elapsed); @@ -199,15 +175,15 @@ NIFS let elapsed = start.elapsed(); println!("Time before start folding {:?}", elapsed); - let temp = Ok(( - Proof { + let fold = Ok(( + Proof:: { hg_proof, mleE1_prime, mleE2_prime, mleT: mleT_evaluated, }, InstanceWitness { - ci: Self::fold_homogenized_committed_instance( + ci: Self::fold_committed_instance( rho, ci1, ci2, @@ -221,7 +197,7 @@ NIFS )); let elapsed = start.elapsed(); println!("Time after folding {:?}", elapsed); - temp + fold } /// verify implements NIFS.V logic described in [Nova](https://eprint.iacr.org/2021/370.pdf)'s @@ -230,9 +206,9 @@ NIFS transcript: &mut impl Transcript, ci1: &CommittedInstance, ci2: &CommittedInstance, - proof: &Proof, + proof: &Proof ) -> Result, Error> { - let rE_prime = H::verify( + let rE_prime = PointVsLine::::verify( transcript, ci1, ci2, @@ -249,7 +225,7 @@ NIFS transcript.absorb(&rho_scalar); let rho: C::ScalarField = transcript.get_challenge(); - 
NIFS::::fold_homogenized_committed_instance( + NIFS::::fold_committed_instance( rho, ci1, ci2, diff --git a/folding-schemes/src/folding/mova/homogenization.rs b/folding-schemes/src/folding/mova/pointvsline.rs similarity index 86% rename from folding-schemes/src/folding/mova/homogenization.rs rename to folding-schemes/src/folding/mova/pointvsline.rs index 2ce5fe95..eea147b7 100644 --- a/folding-schemes/src/folding/mova/homogenization.rs +++ b/folding-schemes/src/folding/mova/pointvsline.rs @@ -26,36 +26,13 @@ use crate::utils::sum_check::SumCheck; use crate::utils::virtual_polynomial::VPAuxInfo; use crate::Error; -pub struct HomogeneousEvaluationClaim { + +pub struct PointvsLineEvaluationClaim { pub mleE1_prime: C::ScalarField, pub mleE2_prime: C::ScalarField, pub rE_prime: Vec, } -pub trait Homogenization> { - type Proof: Clone + Debug; - - fn prove( - transcript: &mut impl Transcript, - ci1: &CommittedInstance, - ci2: &CommittedInstance, - w1: &Witness, - w2: &Witness, - ) -> Result<(Self::Proof, HomogeneousEvaluationClaim), Error>; - - fn verify( - transcript: &mut impl Transcript, - ci1: &CommittedInstance, - ci2: &CommittedInstance, - proof: &Self::Proof, - mleE1_prime: &C::ScalarField, - mleE2_prime: &C::ScalarField, - ) -> Result< - Vec, // rE=rE1'=rE2'. - Error, - >; -} - #[derive(Clone, Debug)] pub struct PointVsLineProof { pub h1: DensePolynomial, @@ -63,24 +40,22 @@ pub struct PointVsLineProof { } #[derive(Clone, Debug, Default)] -pub struct PointVsLineHomogenization> { +pub struct PointVsLine> { _phantom_C: std::marker::PhantomData, _phantom_T: std::marker::PhantomData, } -impl> Homogenization for PointVsLineHomogenization -where +impl> PointVsLine + where ::ScalarField: Absorb, { - type Proof = PointVsLineProof; - - fn prove( + pub fn prove( transcript: &mut impl Transcript, ci1: &CommittedInstance, ci2: &CommittedInstance, w1: &Witness, w2: &Witness, - ) -> Result<(Self::Proof, HomogeneousEvaluationClaim), Error> { + ) -> Result<(PointVsLineProof, PointvsLineEvaluationClaim), Error> { let vars = log2(w1.E.len()) as usize; let mleE1 = dense_vec_to_dense_mle(vars, &w1.E); @@ -112,8 +87,8 @@ where let rE_prime = compute_l(&ci1.rE, &ci2.rE, beta)?; Ok(( - Self::Proof { h1, h2 }, - HomogeneousEvaluationClaim { + PointVsLineProof { h1, h2 }, + PointvsLineEvaluationClaim { mleE1_prime, mleE2_prime, rE_prime, @@ -121,11 +96,11 @@ where )) } - fn verify( + pub fn verify( transcript: &mut impl Transcript, ci1: &CommittedInstance, ci2: &CommittedInstance, - proof: &Self::Proof, + proof: &PointVsLineProof, mleE1_prime: &::ScalarField, mleE2_prime: &::ScalarField, ) -> Result< @@ -159,6 +134,7 @@ where Ok(rE_prime) } + } // TODO: Test this.