diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0f4be64..cf71f18 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -74,7 +74,7 @@ jobs: uses: actions-rs/cargo@v1 with: command: test - args: --release + args: --release -- --test-threads 1 check_no_std: name: Check no_std diff --git a/Cargo.toml b/Cargo.toml index 5b86e90..cc29a2f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,34 +20,35 @@ license = "MIT/Apache-2.0" edition = "2018" [dependencies] -ark-serialize = { version = "^0.2.0", default-features = false, features = [ "derive" ] } -ark-ff = { version = "^0.2.0", default-features = false } -ark-std = { version = "^0.2.0", default-features = false } -ark-poly = { version = "^0.2.0", default-features = false } -ark-relations = { version = "^0.2.0", default-features = false } -ark-poly-commit = { git = "https://github.com/arkworks-rs/poly-commit", branch = "constraints", default-features = false, features = [ "r1cs" ] } +ark-serialize = { version = "^0.3.0", default-features = false, features = [ "derive" ] } +ark-ff = { version = "^0.3.0", default-features = false } +ark-std = { version = "^0.3.0", default-features = false } +ark-poly = { version = "^0.3.0", default-features = false } +ark-relations = { version = "^0.3.0", default-features = false } +ark-poly-commit = { git = "https://github.com/arkworks-rs/poly-commit", branch = "vlopes11/constraints-rng", default-features = false, features = [ "r1cs" ] } +ark-sponge = { version = "^0.3.0", default-features = false } -rand_chacha = { version = "0.2.1", default-features = false } +rand_chacha = { version = "0.3.1", default-features = false } rayon = { version = "1", optional = true } digest = { version = "0.9" } derivative = { version = "2", features = ["use_core"] } -ark-ec = { version = "^0.2.0", default-features = false } -ark-crypto-primitives = { version = "^0.2.0", default-features = false, features = [ "r1cs" ] } -ark-r1cs-std = { version = "^0.2.0", default-features = false } -ark-nonnative-field = { version = "^0.2.0", default-features = false } -ark-snark = { version = "^0.2.0", default-features = false } +ark-ec = { version = "^0.3.0", default-features = false } +ark-crypto-primitives = { version = "^0.3.0", default-features = false, features = [ "r1cs" ] } +ark-r1cs-std = { version = "^0.3.0", default-features = false } +ark-nonnative-field = { version = "^0.3.0", default-features = false } +ark-snark = { version = "^0.3.0", default-features = false } hashbrown = "0.9" tracing = { version = "0.1", default-features = false, features = [ "attributes" ] } tracing-subscriber = { version = "0.2", default-features = false, optional = true } -[dev-dependencies] +#[dev-dependencies] blake2 = { version = "0.9", default-features = false } -ark-bls12-381 = { version = "^0.2.0", default-features = false, features = [ "curve" ] } -ark-mnt4-298 = { version = "^0.2.0", default-features = false, features = ["r1cs", "curve"] } -ark-mnt6-298 = { version = "^0.2.0", default-features = false, features = ["r1cs"] } -ark-mnt4-753 = { version = "^0.2.0", default-features = false, features = ["r1cs", "curve"] } -ark-mnt6-753 = { version = "^0.2.0", default-features = false, features = ["r1cs"] } +ark-bls12-381 = { version = "^0.3.0", default-features = false, features = [ "curve" ] } +ark-mnt4-298 = { version = "^0.3.0", default-features = false, features = ["r1cs", "curve"] } +ark-mnt6-298 = { version = "^0.3.0", default-features = false, features = ["r1cs"] } +ark-mnt4-753 = { version = "^0.3.0", default-features = false, features = ["r1cs", "curve"] } +ark-mnt6-753 = { version = "^0.3.0", default-features = false, features = ["r1cs"] } [profile.release] opt-level = 3 @@ -76,4 +77,4 @@ parallel = [ "std", "ark-ff/parallel", "ark-poly/parallel", "ark-std/parallel", name = "marlin-benches" path = "benches/bench.rs" harness = false -required-features = ["std"] \ No newline at end of file +required-features = ["std"] diff --git a/benches/bench.rs b/benches/bench.rs index 6230302..b32ebbd 100644 --- a/benches/bench.rs +++ b/benches/bench.rs @@ -4,9 +4,9 @@ use ark_bls12_381::{Bls12_381, Fq as BlsFq, Fr as BlsFr}; use ark_ff::PrimeField; -use ark_marlin::fiat_shamir::FiatShamirChaChaRng; use ark_marlin::Marlin; use ark_marlin::MarlinDefaultConfig; +use ark_marlin::{FiatShamirSpongeRng, PoseidonSpongeWithDefaultRate}; use ark_mnt4_298::{Fq as MNT4Fq, Fr as MNT4Fr, MNT4_298}; use ark_mnt4_753::{Fq as MNT4BigFq, Fr as MNT4BigFr, MNT4_753}; use ark_mnt6_298::{Fq as MNT6Fq, Fr as MNT6Fr, MNT6_298}; @@ -18,7 +18,6 @@ use ark_relations::{ r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError}, }; use ark_std::{ops::Mul, UniformRand}; -use blake2::Blake2s; const NUM_PROVE_REPEATITIONS: usize = 10; const NUM_VERIFY_REPEATITIONS: usize = 50; @@ -80,16 +79,40 @@ macro_rules! marlin_prove_bench { let srs = Marlin::< $bench_field, $base_field, - MarlinKZG10<$bench_pairing_engine, DensePolynomial<$bench_field>>, - FiatShamirChaChaRng<$bench_field, $base_field, Blake2s>, + MarlinKZG10< + $bench_pairing_engine, + DensePolynomial<$bench_field>, + FiatShamirSpongeRng< + $bench_field, + $base_field, + PoseidonSpongeWithDefaultRate<$base_field>, + >, + >, + FiatShamirSpongeRng< + $bench_field, + $base_field, + PoseidonSpongeWithDefaultRate<$base_field>, + >, MarlinDefaultConfig, >::universal_setup(65536, 65536, 65536, rng) .unwrap(); let (pk, _) = Marlin::< $bench_field, $base_field, - MarlinKZG10<$bench_pairing_engine, DensePolynomial<$bench_field>>, - FiatShamirChaChaRng<$bench_field, $base_field, Blake2s>, + MarlinKZG10< + $bench_pairing_engine, + DensePolynomial<$bench_field>, + FiatShamirSpongeRng< + $bench_field, + $base_field, + PoseidonSpongeWithDefaultRate<$base_field>, + >, + >, + FiatShamirSpongeRng< + $bench_field, + $base_field, + PoseidonSpongeWithDefaultRate<$base_field>, + >, MarlinDefaultConfig, >::index(&srs, c) .unwrap(); @@ -100,8 +123,20 @@ macro_rules! marlin_prove_bench { let _ = Marlin::< $bench_field, $base_field, - MarlinKZG10<$bench_pairing_engine, DensePolynomial<$bench_field>>, - FiatShamirChaChaRng<$bench_field, $base_field, Blake2s>, + MarlinKZG10< + $bench_pairing_engine, + DensePolynomial<$bench_field>, + FiatShamirSpongeRng< + $bench_field, + $base_field, + PoseidonSpongeWithDefaultRate<$base_field>, + >, + >, + FiatShamirSpongeRng< + $bench_field, + $base_field, + PoseidonSpongeWithDefaultRate<$base_field>, + >, MarlinDefaultConfig, >::prove(&pk, c.clone(), rng) .unwrap(); @@ -128,24 +163,60 @@ macro_rules! marlin_verify_bench { let srs = Marlin::< $bench_field, $base_field, - MarlinKZG10<$bench_pairing_engine, DensePolynomial<$bench_field>>, - FiatShamirChaChaRng<$bench_field, $base_field, Blake2s>, + MarlinKZG10< + $bench_pairing_engine, + DensePolynomial<$bench_field>, + FiatShamirSpongeRng< + $bench_field, + $base_field, + PoseidonSpongeWithDefaultRate<$base_field>, + >, + >, + FiatShamirSpongeRng< + $bench_field, + $base_field, + PoseidonSpongeWithDefaultRate<$base_field>, + >, MarlinDefaultConfig, >::universal_setup(65536, 65536, 65536, rng) .unwrap(); let (pk, vk) = Marlin::< $bench_field, $base_field, - MarlinKZG10<$bench_pairing_engine, DensePolynomial<$bench_field>>, - FiatShamirChaChaRng<$bench_field, $base_field, Blake2s>, + MarlinKZG10< + $bench_pairing_engine, + DensePolynomial<$bench_field>, + FiatShamirSpongeRng< + $bench_field, + $base_field, + PoseidonSpongeWithDefaultRate<$base_field>, + >, + >, + FiatShamirSpongeRng< + $bench_field, + $base_field, + PoseidonSpongeWithDefaultRate<$base_field>, + >, MarlinDefaultConfig, >::index(&srs, c) .unwrap(); let proof = Marlin::< $bench_field, $base_field, - MarlinKZG10<$bench_pairing_engine, DensePolynomial<$bench_field>>, - FiatShamirChaChaRng<$bench_field, $base_field, Blake2s>, + MarlinKZG10< + $bench_pairing_engine, + DensePolynomial<$bench_field>, + FiatShamirSpongeRng< + $bench_field, + $base_field, + PoseidonSpongeWithDefaultRate<$base_field>, + >, + >, + FiatShamirSpongeRng< + $bench_field, + $base_field, + PoseidonSpongeWithDefaultRate<$base_field>, + >, MarlinDefaultConfig, >::prove(&pk, c.clone(), rng) .unwrap(); @@ -158,8 +229,20 @@ macro_rules! marlin_verify_bench { let _ = Marlin::< $bench_field, $base_field, - MarlinKZG10<$bench_pairing_engine, DensePolynomial<$bench_field>>, - FiatShamirChaChaRng<$bench_field, $base_field, Blake2s>, + MarlinKZG10< + $bench_pairing_engine, + DensePolynomial<$bench_field>, + FiatShamirSpongeRng< + $bench_field, + $base_field, + PoseidonSpongeWithDefaultRate<$base_field>, + >, + >, + FiatShamirSpongeRng< + $bench_field, + $base_field, + PoseidonSpongeWithDefaultRate<$base_field>, + >, MarlinDefaultConfig, >::verify(&vk, &vec![v], &proof) .unwrap(); diff --git a/src/ahp/prover.rs b/src/ahp/prover.rs index 17e1c2e..96f1b35 100644 --- a/src/ahp/prover.rs +++ b/src/ahp/prover.rs @@ -444,10 +444,9 @@ impl AHPForR1CS { } /// Output the second round message and the next state. - pub fn prover_second_round<'a, R: RngCore>( + pub fn prover_second_round<'a>( ver_message: &VerifierFirstMsg, mut state: ProverState<'a, F>, - _r: &mut R, hiding: bool, ) -> (ProverMsg, ProverSecondOracles, ProverState<'a, F>) { let round_time = start_timer!(|| "AHP::Prover::SecondRound"); @@ -598,10 +597,9 @@ impl AHPForR1CS { } /// Output the third round message and the next state. - pub fn prover_third_round<'a, R: RngCore>( + pub fn prover_third_round<'a>( ver_message: &VerifierSecondMsg, prover_state: ProverState<'a, F>, - _r: &mut R, ) -> Result<(ProverMsg, ProverThirdOracles), Error> { let round_time = start_timer!(|| "AHP::Prover::ThirdRound"); diff --git a/src/ahp/verifier.rs b/src/ahp/verifier.rs index b1dfce5..206b580 100644 --- a/src/ahp/verifier.rs +++ b/src/ahp/verifier.rs @@ -6,7 +6,6 @@ use crate::ahp::*; use crate::fiat_shamir::FiatShamirRng; use ark_ff::PrimeField; use ark_nonnative_field::params::OptimizationType; -use ark_poly::{EvaluationDomain, GeneralEvaluationDomain}; use ark_poly_commit::QuerySet; /// State of the AHP verifier @@ -56,7 +55,7 @@ impl AHPForR1CS { let domain_k = GeneralEvaluationDomain::new(index_info.num_non_zero) .ok_or(SynthesisError::PolynomialDegreeTooLarge)?; - let elems = fs_rng.squeeze_nonnative_field_elements(4, OptimizationType::Weight); + let elems = fs_rng.squeeze_nonnative(4, OptimizationType::Weight); let alpha = elems[0]; let eta_a = elems[1]; let eta_b = elems[2]; @@ -86,7 +85,7 @@ impl AHPForR1CS { mut state: VerifierState, fs_rng: &mut R, ) -> (VerifierSecondMsg, VerifierState) { - let elems = fs_rng.squeeze_nonnative_field_elements(1, OptimizationType::Weight); + let elems = fs_rng.squeeze_nonnative(1, OptimizationType::Weight); let beta = elems[0]; assert!(!state.domain_h.evaluate_vanishing_polynomial(beta).is_zero()); @@ -101,7 +100,7 @@ impl AHPForR1CS { mut state: VerifierState, fs_rng: &mut R, ) -> VerifierState { - let elems = fs_rng.squeeze_nonnative_field_elements(1, OptimizationType::Weight); + let elems = fs_rng.squeeze_nonnative(1, OptimizationType::Weight); let gamma = elems[0]; state.gamma = Some(gamma); @@ -109,9 +108,8 @@ impl AHPForR1CS { } /// Output the query state and next round state. - pub fn verifier_query_set<'a, FSF: PrimeField, R: FiatShamirRng>( + pub fn verifier_query_set<'a, FSF: PrimeField>( state: VerifierState, - _: &'a mut R, with_vanishing: bool, ) -> (QuerySet, VerifierState) { let alpha = state.first_round_msg.unwrap().alpha; diff --git a/src/constraints/ahp.rs b/src/constraints/ahp.rs index 7d315ba..4e31ba5 100644 --- a/src/constraints/ahp.rs +++ b/src/constraints/ahp.rs @@ -57,14 +57,18 @@ pub struct VerifierThirdMsgVar { pub struct AHPForR1CS< F: PrimeField, CF: PrimeField, - PC: PolynomialCommitment>, - PCG: PCCheckVar, PC, CF>, + PR: FiatShamirRng, + R: FiatShamirRngVar, + PC: PolynomialCommitment, PR>, + PCG: PCCheckVar, PC, CF, PR>, > where PCG::VerifierKeyVar: ToConstraintFieldGadget, PCG::CommitmentVar: ToConstraintFieldGadget, { field: PhantomData, constraint_field: PhantomData, + fs_rng: PhantomData, + fs_var: PhantomData, polynomial_commitment: PhantomData, pc_check: PhantomData, } @@ -72,9 +76,11 @@ pub struct AHPForR1CS< impl< F: PrimeField, CF: PrimeField, - PC: PolynomialCommitment>, - PCG: PCCheckVar, PC, CF>, - > AHPForR1CS + PR: FiatShamirRng, + R: FiatShamirRngVar, + PC: PolynomialCommitment, PR>, + PCG: PCCheckVar, PC, CF, PR>, + > AHPForR1CS where PCG::VerifierKeyVar: ToConstraintFieldGadget, PCG::CommitmentVar: ToConstraintFieldGadget, @@ -82,11 +88,7 @@ where /// Output the first message and next round state. #[tracing::instrument(target = "r1cs", skip(fs_rng, comms))] #[allow(clippy::type_complexity)] - pub fn verifier_first_round< - CommitmentVar: ToConstraintFieldGadget, - PR: FiatShamirRng, - R: FiatShamirRngVar, - >( + pub fn verifier_first_round>( domain_h_size: u64, domain_k_size: u64, fs_rng: &mut R, @@ -99,16 +101,16 @@ where comms.iter().for_each(|comm| { elems.append(&mut comm.to_constraint_field().unwrap()); }); - fs_rng.absorb_native_field_elements(&elems)?; - fs_rng.absorb_nonnative_field_elements(&message, OptimizationType::Weight)?; + fs_rng.absorb(&elems)?; + fs_rng.absorb_nonnative(&message, OptimizationType::Weight)?; } - // obtain four elements from the sponge - let elems = fs_rng.squeeze_field_elements(4)?; - let alpha = elems[0].clone(); - let eta_a = elems[1].clone(); - let eta_b = elems[2].clone(); - let eta_c = elems[3].clone(); + // obtain four elements from the sponge_var + let elems = fs_rng.squeeze_nonnative_field_elements(4)?; + let alpha = elems.0[0].clone(); + let eta_a = elems.0[1].clone(); + let eta_b = elems.0[2].clone(); + let eta_c = elems.0[3].clone(); let msg = VerifierFirstMsgVar { alpha, @@ -130,11 +132,7 @@ where #[tracing::instrument(target = "r1cs", skip(state, fs_rng, comms))] #[allow(clippy::type_complexity)] - pub fn verifier_second_round< - CommitmentVar: ToConstraintFieldGadget, - PR: FiatShamirRng, - R: FiatShamirRngVar, - >( + pub fn verifier_second_round>( state: VerifierStateVar, fs_rng: &mut R, comms: &[CommitmentVar], @@ -153,13 +151,13 @@ where comms.iter().for_each(|comm| { elems.append(&mut comm.to_constraint_field().unwrap()); }); - fs_rng.absorb_native_field_elements(&elems)?; - fs_rng.absorb_nonnative_field_elements(&message, OptimizationType::Weight)?; + fs_rng.absorb(&elems)?; + fs_rng.absorb_nonnative(&message, OptimizationType::Weight)?; } - // obtain one element from the sponge - let elems = fs_rng.squeeze_field_elements(1)?; - let beta = elems[0].clone(); + // obtain one element from the sponge_var + let elems = fs_rng.squeeze_nonnative_field_elements(1)?; + let beta = elems.0[0].clone(); let msg = VerifierSecondMsgVar { beta }; @@ -175,11 +173,7 @@ where } #[tracing::instrument(target = "r1cs", skip(state, fs_rng, comms))] - pub fn verifier_third_round< - CommitmentVar: ToConstraintFieldGadget, - PR: FiatShamirRng, - R: FiatShamirRngVar, - >( + pub fn verifier_third_round>( state: VerifierStateVar, fs_rng: &mut R, comms: &[CommitmentVar], @@ -199,13 +193,13 @@ where comms.iter().for_each(|comm| { elems.append(&mut comm.to_constraint_field().unwrap()); }); - fs_rng.absorb_native_field_elements(&elems)?; - fs_rng.absorb_nonnative_field_elements(&message, OptimizationType::Weight)?; + fs_rng.absorb(&elems)?; + fs_rng.absorb_nonnative(&message, OptimizationType::Weight)?; } - // obtain one element from the sponge - let elems = fs_rng.squeeze_field_elements(1)?; - let gamma = elems[0].clone(); + // obtain one element from the sponge_var + let elems = fs_rng.squeeze_nonnative_field_elements(1)?; + let gamma = elems.0[0].clone(); let new_state = VerifierStateVar { domain_h_size, @@ -525,12 +519,9 @@ where #[tracing::instrument(target = "r1cs", skip(index_pvk, proof, state))] #[allow(clippy::type_complexity)] - pub fn verifier_comm_query_eval_set< - PR: FiatShamirRng, - R: FiatShamirRngVar, - >( + pub fn verifier_comm_query_eval_set( index_pvk: &PreparedIndexVerifierKeyVar, - proof: &ProofVar, + proof: &ProofVar, state: &VerifierStateVar, ) -> Result< ( diff --git a/src/constraints/data_structures.rs b/src/constraints/data_structures.rs index ef7d3ec..64c836e 100644 --- a/src/constraints/data_structures.rs +++ b/src/constraints/data_structures.rs @@ -17,16 +17,19 @@ use ark_r1cs_std::{ R1CSVar, ToBytesGadget, ToConstraintFieldGadget, }; use ark_relations::r1cs::{ConstraintSystemRef, Namespace}; +use ark_sponge::CryptographicSponge; use ark_std::borrow::Borrow; use hashbrown::HashMap; -pub type UniversalSRS = >>::UniversalParams; +pub type UniversalSRS = + , S>>::UniversalParams; pub struct IndexVerifierKeyVar< F: PrimeField, CF: PrimeField, - PC: PolynomialCommitment>, - PCG: PCCheckVar, PC, CF>, + S: CryptographicSponge, + PC: PolynomialCommitment, S>, + PCG: PCCheckVar, PC, CF, S>, > { pub cs: ConstraintSystemRef, pub domain_h_size: u64, @@ -40,9 +43,10 @@ pub struct IndexVerifierKeyVar< impl< F: PrimeField, CF: PrimeField, - PC: PolynomialCommitment>, - PCG: PCCheckVar, PC, CF>, - > IndexVerifierKeyVar + S: CryptographicSponge, + PC: PolynomialCommitment, S>, + PCG: PCCheckVar, PC, CF, S>, + > IndexVerifierKeyVar { fn cs(&self) -> ConstraintSystemRef { self.cs.clone() @@ -52,9 +56,10 @@ impl< impl< F: PrimeField, CF: PrimeField, - PC: PolynomialCommitment>, - PCG: PCCheckVar, PC, CF>, - > AllocVar, CF> for IndexVerifierKeyVar + S: CryptographicSponge, + PC: PolynomialCommitment, S>, + PCG: PCCheckVar, PC, CF, S>, + > AllocVar, CF> for IndexVerifierKeyVar { #[tracing::instrument(target = "r1cs", skip(cs, f))] fn new_variable( @@ -63,7 +68,7 @@ impl< mode: AllocationMode, ) -> Result where - T: Borrow>, + T: Borrow>, { let t = f()?; let ivk = t.borrow(); @@ -117,9 +122,10 @@ impl< impl< F: PrimeField, CF: PrimeField, - PC: PolynomialCommitment>, - PCG: PCCheckVar, PC, CF>, - > ToBytesGadget for IndexVerifierKeyVar + S: CryptographicSponge, + PC: PolynomialCommitment, S>, + PCG: PCCheckVar, PC, CF, S>, + > ToBytesGadget for IndexVerifierKeyVar { #[tracing::instrument(target = "r1cs", skip(self))] fn to_bytes(&self) -> Result>, SynthesisError> { @@ -140,9 +146,10 @@ impl< impl< F: PrimeField, CF: PrimeField, - PC: PolynomialCommitment>, - PCG: PCCheckVar, PC, CF>, - > Clone for IndexVerifierKeyVar + S: CryptographicSponge, + PC: PolynomialCommitment, S>, + PCG: PCCheckVar, PC, CF, S>, + > Clone for IndexVerifierKeyVar { fn clone(&self) -> Self { Self { @@ -160,9 +167,10 @@ impl< impl< F: PrimeField, CF: PrimeField, - PC: PolynomialCommitment>, - PCG: PCCheckVar, PC, CF>, - > IndexVerifierKeyVar + S: CryptographicSponge, + PC: PolynomialCommitment, S>, + PCG: PCCheckVar, PC, CF, S>, + > IndexVerifierKeyVar { pub fn iter(&self) -> impl Iterator { self.index_comms.iter() @@ -172,8 +180,8 @@ impl< pub struct PreparedIndexVerifierKeyVar< F: PrimeField, CF: PrimeField, - PC: PolynomialCommitment>, - PCG: PCCheckVar, PC, CF>, + PC: PolynomialCommitment, PR>, + PCG: PCCheckVar, PC, CF, PR>, PR: FiatShamirRng, R: FiatShamirRngVar, > { @@ -192,8 +200,8 @@ pub struct PreparedIndexVerifierKeyVar< impl< F: PrimeField, CF: PrimeField, - PC: PolynomialCommitment>, - PCG: PCCheckVar, PC, CF>, + PC: PolynomialCommitment, PR>, + PCG: PCCheckVar, PC, CF, PR>, PR: FiatShamirRng, R: FiatShamirRngVar, > Clone for PreparedIndexVerifierKeyVar @@ -217,23 +225,24 @@ impl PreparedIndexVerifierKeyVar where F: PrimeField, CF: PrimeField, - PC: PolynomialCommitment>, - PCG: PCCheckVar, PC, CF>, + PC: PolynomialCommitment, PR>, + PCG: PCCheckVar, PC, CF, PR>, PR: FiatShamirRng, R: FiatShamirRngVar, PCG::VerifierKeyVar: ToConstraintFieldGadget, PCG::CommitmentVar: ToConstraintFieldGadget, { #[tracing::instrument(target = "r1cs", skip(vk))] - pub fn prepare(vk: &IndexVerifierKeyVar) -> Result { + pub fn prepare(vk: &IndexVerifierKeyVar) -> Result { let cs = vk.cs(); - let mut fs_rng_raw = PR::new(); - fs_rng_raw - .absorb_bytes(&to_bytes![&MarlinVerifierVar::::PROTOCOL_NAME].unwrap()); + let mut fs_rng_raw = PR::default(); + fs_rng_raw.absorb_bytes( + &to_bytes![&MarlinVerifierVar::::PROTOCOL_NAME].unwrap(), + ); let index_vk_hash = { - let mut vk_hash_rng = PR::new(); + let mut vk_hash_rng = PR::default(); let mut vk_elems = Vec::::new(); vk.index_comms.iter().for_each(|index_comm| { @@ -246,16 +255,17 @@ where .collect(), ); }); - vk_hash_rng.absorb_native_field_elements(&vk_elems); + vk_hash_rng.absorb_native(&vk_elems); + FpVar::::new_witness(ark_relations::ns!(cs, "alloc#vk_hash"), || { - Ok(vk_hash_rng.squeeze_native_field_elements(1)[0]) + Ok(vk_hash_rng.squeeze_native(1)[0]) }) .unwrap() }; let fs_rng = { let mut fs_rng = R::constant(cs, &fs_rng_raw); - fs_rng.absorb_native_field_elements(&[index_vk_hash])?; + fs_rng.absorb_native(&[index_vk_hash])?; fs_rng }; @@ -280,13 +290,13 @@ where } } -impl AllocVar, CF> +impl AllocVar, CF> for PreparedIndexVerifierKeyVar where F: PrimeField, CF: PrimeField, - PC: PolynomialCommitment>, - PCG: PCCheckVar, PC, CF>, + PC: PolynomialCommitment, PR>, + PCG: PCCheckVar, PC, CF, PR>, PR: FiatShamirRng, R: FiatShamirRngVar, PC::VerifierKey: ToConstraintField, @@ -301,7 +311,7 @@ where mode: AllocationMode, ) -> Result where - T: Borrow>, + T: Borrow>, { let t = f()?; let obj = t.borrow(); @@ -330,24 +340,26 @@ where }); let index_vk_hash = { - let mut vk_hash_rng = PR::new(); + let mut vk_hash_rng = PR::default(); + + vk_hash_rng.absorb_native(&vk_elems); - vk_hash_rng.absorb_native_field_elements(&vk_elems); FpVar::::new_variable( ark_relations::ns!(cs, "alloc#vk_hash"), - || Ok(vk_hash_rng.squeeze_native_field_elements(1)[0]), + || Ok(vk_hash_rng.squeeze_native(1)[0]), mode, ) .unwrap() }; - let mut fs_rng_raw = PR::new(); - fs_rng_raw - .absorb_bytes(&to_bytes![&MarlinVerifierVar::::PROTOCOL_NAME].unwrap()); + let mut fs_rng_raw = PR::default(); + fs_rng_raw.absorb_bytes( + &to_bytes![&MarlinVerifierVar::::PROTOCOL_NAME].unwrap(), + ); let fs_rng = { let mut fs_rng = R::constant(cs.clone(), &fs_rng_raw); - fs_rng.absorb_native_field_elements(&[index_vk_hash])?; + fs_rng.absorb_native(&[index_vk_hash])?; fs_rng }; @@ -379,8 +391,9 @@ where pub struct ProofVar< F: PrimeField, CF: PrimeField, - PC: PolynomialCommitment>, - PCG: PCCheckVar, PC, CF>, + S: CryptographicSponge, + PC: PolynomialCommitment, S>, + PCG: PCCheckVar, PC, CF, S>, > { pub cs: ConstraintSystemRef, pub commitments: Vec>, @@ -392,9 +405,10 @@ pub struct ProofVar< impl< F: PrimeField, CF: PrimeField, - PC: PolynomialCommitment>, - PCG: PCCheckVar, PC, CF>, - > ProofVar + S: CryptographicSponge, + PC: PolynomialCommitment, S>, + PCG: PCCheckVar, PC, CF, S>, + > ProofVar { pub fn new( cs: ConstraintSystemRef, @@ -413,12 +427,13 @@ impl< } } -impl AllocVar, CF> for ProofVar +impl AllocVar, CF> for ProofVar where F: PrimeField, CF: PrimeField, - PC: PolynomialCommitment>, - PCG: PCCheckVar, PC, CF>, + S: CryptographicSponge, + PC: PolynomialCommitment, S>, + PCG: PCCheckVar, PC, CF, S>, PC::VerifierKey: ToConstraintField, PC::Commitment: ToConstraintField, PCG::VerifierKeyVar: ToConstraintFieldGadget, @@ -431,7 +446,7 @@ where mode: AllocationMode, ) -> Result where - T: Borrow>, + T: Borrow>, { let ns = cs.into(); let cs = ns.cs(); @@ -534,9 +549,10 @@ where impl< F: PrimeField, CF: PrimeField, - PC: PolynomialCommitment>, - PCG: PCCheckVar, PC, CF>, - > Clone for ProofVar + S: CryptographicSponge, + PC: PolynomialCommitment, S>, + PCG: PCCheckVar, PC, CF, S>, + > Clone for ProofVar { fn clone(&self) -> Self { ProofVar { diff --git a/src/constraints/snark.rs b/src/constraints/snark.rs index 4e9a428..88fdd60 100644 --- a/src/constraints/snark.rs +++ b/src/constraints/snark.rs @@ -49,7 +49,7 @@ impl Debug for MarlinBound { pub struct MarlinSNARK< F: PrimeField, FSF: PrimeField, - PC: PolynomialCommitment>, + PC: PolynomialCommitment, FS>, FS: FiatShamirRng, MC: MarlinConfig, > { @@ -64,16 +64,16 @@ impl SNARK for MarlinSNARK where F: PrimeField, FSF: PrimeField, - PC: PolynomialCommitment>, + PC: PolynomialCommitment, FS>, FS: FiatShamirRng, MC: MarlinConfig, PC::VerifierKey: ToConstraintField, PC::Commitment: ToConstraintField, { - type ProvingKey = IndexProverKey; - type VerifyingKey = IndexVerifierKey; - type ProcessedVerifyingKey = PreparedIndexVerifierKey; - type Proof = Proof; + type ProvingKey = IndexProverKey; + type VerifyingKey = IndexVerifierKey; + type ProcessedVerifyingKey = PreparedIndexVerifierKey; + type Proof = Proof; type Error = Box; fn circuit_specific_setup, R: RngCore + CryptoRng>( @@ -122,14 +122,14 @@ impl UniversalSetupSNARK for MarlinSNARK>, + PC: PolynomialCommitment, FS>, FS: FiatShamirRng, MC: MarlinConfig, PC::VerifierKey: ToConstraintField, PC::Commitment: ToConstraintField, { type ComputationBound = MarlinBound; - type PublicParameters = (MarlinBound, UniversalSRS); + type PublicParameters = (MarlinBound, UniversalSRS); fn universal_setup( bound: &Self::ComputationBound, @@ -171,10 +171,10 @@ pub struct MarlinSNARKGadget where F: PrimeField, FSF: PrimeField, - PC: PolynomialCommitment>, + PC: PolynomialCommitment, FS>, FS: FiatShamirRng, MC: MarlinConfig, - PCG: PCCheckVar, PC, FSF>, + PCG: PCCheckVar, PC, FSF, FS>, FSG: FiatShamirRngVar, { pub f_phantom: PhantomData, @@ -191,10 +191,10 @@ impl SNARKGadget>, + PC: PolynomialCommitment, FS>, FS: FiatShamirRng, MC: MarlinConfig, - PCG: PCCheckVar, PC, FSF>, + PCG: PCCheckVar, PC, FSF, FS>, FSG: FiatShamirRngVar, PC::VerifierKey: ToConstraintField, PC::Commitment: ToConstraintField, @@ -202,9 +202,9 @@ where PCG::CommitmentVar: ToConstraintFieldGadget, { type ProcessedVerifyingKeyVar = PreparedIndexVerifierKeyVar; - type VerifyingKeyVar = IndexVerifierKeyVar; + type VerifyingKeyVar = IndexVerifierKeyVar; type InputVar = NonNativeFieldInputVar; - type ProofVar = ProofVar; + type ProofVar = ProofVar; type VerifierSize = usize; @@ -221,8 +221,12 @@ where proof: &Self::ProofVar, ) -> Result, SynthesisError> { Ok( - MarlinVerifierGadget::::prepared_verify(&circuit_pvk, &x.val, proof) - .unwrap(), + MarlinVerifierGadget::::prepared_verify( + &circuit_pvk, + &x.val, + proof, + ) + .unwrap(), ) } @@ -233,7 +237,7 @@ where proof: &Self::ProofVar, ) -> Result, SynthesisError> { Ok( - MarlinVerifierGadget::::verify::(circuit_vk, &x.val, proof) + MarlinVerifierGadget::::verify(circuit_vk, &x.val, proof) .unwrap(), ) } @@ -303,10 +307,10 @@ impl where F: PrimeField, FSF: PrimeField, - PC: PolynomialCommitment>, + PC: PolynomialCommitment, FS>, FS: FiatShamirRng, MC: MarlinConfig, - PCG: PCCheckVar, PC, FSF>, + PCG: PCCheckVar, PC, FSF, FS>, FSG: FiatShamirRngVar, PC::VerifierKey: ToConstraintField, PC::Commitment: ToConstraintField, @@ -372,7 +376,7 @@ impl ark_std::error::Error for MarlinError {} #[cfg(test)] mod test { - use crate::MarlinConfig; + use crate::{MarlinConfig, PoseidonSpongeVarWithDefaultRate, PoseidonSpongeWithDefaultRate}; #[derive(Clone)] struct TestMarlinConfig; impl MarlinConfig for TestMarlinConfig { @@ -391,10 +395,8 @@ mod test { } use crate::constraints::snark::{MarlinSNARK, MarlinSNARKGadget}; - use crate::fiat_shamir::constraints::FiatShamirAlgebraicSpongeRngVar; - use crate::fiat_shamir::poseidon::constraints::PoseidonSpongeVar; - use crate::fiat_shamir::poseidon::PoseidonSponge; - use crate::fiat_shamir::FiatShamirAlgebraicSpongeRng; + use crate::fiat_shamir::constraints::FiatShamirSpongeRngVar; + use crate::fiat_shamir::FiatShamirSpongeRng; use ark_crypto_primitives::snark::{SNARKGadget, SNARK}; use ark_ec::{CurveCycle, PairingEngine, PairingFriendlyCycle}; use ark_ff::{Field, UniformRand}; @@ -409,7 +411,6 @@ mod test { r1cs::{ConstraintSynthesizer, ConstraintSystem, ConstraintSystemRef, SynthesisError}, }; use core::ops::MulAssign; - #[derive(Copy, Clone)] struct Circuit { a: Option, @@ -449,22 +450,19 @@ mod test { type TestSNARK = MarlinSNARK< MNT4Fr, MNT4Fq, - MarlinKZG10>, + MarlinKZG10, FS4>, FS4, TestMarlinConfig, >; - type FS4 = FiatShamirAlgebraicSpongeRng>; - type PCGadget4 = MarlinKZG10Gadget, MNT4PairingVar>; - type FSG4 = FiatShamirAlgebraicSpongeRngVar< - MNT4Fr, - MNT4Fq, - PoseidonSponge, - PoseidonSpongeVar, - >; + type FS4 = FiatShamirSpongeRng>; + type PCGadget4 = MarlinKZG10Gadget, MNT4PairingVar, FS4>; + type FSG4 = + FiatShamirSpongeRngVar>; + type TestSNARKGadget = MarlinSNARKGadget< MNT4Fr, MNT4Fq, - MarlinKZG10>, + MarlinKZG10, FS4>, FS4, TestMarlinConfig, PCGadget4, @@ -475,6 +473,7 @@ mod test { use ark_relations::r1cs::OptimizationGoal; #[test] + #[ignore] fn marlin_snark_test() { let mut rng = ark_std::test_rng(); let a = MNT4Fr::rand(&mut rng); diff --git a/src/constraints/verifier.rs b/src/constraints/verifier.rs index 4a50414..c308443 100644 --- a/src/constraints/verifier.rs +++ b/src/constraints/verifier.rs @@ -16,21 +16,27 @@ use ark_relations::ns; pub struct Marlin< F: PrimeField, CF: PrimeField, - PC: PolynomialCommitment>, - PCG: PCCheckVar, PC, CF>, + PR: FiatShamirRng, + R: FiatShamirRngVar, + PC: PolynomialCommitment, PR>, + PCG: PCCheckVar, PC, CF, PR>, >( PhantomData, PhantomData, + PhantomData, + PhantomData, PhantomData, PhantomData, ); -impl Marlin +impl Marlin where F: PrimeField, CF: PrimeField, - PC: PolynomialCommitment>, - PCG: PCCheckVar, PC, CF>, + PR: FiatShamirRng, + R: FiatShamirRngVar, + PC: PolynomialCommitment, PR>, + PCG: PCCheckVar, PC, CF, PR>, PCG::VerifierKeyVar: ToConstraintFieldGadget, PCG::CommitmentVar: ToConstraintFieldGadget, { @@ -38,10 +44,10 @@ where /// verify with an established hashchain initial state #[tracing::instrument(target = "r1cs", skip(index_pvk, proof))] - pub fn prepared_verify, R: FiatShamirRngVar>( + pub fn prepared_verify( index_pvk: &PreparedIndexVerifierKeyVar, public_input: &[NonNativeFieldVar], - proof: &ProofVar, + proof: &ProofVar, ) -> Result, Error> { let cs = index_pvk .cs @@ -53,9 +59,9 @@ where eprintln!("before AHP: constraints: {}", cs.num_constraints()); - fs_rng.absorb_nonnative_field_elements(&public_input, OptimizationType::Weight)?; + fs_rng.absorb_nonnative(&public_input, OptimizationType::Weight)?; - let (_, verifier_state) = AHPForR1CS::::verifier_first_round( + let (_, verifier_state) = AHPForR1CS::::verifier_first_round( index_pvk.domain_h_size, index_pvk.domain_k_size, &mut fs_rng, @@ -63,14 +69,14 @@ where &proof.prover_messages[0].field_elements, )?; - let (_, verifier_state) = AHPForR1CS::::verifier_second_round( + let (_, verifier_state) = AHPForR1CS::::verifier_second_round( verifier_state, &mut fs_rng, &proof.commitments[1], &proof.prover_messages[1].field_elements, )?; - let verifier_state = AHPForR1CS::::verifier_third_round( + let verifier_state = AHPForR1CS::::verifier_third_round( verifier_state, &mut fs_rng, &proof.commitments[2], @@ -82,7 +88,7 @@ where formatted_public_input.push(elem); } - let lc = AHPForR1CS::::verifier_decision( + let lc = AHPForR1CS::::verifier_decision( ns!(cs, "ahp").cs(), &formatted_public_input, &proof.evaluations, @@ -90,8 +96,8 @@ where &index_pvk.domain_k_size_gadget, )?; - let (num_opening_challenges, num_batching_rands, comm, query_set, evaluations) = - AHPForR1CS::::verifier_comm_query_eval_set( + let (num_opening_challenges, _num_batching_rands, comm, query_set, evaluations) = + AHPForR1CS::::verifier_comm_query_eval_set( &index_pvk, &proof, &verifier_state, @@ -110,12 +116,13 @@ where } } - fs_rng.absorb_nonnative_field_elements(&evals_vec, OptimizationType::Weight)?; + fs_rng.absorb_nonnative(&evals_vec, OptimizationType::Weight)?; let (opening_challenges, opening_challenges_bits) = - fs_rng.squeeze_128_bits_field_elements_and_bits(num_opening_challenges)?; + fs_rng.squeeze_128_bits_nonnative_and_bits(num_opening_challenges)?; + let (batching_rands, batching_rands_bits) = - fs_rng.squeeze_128_bits_field_elements_and_bits(num_batching_rands)?; + fs_rng.squeeze_128_bits_nonnative_and_bits(num_opening_challenges)?; eprintln!("before PC checks: constraints: {}", cs.num_constraints()); @@ -139,10 +146,10 @@ where } #[tracing::instrument(target = "r1cs", skip(index_vk, proof))] - pub fn verify, R: FiatShamirRngVar>( - index_vk: &IndexVerifierKeyVar, + pub fn verify( + index_vk: &IndexVerifierKeyVar, public_input: &[NonNativeFieldVar], - proof: &ProofVar, + proof: &ProofVar, ) -> Result, Error> { let index_pvk = PreparedIndexVerifierKeyVar::::prepare(&index_vk)?; Self::prepared_verify(&index_pvk, public_input, proof) diff --git a/src/constraints/verifier_test.rs b/src/constraints/verifier_test.rs index 6589042..a372d93 100644 --- a/src/constraints/verifier_test.rs +++ b/src/constraints/verifier_test.rs @@ -6,12 +6,12 @@ mod tests { data_structures::{IndexVerifierKeyVar, ProofVar, ProverMsgVar}, verifier::Marlin, }, - fiat_shamir::{ - constraints::FiatShamirAlgebraicSpongeRngVar, poseidon::constraints::PoseidonSpongeVar, - poseidon::PoseidonSponge, FiatShamirAlgebraicSpongeRng, - }, Marlin as MarlinNative, MarlinRecursiveConfig, Proof, }; + use crate::{ + FiatShamirSpongeRng, FiatShamirSpongeRngVar, PoseidonSpongeVarWithDefaultRate, + PoseidonSpongeWithDefaultRate, + }; use ark_ec::{CurveCycle, PairingEngine, PairingFriendlyCycle}; use ark_ff::{Field, UniformRand}; use ark_mnt4_298::{constraints::PairingVar as MNT4PairingVar, Fq, Fr, MNT4_298}; @@ -41,11 +41,11 @@ mod tests { type Engine2 = MNT4_298; } - type FS = FiatShamirAlgebraicSpongeRng>; - type MultiPC = MarlinKZG10>; + type FS = FiatShamirSpongeRng>; + type MultiPC = MarlinKZG10, FS>; type MarlinNativeInst = MarlinNative; - type MultiPCVar = MarlinKZG10Gadget, MNT4PairingVar>; + type MultiPCVar = MarlinKZG10Gadget, MNT4PairingVar, FS>; #[derive(Copy, Clone)] struct Circuit { @@ -83,6 +83,7 @@ mod tests { } #[test] + #[ignore] fn verifier_test() { let rng = &mut ark_std::test_rng(); @@ -121,7 +122,7 @@ mod tests { cs.set_optimization_goal(OptimizationGoal::Weight); // BEGIN: ivk to ivk_gadget - let ivk_gadget: IndexVerifierKeyVar = + let ivk_gadget: IndexVerifierKeyVar = IndexVerifierKeyVar::new_witness(ns!(cs, "alloc#index vk"), || Ok(index_vk)).unwrap(); // END: ivk to ivk_gadget @@ -187,7 +188,7 @@ mod tests { .collect(); let pc_batch_proof = - BatchLCProofVar::, MNT4PairingVar>::new_witness( + BatchLCProofVar::, MNT4PairingVar, FS>::new_witness( ns!(cs, "alloc#proof"), || Ok(pc_proof), ) @@ -212,7 +213,7 @@ mod tests { evaluation_gadgets.insert(s.to_string(), (*eval).clone()); } - let proof_gadget: ProofVar = ProofVar { + let proof_gadget: ProofVar = ProofVar { cs: cs.clone(), commitments: commitment_gadgets, evaluations: evaluation_gadgets, @@ -221,10 +222,14 @@ mod tests { }; // END: proof to proof_gadget - Marlin::::verify::< - FiatShamirAlgebraicSpongeRng>, - FiatShamirAlgebraicSpongeRngVar, PoseidonSpongeVar>, - >(&ivk_gadget, &public_input_gadget, &proof_gadget) + Marlin::< + Fr, + Fq, + FS, + FiatShamirSpongeRngVar>, + MultiPC, + MultiPCVar, + >::verify(&ivk_gadget, &public_input_gadget, &proof_gadget) .unwrap() .enforce_equal(&Boolean::Constant(true)) .unwrap(); diff --git a/src/data_structures.rs b/src/data_structures.rs index eded1b5..1d79109 100644 --- a/src/data_structures.rs +++ b/src/data_structures.rs @@ -10,6 +10,7 @@ use ark_poly_commit::{ }; use ark_relations::r1cs::SynthesisError; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_sponge::CryptographicSponge; use ark_std::{ format, io::{Read, Write}, @@ -20,7 +21,8 @@ use ark_std::{ /* ************************************************************************* */ /// The universal public parameters for the argument system. -pub type UniversalSRS = >>::UniversalParams; +pub type UniversalSRS = + , S>>::UniversalParams; /* ************************************************************************* */ /* ************************************************************************* */ @@ -28,7 +30,11 @@ pub type UniversalSRS = /// Verification key for a specific index (i.e., R1CS matrices). #[derive(CanonicalSerialize, CanonicalDeserialize)] -pub struct IndexVerifierKey>> { +pub struct IndexVerifierKey< + F: PrimeField, + S: CryptographicSponge, + PC: PolynomialCommitment, S>, +> { /// Stores information about the size of the index, as well as its field of /// definition. pub index_info: IndexInfo, @@ -38,8 +44,8 @@ pub struct IndexVerifierKey>> ark_ff::ToBytes - for IndexVerifierKey +impl, S>> + ark_ff::ToBytes for IndexVerifierKey { fn write(&self, mut w: W) -> ark_std::io::Result<()> { self.index_info.write(&mut w)?; @@ -47,8 +53,8 @@ impl>> ark_ff::ToB } } -impl>> Clone - for IndexVerifierKey +impl, S>> + Clone for IndexVerifierKey { fn clone(&self) -> Self { Self { @@ -59,7 +65,9 @@ impl>> Clone } } -impl>> IndexVerifierKey { +impl, S>> + IndexVerifierKey +{ /// Iterate over the commitments to indexed polynomials in `self`. pub fn iter(&self) -> impl Iterator { self.index_comms.iter() @@ -71,8 +79,11 @@ impl>> IndexVerifi /* ************************************************************************* */ /// Verification key, prepared (preprocessed) for use in pairings. -pub struct PreparedIndexVerifierKey>> -{ +pub struct PreparedIndexVerifierKey< + F: PrimeField, + S: CryptographicSponge, + PC: PolynomialCommitment, S>, +> { /// Size of the variable domain. pub domain_h_size: u64, /// Size of the matrix domain. @@ -84,13 +95,14 @@ pub struct PreparedIndexVerifierKey, + pub orig_vk: IndexVerifierKey, } -impl Clone for PreparedIndexVerifierKey +impl Clone for PreparedIndexVerifierKey where F: PrimeField, - PC: PolynomialCommitment>, + S: CryptographicSponge, + PC: PolynomialCommitment, S>, { fn clone(&self) -> Self { PreparedIndexVerifierKey { @@ -103,12 +115,13 @@ where } } -impl PreparedIndexVerifierKey +impl PreparedIndexVerifierKey where F: PrimeField, - PC: PolynomialCommitment>, + S: CryptographicSponge, + PC: PolynomialCommitment, S>, { - pub fn prepare(vk: &IndexVerifierKey) -> Self { + pub fn prepare(vk: &IndexVerifierKey) -> Self { let mut prepared_index_comms = Vec::::new(); for (_, comm) in vk.index_comms.iter().enumerate() { prepared_index_comms.push(PC::PreparedCommitment::prepare(comm)); @@ -142,9 +155,13 @@ where /// Proving key for a specific index (i.e., R1CS matrices). #[derive(CanonicalSerialize, CanonicalDeserialize)] -pub struct IndexProverKey>> { +pub struct IndexProverKey< + F: PrimeField, + S: CryptographicSponge, + PC: PolynomialCommitment, S>, +> { /// The index verifier key. - pub index_vk: IndexVerifierKey, + pub index_vk: IndexVerifierKey, /// The randomness for the index polynomial commitments. pub index_comm_rands: Vec, /// The index itself. @@ -153,7 +170,8 @@ pub struct IndexProverKey>> Clone for IndexProverKey +impl, S>> + Clone for IndexProverKey where PC::Commitment: Clone, { @@ -173,7 +191,11 @@ where /// A zkSNARK proof. #[derive(CanonicalSerialize, CanonicalDeserialize)] -pub struct Proof>> { +pub struct Proof< + F: PrimeField, + S: CryptographicSponge, + PC: PolynomialCommitment, S>, +> { /// Commitments to the polynomials produced by the AHP prover. pub commitments: Vec>, /// Evaluations of these polynomials. @@ -181,16 +203,18 @@ pub struct Proof>> /// The field elements sent by the prover. pub prover_messages: Vec>, /// An evaluation proof from the polynomial commitment. - pub pc_proof: BatchLCProof, PC>, + pub pc_proof: BatchLCProof, } -impl>> Proof { +impl, S>> + Proof +{ /// Construct a new proof. pub fn new( commitments: Vec>, evaluations: Vec, prover_messages: Vec>, - pc_proof: BatchLCProof, PC>, + pc_proof: BatchLCProof, ) -> Self { Self { commitments, @@ -202,7 +226,7 @@ impl>> Proof>> Proof = self.pc_proof.proof.clone().into(); - let num_proofs = proofs.len(); - for proof in &proofs { - size_bytes_proofs += proof.size_in_bytes(); - } + let num_proofs = 1; + size_bytes_proofs += self.pc_proof.serialized_size(); let num_evals = self.evaluations.len(); let evals_size_in_bytes = num_evals * size_of_fe_in_bytes; @@ -270,7 +291,9 @@ impl>> Proof>> Clone for Proof { +impl, S>> + Clone for Proof +{ fn clone(&self) -> Self { Proof { commitments: self.commitments.clone(), diff --git a/src/fiat_shamir/constraints.rs b/src/fiat_shamir/constraints.rs index 4b60da8..994de0d 100644 --- a/src/fiat_shamir/constraints.rs +++ b/src/fiat_shamir/constraints.rs @@ -1,5 +1,5 @@ -use crate::fiat_shamir::{AlgebraicSponge, FiatShamirAlgebraicSpongeRng, FiatShamirRng}; -use crate::{overhead, Vec}; +use crate::sponge::CryptographicSpongeVarNonNative; +use crate::{overhead, CryptographicSpongeParameters, Vec}; use ark_ff::PrimeField; use ark_nonnative_field::params::{get_params, OptimizationType}; use ark_nonnative_field::{AllocatedNonNativeFieldVar, NonNativeFieldVar}; @@ -15,52 +15,41 @@ use ark_relations::lc; use ark_relations::r1cs::{ ConstraintSystemRef, LinearCombination, OptimizationGoal, SynthesisError, }; +use ark_sponge::constraints::{AbsorbGadget, CryptographicSpongeVar}; +use ark_sponge::CryptographicSponge; use core::marker::PhantomData; /// Vars for a RNG for use in a Fiat-Shamir transform. -pub trait FiatShamirRngVar>: - Clone +pub trait FiatShamirRngVar: + From> + CryptographicSpongeVar { - /// Create a new RNG. - fn new(cs: ConstraintSystemRef) -> Self; - // Instantiate from a plaintext fs_rng. - fn constant(cs: ConstraintSystemRef, pfs: &PFS) -> Self; + fn constant(cs: ConstraintSystemRef, pfs: &S) -> Self; /// Take in field elements. - fn absorb_nonnative_field_elements( + fn absorb_nonnative( &mut self, elems: &[NonNativeFieldVar], ty: OptimizationType, ) -> Result<(), SynthesisError>; /// Take in field elements. - fn absorb_native_field_elements(&mut self, elems: &[FpVar]) -> Result<(), SynthesisError>; + fn absorb_native(&mut self, elems: &[FpVar]) -> Result<(), SynthesisError>; /// Take in bytes. fn absorb_bytes(&mut self, elems: &[UInt8]) -> Result<(), SynthesisError>; /// Output field elements. - fn squeeze_native_field_elements( - &mut self, - num: usize, - ) -> Result>, SynthesisError>; + fn squeeze_native(&mut self, num: usize) -> Result>, SynthesisError>; /// Output field elements. - fn squeeze_field_elements( + fn squeeze_nonnative( &mut self, num: usize, ) -> Result>, SynthesisError>; - /// Output field elements and the corresponding bits (this can reduce repeated computation). - #[allow(clippy::type_complexity)] - fn squeeze_field_elements_and_bits( - &mut self, - num: usize, - ) -> Result<(Vec>, Vec>>), SynthesisError>; - /// Output field elements with only 128 bits. - fn squeeze_128_bits_field_elements( + fn squeeze_128_bits_nonnative( &mut self, num: usize, ) -> Result>, SynthesisError>; @@ -68,37 +57,19 @@ pub trait FiatShamirRngVar Result<(Vec>, Vec>>), SynthesisError>; } -/// Trait for an algebraic sponge such as Poseidon. -pub trait AlgebraicSpongeVar>: Clone { - /// Create the new sponge. - fn new(cs: ConstraintSystemRef) -> Self; - - /// Instantiate from a plaintext sponge. - fn constant(cs: ConstraintSystemRef, ps: &PS) -> Self; - - /// Obtain the constraint system. - fn cs(&self) -> ConstraintSystemRef; - - /// Take in field elements. - fn absorb(&mut self, elems: &[FpVar]) -> Result<(), SynthesisError>; - - /// Output field elements. - fn squeeze(&mut self, num: usize) -> Result>, SynthesisError>; -} - /// Building the Fiat-Shamir sponge's gadget from any algebraic sponge's gadget. #[derive(Clone)] -pub struct FiatShamirAlgebraicSpongeRngVar< +pub struct FiatShamirSpongeRngVar< F: PrimeField, CF: PrimeField, - PS: AlgebraicSponge, - S: AlgebraicSpongeVar, + PS: CryptographicSponge, + S: CryptographicSpongeVar, > { pub cs: ConstraintSystemRef, pub s: S, @@ -108,8 +79,8 @@ pub struct FiatShamirAlgebraicSpongeRngVar< ps_phantom: PhantomData, } -impl, S: AlgebraicSpongeVar> - FiatShamirAlgebraicSpongeRngVar +impl> + FiatShamirSpongeRngVar { /// Compress every two elements if possible. Provides a vector of (limb, num_of_additions), /// both of which are CF. @@ -223,7 +194,7 @@ impl, S: AlgebraicSpongeV let bits_per_element = CF::size_in_bits() - 1; let num_elements = (num_bits + bits_per_element - 1) / bits_per_element; - let src_elements = sponge.squeeze(num_elements)?; + let src_elements = sponge.squeeze_field_elements(num_elements)?; let mut dest_bits = Vec::>::new(); for elem in src_elements.iter() { @@ -335,27 +306,35 @@ impl, S: AlgebraicSpongeV } } -impl, S: AlgebraicSpongeVar> - FiatShamirRngVar> - for FiatShamirAlgebraicSpongeRngVar +impl< + F: PrimeField, + CF: PrimeField, + PS: CryptographicSponge, + S: CryptographicSpongeVarNonNative, + > From> for FiatShamirSpongeRngVar +where + >::Parameters: CryptographicSpongeParameters, { - fn new(cs: ConstraintSystemRef) -> Self { + fn from(cs: ConstraintSystemRef) -> Self { Self { cs: cs.clone(), - s: S::new(cs), + s: S::with_default_rate(cs), f_phantom: PhantomData, cf_phantom: PhantomData, ps_phantom: PhantomData, } } +} + +impl> + CryptographicSpongeVar for FiatShamirSpongeRngVar +{ + type Parameters = S::Parameters; - fn constant( - cs: ConstraintSystemRef, - pfs: &FiatShamirAlgebraicSpongeRng, - ) -> Self { + fn new(cs: ConstraintSystemRef, params: &Self::Parameters) -> Self { Self { cs: cs.clone(), - s: S::constant(cs, &pfs.s.clone()), + s: S::new(cs, params), f_phantom: PhantomData, cf_phantom: PhantomData, ps_phantom: PhantomData, @@ -363,7 +342,48 @@ impl, S: AlgebraicSpongeV } #[tracing::instrument(target = "r1cs", skip(self))] - fn absorb_nonnative_field_elements( + fn cs(&self) -> ConstraintSystemRef { + self.cs.clone() + } + + fn absorb(&mut self, input: &impl AbsorbGadget) -> Result<(), SynthesisError> { + self.s.absorb(input) + } + + #[tracing::instrument(target = "r1cs", skip(self))] + fn squeeze_bytes(&mut self, num_bytes: usize) -> Result>, SynthesisError> { + self.s.squeeze_bytes(num_bytes) + } + + #[tracing::instrument(target = "r1cs", skip(self))] + fn squeeze_bits(&mut self, num_bits: usize) -> Result>, SynthesisError> { + self.s.squeeze_bits(num_bits) + } + + #[tracing::instrument(target = "r1cs", skip(self))] + fn squeeze_field_elements( + &mut self, + num_elements: usize, + ) -> Result>, SynthesisError> { + self.s.squeeze_field_elements(num_elements) + } +} + +impl< + F: PrimeField, + CF: PrimeField, + PS: CryptographicSponge, + S: CryptographicSpongeVarNonNative, + > FiatShamirRngVar for FiatShamirSpongeRngVar +where + >::Parameters: CryptographicSpongeParameters, +{ + fn constant(cs: ConstraintSystemRef, _pfs: &PS) -> Self { + Self::from(cs) + } + + #[tracing::instrument(target = "r1cs", skip(self))] + fn absorb_nonnative( &mut self, elems: &[NonNativeFieldVar], ty: OptimizationType, @@ -372,60 +392,22 @@ impl, S: AlgebraicSpongeV } #[tracing::instrument(target = "r1cs", skip(self))] - fn absorb_native_field_elements(&mut self, elems: &[FpVar]) -> Result<(), SynthesisError> { - self.s.absorb(elems)?; - Ok(()) + fn absorb_native(&mut self, elems: &[FpVar]) -> Result<(), SynthesisError> { + self.absorb(&elems) } #[tracing::instrument(target = "r1cs", skip(self))] fn absorb_bytes(&mut self, elems: &[UInt8]) -> Result<(), SynthesisError> { - let capacity = CF::size_in_bits() - 1; - let mut bits = Vec::>::new(); - for elem in elems.iter() { - let mut bits_le = elem.to_bits_le()?; // UInt8's to_bits is le, which is an exception in Zexe. - bits_le.reverse(); - bits.extend_from_slice(&bits_le); - } - - let mut adjustment_factors = Vec::::new(); - let mut cur = CF::one(); - for _ in 0..capacity { - adjustment_factors.push(cur); - cur.double_in_place(); - } - - let mut gadgets = Vec::>::new(); - for elem_bits in bits.chunks(capacity) { - let mut elem = CF::zero(); - let mut lc = LinearCombination::zero(); - for (bit, adjustment_factor) in elem_bits.iter().rev().zip(adjustment_factors.iter()) { - if bit.value().unwrap_or_default() { - elem += adjustment_factor; - } - lc = &lc + bit.lc() * *adjustment_factor; - } - - let gadget = - AllocatedFp::new_witness(ark_relations::ns!(self.cs, "gadget"), || Ok(elem))?; - lc = lc.clone() - (CF::one(), gadget.variable); - - gadgets.push(FpVar::from(gadget)); - self.cs.enforce_constraint(lc!(), lc!(), lc)?; - } - - self.s.absorb(&gadgets) + self.absorb(&elems) } #[tracing::instrument(target = "r1cs", skip(self))] - fn squeeze_native_field_elements( - &mut self, - num: usize, - ) -> Result>, SynthesisError> { - self.s.squeeze(num) + fn squeeze_native(&mut self, num: usize) -> Result>, SynthesisError> { + self.s.squeeze_field_elements(num) } #[tracing::instrument(target = "r1cs", skip(self))] - fn squeeze_field_elements( + fn squeeze_nonnative( &mut self, num: usize, ) -> Result>, SynthesisError> { @@ -433,16 +415,7 @@ impl, S: AlgebraicSpongeV } #[tracing::instrument(target = "r1cs", skip(self))] - #[allow(clippy::type_complexity)] - fn squeeze_field_elements_and_bits( - &mut self, - num: usize, - ) -> Result<(Vec>, Vec>>), SynthesisError> { - Self::get_gadgets_and_bits_from_sponge(&mut self.s, num, false) - } - - #[tracing::instrument(target = "r1cs", skip(self))] - fn squeeze_128_bits_field_elements( + fn squeeze_128_bits_nonnative( &mut self, num: usize, ) -> Result>, SynthesisError> { @@ -450,8 +423,7 @@ impl, S: AlgebraicSpongeV } #[tracing::instrument(target = "r1cs", skip(self))] - #[allow(clippy::type_complexity)] - fn squeeze_128_bits_field_elements_and_bits( + fn squeeze_128_bits_nonnative_and_bits( &mut self, num: usize, ) -> Result<(Vec>, Vec>>), SynthesisError> { diff --git a/src/fiat_shamir/mod.rs b/src/fiat_shamir/mod.rs index 09bd1af..6ff5a06 100644 --- a/src/fiat_shamir/mod.rs +++ b/src/fiat_shamir/mod.rs @@ -1,16 +1,17 @@ -use crate::Vec; -use ark_ff::{BigInteger, FpParameters, PrimeField, ToConstraintField}; +use crate::{CryptographicSpongeParameters, CryptographicSpongeWithRate, Vec}; +use ark_ff::{BigInteger, PrimeField, ToConstraintField}; use ark_nonnative_field::params::{get_params, OptimizationType}; use ark_nonnative_field::AllocatedNonNativeFieldVar; +use ark_sponge::{Absorb, CryptographicSponge}; +use ark_std::io::{Read, Result as IoResult, Write}; use ark_std::marker::PhantomData; use ark_std::rand::{RngCore, SeedableRng}; +use core::{cmp, iter}; use digest::Digest; use rand_chacha::ChaChaRng; /// The constraints for Fiat-Shamir pub mod constraints; -/// The Poseidon sponge -pub mod poseidon; /// a macro for computing ceil(log2(x))+1 for a field element x #[doc(hidden)] @@ -45,33 +46,24 @@ macro_rules! overhead { } /// the trait for Fiat-Shamir RNG -pub trait FiatShamirRng: RngCore { - /// initialize the RNG - fn new() -> Self; - +pub trait FiatShamirRng: + Default + RngCore + Write + CryptographicSponge +{ /// take in field elements - fn absorb_nonnative_field_elements(&mut self, elems: &[F], ty: OptimizationType); + fn absorb_nonnative(&mut self, elems: &[F], ty: OptimizationType); /// take in field elements - fn absorb_native_field_elements>(&mut self, elems: &[T]); + fn absorb_native>(&mut self, elems: &[T]); /// take in bytes - fn absorb_bytes(&mut self, elems: &[u8]); + fn absorb_bytes(&mut self, bytes: &[u8]) { + ::write(self, bytes).ok(); + } /// take out field elements - fn squeeze_nonnative_field_elements(&mut self, num: usize, ty: OptimizationType) -> Vec; + fn squeeze_nonnative(&mut self, num: usize, ty: OptimizationType) -> Vec; /// take in field elements - fn squeeze_native_field_elements(&mut self, num: usize) -> Vec; + fn squeeze_native(&mut self, num: usize) -> Vec; /// take out field elements of 128 bits - fn squeeze_128_bits_nonnative_field_elements(&mut self, num: usize) -> Vec; -} - -/// the trait for algebraic sponge -pub trait AlgebraicSponge: Clone { - /// initialize the sponge - fn new() -> Self; - /// take in field elements - fn absorb(&mut self, elems: &[CF]); - /// take out field elements - fn squeeze(&mut self, num: usize) -> Vec; + fn squeeze_128_bits_nonnative(&mut self, num: usize) -> Vec; } /// use a ChaCha stream cipher to generate the actual pseudorandom bits @@ -85,6 +77,33 @@ pub struct FiatShamirChaChaRng { digest: PhantomData, } +impl Default for FiatShamirChaChaRng { + fn default() -> Self { + let seed = [0; 32]; + let r = ChaChaRng::from_seed(seed); + + Self { + r, + seed: seed.to_vec(), + field: PhantomData, + representation_field: PhantomData, + digest: PhantomData, + } + } +} + +impl Clone for FiatShamirChaChaRng { + fn clone(&self) -> Self { + Self { + r: self.r.clone(), + seed: self.seed.clone(), + field: PhantomData, + representation_field: PhantomData, + digest: PhantomData, + } + } +} + impl RngCore for FiatShamirChaChaRng { fn next_u32(&mut self) -> u32 { self.r.next_u32() @@ -106,9 +125,85 @@ impl RngCore for FiatShamirChaChaRng FiatShamirRng for FiatShamirChaChaRng { - fn new() -> Self { + fn absorb_nonnative(&mut self, elems: &[F], _: OptimizationType) { + elems + .iter() + .try_for_each(|elem| elem.write(&mut *self)) + .expect("failed to convert to bytes"); + } + + fn absorb_native>(&mut self, elems: &[T]) { + elems + .iter() + .filter_map(|elem| elem.to_field_elements()) + .flat_map(|v| v.into_iter()) + .try_for_each(|elem| elem.write(&mut *self)) + .expect("failed to convert to bytes"); + } + + fn squeeze_nonnative(&mut self, num: usize, _: OptimizationType) -> Vec { + iter::from_fn(|| Some(F::rand(&mut self.r))) + .take(num) + .collect() + } + + fn squeeze_native(&mut self, num: usize) -> Vec { + iter::from_fn(|| Some(CF::rand(&mut self.r))) + .take(num) + .collect() + } + + fn squeeze_128_bits_nonnative(&mut self, num: usize) -> Vec { + let mut x = [0u8; 16]; + + iter::from_fn(|| { + self.r.fill_bytes(&mut x); + + let elem = F::from_random_bytes(&x).expect("failed to create field element"); + + Some(elem) + }) + .take(num) + .collect() + } +} + +impl Write for FiatShamirChaChaRng { + fn write(&mut self, buf: &[u8]) -> IoResult { + self.seed = D::digest(buf).to_vec(); + + let l = cmp::min(32, self.seed.len()); + let mut seed = [0u8; 32]; + + (&mut seed[..l]).copy_from_slice(&self.seed[..l]); + + self.r = ChaChaRng::from_seed(seed); + + Ok(buf.len()) + } + + fn flush(&mut self) -> IoResult<()> { + Ok(()) + } +} + +impl Read for FiatShamirChaChaRng { + fn read(&mut self, buf: &mut [u8]) -> IoResult { + self.fill_bytes(buf); + + Ok(buf.len()) + } +} + +impl CryptographicSponge + for FiatShamirChaChaRng +{ + type Parameters = (); + + fn new(_params: &Self::Parameters) -> Self { let seed = [0; 32]; let r = ChaChaRng::from_seed(seed); + Self { r, seed: seed.to_vec(), @@ -118,80 +213,148 @@ impl FiatShamirRng } } - fn absorb_nonnative_field_elements(&mut self, elems: &[F], _: OptimizationType) { - let mut bytes = Vec::new(); - for elem in elems { - elem.write(&mut bytes).expect("failed to convert to bytes"); - } - self.absorb_bytes(&bytes); - } + fn absorb(&mut self, input: &impl Absorb) { + let bytes = input.to_sponge_bytes_as_vec(); - fn absorb_native_field_elements>(&mut self, src: &[T]) { - let mut elems = Vec::::new(); - for elem in src.iter() { - elems.append(&mut elem.to_field_elements().unwrap()); - } - - let mut bytes = Vec::new(); - for elem in elems.iter() { - elem.write(&mut bytes).expect("failed to convert to bytes"); - } - self.absorb_bytes(&bytes); - } - - fn absorb_bytes(&mut self, elems: &[u8]) { - let mut bytes = elems.to_vec(); - bytes.extend_from_slice(&self.seed); - - let new_seed = D::digest(&bytes); - self.seed = (*new_seed.as_slice()).to_vec(); + self.seed = D::digest(&bytes).to_vec(); + let l = cmp::min(32, self.seed.len()); let mut seed = [0u8; 32]; - for (i, byte) in self.seed.as_slice().iter().enumerate() { - seed[i] = *byte; - } + + (&mut seed[..l]).copy_from_slice(&self.seed[..l]); self.r = ChaChaRng::from_seed(seed); } - fn squeeze_nonnative_field_elements(&mut self, num: usize, _: OptimizationType) -> Vec { - let mut res = Vec::::new(); - for _ in 0..num { - res.push(F::rand(&mut self.r)); - } - res - } + fn squeeze_bytes(&mut self, num_bytes: usize) -> Vec { + let mut output = vec![0u8; num_bytes]; - fn squeeze_native_field_elements(&mut self, num: usize) -> Vec { - let mut res = Vec::::new(); - for _ in 0..num { - res.push(CF::rand(&mut self.r)); - } - res + self.fill_bytes(output.as_mut_slice()); + + output } - fn squeeze_128_bits_nonnative_field_elements(&mut self, num: usize) -> Vec { - let mut res = Vec::::new(); - for _ in 0..num { - let mut x = [0u8; 16]; - self.r.fill_bytes(&mut x); - res.push(F::from_random_bytes(&x).unwrap()); - } - res + fn squeeze_bits(&mut self, num_bits: usize) -> Vec { + self.squeeze_bytes(num_bits) + .into_iter() + .map(|b| (b & 0x01) == 1) + .collect() } } /// rng from any algebraic sponge -pub struct FiatShamirAlgebraicSpongeRng> { +pub struct FiatShamirSpongeRng { pub s: S, #[doc(hidden)] f_phantom: PhantomData, cf_phantom: PhantomData, } -impl> FiatShamirAlgebraicSpongeRng { - /// compress every two elements if possible. Provides a vector of (limb, num_of_additions), both of which are P::BaseField. - pub fn compress_elements(src_limbs: &[(CF, CF)], ty: OptimizationType) -> Vec { +impl Clone + for FiatShamirSpongeRng +{ + fn clone(&self) -> Self { + Self { + s: self.s.clone(), + f_phantom: PhantomData, + cf_phantom: PhantomData, + } + } +} + +impl From + for FiatShamirSpongeRng +{ + fn from(s: S) -> Self { + Self { + s, + f_phantom: PhantomData, + cf_phantom: PhantomData, + } + } +} + +impl Default + for FiatShamirSpongeRng +where + ::Parameters: CryptographicSpongeParameters, +{ + fn default() -> Self { + S::with_default_rate().into() + } +} + +impl CryptographicSponge + for FiatShamirSpongeRng +{ + type Parameters = S::Parameters; + + fn new(params: &Self::Parameters) -> Self { + S::new(params).into() + } + + fn absorb(&mut self, input: &impl Absorb) { + self.s.absorb(input) + } + + fn squeeze_bytes(&mut self, num_bytes: usize) -> Vec { + self.s.squeeze_bytes(num_bytes) + } + + fn squeeze_bits(&mut self, num_bits: usize) -> Vec { + self.s.squeeze_bits(num_bits) + } +} + +impl FiatShamirRng + for FiatShamirSpongeRng +where + CF: Absorb, + ::Parameters: CryptographicSpongeParameters, +{ + fn absorb_nonnative(&mut self, elems: &[F], ty: OptimizationType) { + // FIXME ignoring faulty elements; maybe panic? + let src: Vec<(CF, CF)> = elems + .iter() + .filter_map(|elem| { + AllocatedNonNativeFieldVar::::get_limbs_representations(elem, ty).ok() + }) + .flatten() + // specifically set to one since most gadgets in the constraint world would not have + // zero noise (due to the relatively weak normal form testing in `alloc`) + .map(|limb| (limb, CF::one())) + .collect(); + + let dest = Self::compress_elements(&src, ty); + + self.absorb(&dest); + } + + fn absorb_native>(&mut self, elems: &[T]) { + elems + .iter() + .filter_map(|elem| elem.to_field_elements()) + .flat_map(|v| v.into_iter()) + .for_each(|elem| self.absorb(&elem)); + } + + fn squeeze_nonnative(&mut self, num: usize, _: OptimizationType) -> Vec { + Self::get_elements_from_sponge(&mut self.s, num, false) + } + + fn squeeze_native(&mut self, num: usize) -> Vec { + self.squeeze_field_elements(num) + } + + fn squeeze_128_bits_nonnative(&mut self, num: usize) -> Vec { + Self::get_elements_from_sponge(&mut self.s, num, true) + } +} + +impl FiatShamirSpongeRng { + /// compress every two elements if possible. Provides a vector of (limb, num_of_additions), + /// both of which are P::BaseField. + fn compress_elements(src_limbs: &[(CF, CF)], ty: OptimizationType) -> Vec { let capacity = CF::size_in_bits() - 1; let mut dest_limbs = Vec::::new(); @@ -246,45 +409,10 @@ impl> FiatShamirAlgebraicS dest_limbs } - /// push elements to sponge, treated in the non-native field representations. - pub fn push_elements_to_sponge(sponge: &mut S, src: &[F], ty: OptimizationType) { - let mut src_limbs = Vec::<(CF, CF)>::new(); - - for elem in src.iter() { - let limbs = - AllocatedNonNativeFieldVar::::get_limbs_representations(elem, ty).unwrap(); - for limb in limbs.iter() { - src_limbs.push((*limb, CF::one())); - // specifically set to one, since most gadgets in the constraint world would not have zero noise (due to the relatively weak normal form testing in `alloc`) - } - } - - let dest_limbs = Self::compress_elements(&src_limbs, ty); - sponge.absorb(&dest_limbs); - } - - /// obtain random bits from hashchain. - /// not guaranteed to be uniformly distributed, should only be used in certain situations. - pub fn get_bits_from_sponge(sponge: &mut S, num_bits: usize) -> Vec { - let bits_per_element = CF::size_in_bits() - 1; - let num_elements = (num_bits + bits_per_element - 1) / bits_per_element; - - let src_elements = sponge.squeeze(num_elements); - let mut dest_bits = Vec::::new(); - - let skip = (CF::Params::REPR_SHAVE_BITS + 1) as usize; - for elem in src_elements.iter() { - // discard the highest bit - let elem_bits = elem.into_repr().to_bits_be(); - dest_bits.extend_from_slice(&elem_bits[skip..]); - } - - dest_bits - } - /// obtain random elements from hashchain. + /// /// not guaranteed to be uniformly distributed, should only be used in certain situations. - pub fn get_elements_from_sponge( + fn get_elements_from_sponge( sponge: &mut S, num_elements: usize, outputs_short_elements: bool, @@ -294,7 +422,7 @@ impl> FiatShamirAlgebraicS } else { F::size_in_bits() - 1 // also omit the highest bit }; - let bits = Self::get_bits_from_sponge(sponge, num_bits_per_nonnative * num_elements); + let bits = sponge.squeeze_bits(num_bits_per_nonnative * num_elements); let mut lookup_table = Vec::::new(); let mut cur = F::one(); @@ -306,7 +434,8 @@ impl> FiatShamirAlgebraicS let mut dest_elements = Vec::::new(); bits.chunks_exact(num_bits_per_nonnative) .for_each(|per_nonnative_bits| { - // technically, this can be done via BigInterger::from_bits; here, we use this method for consistency with the gadget counterpart + // this can be done via BigInterger::from_bits; here, we use this method for + // consistency with the gadget counterpart let mut res = F::zero(); for (i, bit) in per_nonnative_bits.iter().rev().enumerate() { @@ -322,28 +451,20 @@ impl> FiatShamirAlgebraicS } } -impl> RngCore - for FiatShamirAlgebraicSpongeRng +impl RngCore + for FiatShamirSpongeRng { fn next_u32(&mut self) -> u32 { - assert!( - CF::size_in_bits() > 128, - "The native field of the algebraic sponge is too small." - ); - let mut dest = [0u8; 4]; + self.fill_bytes(&mut dest); u32::from_be_bytes(dest) } fn next_u64(&mut self) -> u64 { - assert!( - CF::size_in_bits() > 128, - "The native field of the algebraic sponge is too small." - ); - let mut dest = [0u8; 8]; + self.fill_bytes(&mut dest); u64::from_be_bytes(dest) @@ -359,7 +480,7 @@ impl> RngCore let len = dest.len() * 8; let num_of_elements = (capacity + len - 1) / len; - let elements = self.s.squeeze(num_of_elements); + let elements: Vec = self.s.squeeze_field_elements(num_of_elements); let mut bits = Vec::::new(); for elem in elements.iter() { @@ -383,71 +504,33 @@ impl> RngCore } fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), ark_std::rand::Error> { - assert!( - CF::size_in_bits() > 128, - "The native field of the algebraic sponge is too small." - ); - self.fill_bytes(dest); + Ok(()) } } -impl> FiatShamirRng - for FiatShamirAlgebraicSpongeRng +impl Write + for FiatShamirSpongeRng +where + CF: Absorb, + ::Parameters: CryptographicSpongeParameters, { - fn new() -> Self { - Self { - s: S::new(), - f_phantom: PhantomData, - cf_phantom: PhantomData, - } - } + fn write(&mut self, buf: &[u8]) -> IoResult { + self.absorb(&buf); - fn absorb_nonnative_field_elements(&mut self, elems: &[F], ty: OptimizationType) { - Self::push_elements_to_sponge(&mut self.s, elems, ty); + Ok(buf.len()) } - fn absorb_native_field_elements>(&mut self, src: &[T]) { - let mut elems = Vec::::new(); - for elem in src.iter() { - elems.append(&mut elem.to_field_elements().unwrap()); - } - self.s.absorb(&elems); - } - - fn absorb_bytes(&mut self, elems: &[u8]) { - let capacity = CF::size_in_bits() - 1; - let mut bits = Vec::::new(); - for elem in elems.iter() { - bits.append(&mut vec![ - elem & 128 != 0, - elem & 64 != 0, - elem & 32 != 0, - elem & 16 != 0, - elem & 8 != 0, - elem & 4 != 0, - elem & 2 != 0, - elem & 1 != 0, - ]); - } - let elements = bits - .chunks(capacity) - .map(|bits| CF::from_repr(CF::BigInt::from_bits_be(bits)).unwrap()) - .collect::>(); - - self.s.absorb(&elements); - } - - fn squeeze_nonnative_field_elements(&mut self, num: usize, _: OptimizationType) -> Vec { - Self::get_elements_from_sponge(&mut self.s, num, false) + fn flush(&mut self) -> IoResult<()> { + Ok(()) } +} - fn squeeze_native_field_elements(&mut self, num: usize) -> Vec { - self.s.squeeze(num) - } +impl Read for FiatShamirSpongeRng { + fn read(&mut self, buf: &mut [u8]) -> IoResult { + self.fill_bytes(buf); - fn squeeze_128_bits_nonnative_field_elements(&mut self, num: usize) -> Vec { - Self::get_elements_from_sponge(&mut self.s, num, true) + Ok(buf.len()) } } diff --git a/src/fiat_shamir/poseidon/constraints.rs b/src/fiat_shamir/poseidon/constraints.rs deleted file mode 100644 index d3bf8d1..0000000 --- a/src/fiat_shamir/poseidon/constraints.rs +++ /dev/null @@ -1,298 +0,0 @@ -/* - * credit: - * This implementation of Poseidon is entirely from Fractal's implementation - * ([COS20]: https://eprint.iacr.org/2019/1076) - * with small syntax changes. - */ - -use crate::fiat_shamir::constraints::AlgebraicSpongeVar; -use crate::fiat_shamir::poseidon::{PoseidonSponge, PoseidonSpongeState}; -use crate::Vec; -use ark_ff::PrimeField; -use ark_r1cs_std::fields::fp::FpVar; -use ark_r1cs_std::prelude::*; -use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError}; -use ark_std::rand::SeedableRng; - -#[derive(Clone)] -/// the gadget for Poseidon sponge -pub struct PoseidonSpongeVar { - /// constraint system - pub cs: ConstraintSystemRef, - /// number of rounds in a full-round operation - pub full_rounds: u32, - /// number of rounds in a partial-round operation - pub partial_rounds: u32, - /// Exponent used in S-boxes - pub alpha: u64, - /// Additive Round keys. These are added before each MDS matrix application to make it an affine shift. - /// They are indexed by ark[round_num][state_element_index] - pub ark: Vec>, - /// Maximally Distance Separating Matrix. - pub mds: Vec>, - - /// the sponge's state - pub state: Vec>, - /// the rate - pub rate: usize, - /// the capacity - pub capacity: usize, - /// the mode - mode: PoseidonSpongeState, -} - -impl PoseidonSpongeVar { - #[tracing::instrument(target = "r1cs", skip(self))] - fn apply_s_box( - &self, - state: &mut [FpVar], - is_full_round: bool, - ) -> Result<(), SynthesisError> { - // Full rounds apply the S Box (x^alpha) to every element of state - if is_full_round { - for state_item in state.iter_mut() { - *state_item = state_item.pow_by_constant(&[self.alpha])?; - } - } - // Partial rounds apply the S Box (x^alpha) to just the final element of state - else { - state[state.len() - 1] = state[state.len() - 1].pow_by_constant(&[self.alpha])?; - } - - Ok(()) - } - - #[tracing::instrument(target = "r1cs", skip(self))] - fn apply_ark(&self, state: &mut [FpVar], round_number: usize) -> Result<(), SynthesisError> { - for (i, state_elem) in state.iter_mut().enumerate() { - *state_elem += self.ark[round_number][i]; - } - Ok(()) - } - - #[tracing::instrument(target = "r1cs", skip(self))] - fn apply_mds(&self, state: &mut [FpVar]) -> Result<(), SynthesisError> { - let mut new_state = Vec::new(); - let zero = FpVar::::zero(); - for i in 0..state.len() { - let mut cur = zero.clone(); - for (j, state_elem) in state.iter().enumerate() { - let term = state_elem * self.mds[i][j]; - cur += &term; - } - new_state.push(cur); - } - state.clone_from_slice(&new_state[..state.len()]); - Ok(()) - } - - #[tracing::instrument(target = "r1cs", skip(self))] - fn permute(&mut self) -> Result<(), SynthesisError> { - let full_rounds_over_2 = self.full_rounds / 2; - let mut state = self.state.clone(); - for i in 0..full_rounds_over_2 { - self.apply_ark(&mut state, i as usize)?; - self.apply_s_box(&mut state, true)?; - self.apply_mds(&mut state)?; - } - for i in full_rounds_over_2..(full_rounds_over_2 + self.partial_rounds) { - self.apply_ark(&mut state, i as usize)?; - self.apply_s_box(&mut state, false)?; - self.apply_mds(&mut state)?; - } - - for i in - (full_rounds_over_2 + self.partial_rounds)..(self.partial_rounds + self.full_rounds) - { - self.apply_ark(&mut state, i as usize)?; - self.apply_s_box(&mut state, true)?; - self.apply_mds(&mut state)?; - } - - self.state = state; - Ok(()) - } - - #[tracing::instrument(target = "r1cs", skip(self))] - fn absorb_internal( - &mut self, - rate_start_index: usize, - elements: &[FpVar], - ) -> Result<(), SynthesisError> { - // if we can finish in this call - if rate_start_index + elements.len() <= self.rate { - for (i, element) in elements.iter().enumerate() { - self.state[i + rate_start_index] += element; - } - self.mode = PoseidonSpongeState::Absorbing { - next_absorb_index: rate_start_index + elements.len(), - }; - - return Ok(()); - } - // otherwise absorb (rate - rate_start_index) elements - let num_elements_absorbed = self.rate - rate_start_index; - for (i, element) in elements.iter().enumerate().take(num_elements_absorbed) { - self.state[i + rate_start_index] += element; - } - self.permute()?; - // Tail recurse, with the input elements being truncated by num elements absorbed - self.absorb_internal(0, &elements[num_elements_absorbed..]) - } - - // Squeeze |output| many elements. This does not end in a squeeze - #[tracing::instrument(target = "r1cs", skip(self))] - fn squeeze_internal( - &mut self, - rate_start_index: usize, - output: &mut [FpVar], - ) -> Result<(), SynthesisError> { - // if we can finish in this call - if rate_start_index + output.len() <= self.rate { - output - .clone_from_slice(&self.state[rate_start_index..(output.len() + rate_start_index)]); - self.mode = PoseidonSpongeState::Squeezing { - next_squeeze_index: rate_start_index + output.len(), - }; - return Ok(()); - } - // otherwise squeeze (rate - rate_start_index) elements - let num_elements_squeezed = self.rate - rate_start_index; - output[..num_elements_squeezed].clone_from_slice( - &self.state[rate_start_index..(num_elements_squeezed + rate_start_index)], - ); - - // Unless we are done with squeezing in this call, permute. - if output.len() != self.rate { - self.permute()?; - } - // Tail recurse, with the correct change to indices in output happening due to changing the slice - self.squeeze_internal(0, &mut output[num_elements_squeezed..]) - } -} - -impl AlgebraicSpongeVar> for PoseidonSpongeVar { - fn new(cs: ConstraintSystemRef) -> Self { - // Requires F to be Alt_Bn128Fr - let full_rounds = 8; - let partial_rounds = 31; - let alpha = 17; - - let mds = vec![ - vec![F::one(), F::zero(), F::one()], - vec![F::one(), F::one(), F::zero()], - vec![F::zero(), F::one(), F::one()], - ]; - - let mut ark = Vec::new(); - let mut ark_rng = rand_chacha::ChaChaRng::seed_from_u64(123456789u64); - - for _ in 0..(full_rounds + partial_rounds) { - let mut res = Vec::new(); - - for _ in 0..3 { - res.push(F::rand(&mut ark_rng)); - } - ark.push(res); - } - - let rate = 2; - let capacity = 1; - let zero = FpVar::::zero(); - let state = vec![zero; rate + capacity]; - let mode = PoseidonSpongeState::Absorbing { - next_absorb_index: 0, - }; - - Self { - cs, - full_rounds, - partial_rounds, - alpha, - ark, - mds, - - state, - rate, - capacity, - mode, - } - } - - fn constant(cs: ConstraintSystemRef, pfs: &PoseidonSponge) -> Self { - let mut state_gadgets = Vec::new(); - - for state_elem in pfs.state.iter() { - state_gadgets.push( - FpVar::::new_constant(ark_relations::ns!(cs, "alloc_elems"), *state_elem) - .unwrap(), - ); - } - - Self { - cs, - full_rounds: pfs.full_rounds, - partial_rounds: pfs.partial_rounds, - alpha: pfs.alpha, - ark: pfs.ark.clone(), - mds: pfs.mds.clone(), - - state: state_gadgets, - rate: pfs.rate, - capacity: pfs.capacity, - mode: pfs.mode.clone(), - } - } - - fn cs(&self) -> ConstraintSystemRef { - self.cs.clone() - } - - fn absorb(&mut self, elems: &[FpVar]) -> Result<(), SynthesisError> { - if elems.is_empty() { - return Ok(()); - } - - match self.mode { - PoseidonSpongeState::Absorbing { next_absorb_index } => { - let mut absorb_index = next_absorb_index; - if absorb_index == self.rate { - self.permute()?; - absorb_index = 0; - } - self.absorb_internal(absorb_index, elems)?; - } - PoseidonSpongeState::Squeezing { - next_squeeze_index: _, - } => { - self.permute()?; - self.absorb_internal(0, elems)?; - } - }; - - Ok(()) - } - - fn squeeze(&mut self, num: usize) -> Result>, SynthesisError> { - let zero = FpVar::zero(); - let mut squeezed_elems = vec![zero; num]; - match self.mode { - PoseidonSpongeState::Absorbing { - next_absorb_index: _, - } => { - self.permute()?; - self.squeeze_internal(0, &mut squeezed_elems)?; - } - PoseidonSpongeState::Squeezing { next_squeeze_index } => { - let mut squeeze_index = next_squeeze_index; - if squeeze_index == self.rate { - self.permute()?; - squeeze_index = 0; - } - self.squeeze_internal(squeeze_index, &mut squeezed_elems)?; - } - }; - - Ok(squeezed_elems) - } -} diff --git a/src/fiat_shamir/poseidon/mod.rs b/src/fiat_shamir/poseidon/mod.rs deleted file mode 100644 index 33f6d2a..0000000 --- a/src/fiat_shamir/poseidon/mod.rs +++ /dev/null @@ -1,243 +0,0 @@ -/* - * credit: - * This implementation of Poseidon is entirely from Fractal's implementation - * ([COS20]: https://eprint.iacr.org/2019/1076) - * with small syntax changes. - */ - -use crate::fiat_shamir::AlgebraicSponge; -use crate::Vec; -use ark_ff::PrimeField; -use ark_std::rand::SeedableRng; - -/// constraints for Poseidon -pub mod constraints; - -#[derive(Clone)] -enum PoseidonSpongeState { - Absorbing { next_absorb_index: usize }, - Squeezing { next_squeeze_index: usize }, -} - -#[derive(Clone)] -/// the sponge for Poseidon -pub struct PoseidonSponge { - /// number of rounds in a full-round operation - pub full_rounds: u32, - /// number of rounds in a partial-round operation - pub partial_rounds: u32, - /// Exponent used in S-boxes - pub alpha: u64, - /// Additive Round keys. These are added before each MDS matrix application to make it an affine shift. - /// They are indexed by ark[round_num][state_element_index] - pub ark: Vec>, - /// Maximally Distance Separating Matrix. - pub mds: Vec>, - - /// the sponge's state - pub state: Vec, - /// the rate - pub rate: usize, - /// the capacity - pub capacity: usize, - /// the mode - mode: PoseidonSpongeState, -} - -impl PoseidonSponge { - fn apply_s_box(&self, state: &mut [F], is_full_round: bool) { - // Full rounds apply the S Box (x^alpha) to every element of state - if is_full_round { - for elem in state { - *elem = elem.pow(&[self.alpha]); - } - } - // Partial rounds apply the S Box (x^alpha) to just the final element of state - else { - state[state.len() - 1] = state[state.len() - 1].pow(&[self.alpha]); - } - } - - fn apply_ark(&self, state: &mut [F], round_number: usize) { - for (i, state_elem) in state.iter_mut().enumerate() { - state_elem.add_assign(&self.ark[round_number][i]); - } - } - - fn apply_mds(&self, state: &mut [F]) { - let mut new_state = Vec::new(); - for i in 0..state.len() { - let mut cur = F::zero(); - for (j, state_elem) in state.iter().enumerate() { - let term = state_elem.mul(&self.mds[i][j]); - cur.add_assign(&term); - } - new_state.push(cur); - } - state.clone_from_slice(&new_state[..state.len()]) - } - - fn permute(&mut self) { - let full_rounds_over_2 = self.full_rounds / 2; - let mut state = self.state.clone(); - for i in 0..full_rounds_over_2 { - self.apply_ark(&mut state, i as usize); - self.apply_s_box(&mut state, true); - self.apply_mds(&mut state); - } - - for i in full_rounds_over_2..(full_rounds_over_2 + self.partial_rounds) { - self.apply_ark(&mut state, i as usize); - self.apply_s_box(&mut state, false); - self.apply_mds(&mut state); - } - - for i in - (full_rounds_over_2 + self.partial_rounds)..(self.partial_rounds + self.full_rounds) - { - self.apply_ark(&mut state, i as usize); - self.apply_s_box(&mut state, true); - self.apply_mds(&mut state); - } - self.state = state; - } - - // Absorbs everything in elements, this does not end in an absorbtion. - fn absorb_internal(&mut self, rate_start_index: usize, elements: &[F]) { - // if we can finish in this call - if rate_start_index + elements.len() <= self.rate { - for (i, element) in elements.iter().enumerate() { - self.state[i + rate_start_index] += element; - } - self.mode = PoseidonSpongeState::Absorbing { - next_absorb_index: rate_start_index + elements.len(), - }; - - return; - } - // otherwise absorb (rate - rate_start_index) elements - let num_elements_absorbed = self.rate - rate_start_index; - for (i, element) in elements.iter().enumerate().take(num_elements_absorbed) { - self.state[i + rate_start_index] += element; - } - self.permute(); - // Tail recurse, with the input elements being truncated by num elements absorbed - self.absorb_internal(0, &elements[num_elements_absorbed..]); - } - - // Squeeze |output| many elements. This does not end in a squeeze - fn squeeze_internal(&mut self, rate_start_index: usize, output: &mut [F]) { - // if we can finish in this call - if rate_start_index + output.len() <= self.rate { - output - .clone_from_slice(&self.state[rate_start_index..(output.len() + rate_start_index)]); - self.mode = PoseidonSpongeState::Squeezing { - next_squeeze_index: rate_start_index + output.len(), - }; - return; - } - // otherwise squeeze (rate - rate_start_index) elements - let num_elements_squeezed = self.rate - rate_start_index; - output[..num_elements_squeezed].clone_from_slice( - &self.state[rate_start_index..(num_elements_squeezed + rate_start_index)], - ); - - // Unless we are done with squeezing in this call, permute. - if output.len() != self.rate { - self.permute(); - } - // Tail recurse, with the correct change to indices in output happening due to changing the slice - self.squeeze_internal(0, &mut output[num_elements_squeezed..]); - } -} - -impl AlgebraicSponge for PoseidonSponge { - fn new() -> Self { - // Requires F to be Alt_Bn128Fr - let full_rounds = 8; - let partial_rounds = 31; - let alpha = 17; - - let mds = vec![ - vec![F::one(), F::zero(), F::one()], - vec![F::one(), F::one(), F::zero()], - vec![F::zero(), F::one(), F::one()], - ]; - - let mut ark = Vec::new(); - let mut ark_rng = rand_chacha::ChaChaRng::seed_from_u64(123456789u64); - - for _ in 0..(full_rounds + partial_rounds) { - let mut res = Vec::new(); - - for _ in 0..3 { - res.push(F::rand(&mut ark_rng)); - } - ark.push(res); - } - - let rate = 2; - let capacity = 1; - let state = vec![F::zero(); rate + capacity]; - let mode = PoseidonSpongeState::Absorbing { - next_absorb_index: 0, - }; - - PoseidonSponge { - full_rounds, - partial_rounds, - alpha, - ark, - mds, - - state, - rate, - capacity, - mode, - } - } - - fn absorb(&mut self, elems: &[F]) { - if elems.is_empty() { - return; - } - - match self.mode { - PoseidonSpongeState::Absorbing { next_absorb_index } => { - let mut absorb_index = next_absorb_index; - if absorb_index == self.rate { - self.permute(); - absorb_index = 0; - } - self.absorb_internal(absorb_index, elems); - } - PoseidonSpongeState::Squeezing { - next_squeeze_index: _, - } => { - self.permute(); - self.absorb_internal(0, elems); - } - }; - } - - fn squeeze(&mut self, num: usize) -> Vec { - let mut squeezed_elems = vec![F::zero(); num]; - match self.mode { - PoseidonSpongeState::Absorbing { - next_absorb_index: _, - } => { - self.permute(); - self.squeeze_internal(0, &mut squeezed_elems); - } - PoseidonSpongeState::Squeezing { next_squeeze_index } => { - let mut squeeze_index = next_squeeze_index; - if squeeze_index == self.rate { - self.permute(); - squeeze_index = 0; - } - self.squeeze_internal(squeeze_index, &mut squeezed_elems); - } - }; - squeezed_elems - } -} diff --git a/src/lib.rs b/src/lib.rs index c130fcf..dac77d0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -16,11 +16,14 @@ #![forbid(unsafe_code)] #![allow(clippy::op_ref)] +use crate::ahp::prover::ProverMsg; use ark_ff::{to_bytes, PrimeField, ToConstraintField}; +use ark_nonnative_field::params::OptimizationType; use ark_poly::{univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain}; -use ark_poly_commit::Evaluations; -use ark_poly_commit::LabeledPolynomial; -use ark_poly_commit::{LabeledCommitment, PCUniversalParams, PolynomialCommitment}; +use ark_poly_commit::{ + challenge::ChallengeGenerator, Evaluations, LabeledCommitment, LabeledPolynomial, + PCUniversalParams, PolynomialCommitment, +}; use ark_relations::r1cs::{ConstraintSynthesizer, SynthesisError}; use ark_std::rand::RngCore; @@ -45,7 +48,8 @@ macro_rules! eprintln { /// Implements a Fiat-Shamir based Rng that allows one to incrementally update /// the seed based on new messages in the proof transcript. pub mod fiat_shamir; -use crate::fiat_shamir::FiatShamirRng; +pub use fiat_shamir::constraints::{FiatShamirRngVar, FiatShamirSpongeRngVar}; +pub use fiat_shamir::{FiatShamirChaChaRng, FiatShamirRng, FiatShamirSpongeRng}; mod error; pub use error::*; @@ -57,10 +61,14 @@ pub mod constraints; /// Implements an Algebraic Holographic Proof (AHP) for the R1CS indexed relation. pub mod ahp; -use crate::ahp::prover::ProverMsg; -pub use ahp::AHPForR1CS; -use ahp::EvaluationsProvider; -use ark_nonnative_field::params::OptimizationType; +pub use ahp::{AHPForR1CS, EvaluationsProvider}; + +pub mod sponge; +pub use sponge::poseidon::{ + PoseidonParametersWithDefaultRate, PoseidonSpongeVarWithDefaultRate, + PoseidonSpongeWithDefaultRate, +}; +pub use sponge::{CryptographicSpongeParameters, CryptographicSpongeWithRate}; #[cfg(test)] mod test; @@ -87,7 +95,7 @@ impl MarlinConfig for MarlinRecursiveConfig { pub struct Marlin< F: PrimeField, FSF: PrimeField, - PC: PolynomialCommitment>, + PC: PolynomialCommitment, FS>, FS: FiatShamirRng, MC: MarlinConfig, >( @@ -98,22 +106,22 @@ pub struct Marlin< #[doc(hidden)] PhantomData, ); -fn compute_vk_hash(vk: &IndexVerifierKey) -> Vec +fn compute_vk_hash(vk: &IndexVerifierKey) -> Vec where F: PrimeField, FSF: PrimeField, - PC: PolynomialCommitment>, + PC: PolynomialCommitment, FS>, FS: FiatShamirRng, PC::Commitment: ToConstraintField, { - let mut vk_hash_rng = FS::new(); - vk_hash_rng.absorb_native_field_elements(&vk.index_comms); - vk_hash_rng.squeeze_native_field_elements(1) + let mut vk_hash_rng = FS::default(); + vk_hash_rng.absorb_native(&vk.index_comms); + vk_hash_rng.squeeze_native(1) } impl Marlin where - PC: PolynomialCommitment>, + PC: PolynomialCommitment, FS>, PC::VerifierKey: ToConstraintField, PC::Commitment: ToConstraintField, FS: FiatShamirRng, @@ -129,7 +137,7 @@ where num_variables: usize, num_non_zero: usize, rng: &mut R, - ) -> Result, Error> { + ) -> Result, Error> { let max_degree = AHPForR1CS::::max_degree(num_constraints, num_variables, num_non_zero)?; let setup_time = start_timer!(|| { format!( @@ -149,7 +157,7 @@ where pub fn circuit_specific_setup, R: RngCore>( c: C, rng: &mut R, - ) -> Result<(IndexProverKey, IndexVerifierKey), Error> { + ) -> Result<(IndexProverKey, IndexVerifierKey), Error> { let index_time = start_timer!(|| "Marlin::Index"); let for_recursion = MC::FOR_RECURSION; @@ -233,9 +241,9 @@ where /// keys. This is a deterministic algorithm that anyone can rerun. #[allow(clippy::type_complexity)] pub fn index>( - srs: &UniversalSRS, + srs: &UniversalSRS, c: C, - ) -> Result<(IndexProverKey, IndexVerifierKey), Error> { + ) -> Result<(IndexProverKey, IndexVerifierKey), Error> { let index_time = start_timer!(|| "Marlin::Index"); let for_recursion = MC::FOR_RECURSION; @@ -314,10 +322,10 @@ where /// Create a zkSNARK asserting that the constraint system is satisfied. pub fn prove, R: RngCore>( - index_pk: &IndexProverKey, + index_pk: &IndexProverKey, c: C, zk_rng: &mut R, - ) -> Result, Error> { + ) -> Result, Error> { let prover_time = start_timer!(|| "Marlin::Prover"); // TODO: Add check that c is in the correct mode. @@ -326,16 +334,14 @@ where let prover_init_state = AHPForR1CS::prover_init(&index_pk.index, c)?; let public_input = prover_init_state.public_input(); - let mut fs_rng = FS::new(); + let mut fs_rng = FS::default(); let hiding = !for_recursion; if for_recursion { fs_rng.absorb_bytes(&to_bytes![&Self::PROTOCOL_NAME].unwrap()); - fs_rng.absorb_native_field_elements(&compute_vk_hash::( - &index_pk.index_vk, - )); - fs_rng.absorb_nonnative_field_elements(&public_input, OptimizationType::Weight); + fs_rng.absorb_native(&compute_vk_hash::(&index_pk.index_vk)); + fs_rng.absorb_nonnative(&public_input, OptimizationType::Weight); } else { fs_rng.absorb_bytes( &to_bytes![&Self::PROTOCOL_NAME, &index_pk.index_vk, &public_input].unwrap(), @@ -358,11 +364,12 @@ where end_timer!(first_round_comm_time); if for_recursion { - fs_rng.absorb_native_field_elements(&first_comms); + fs_rng.absorb_native(&first_comms); + match prover_first_msg.clone() { ProverMsg::EmptyMessage => (), ProverMsg::FieldElements(v) => { - fs_rng.absorb_nonnative_field_elements(&v, OptimizationType::Weight) + fs_rng.absorb_nonnative(&v, OptimizationType::Weight) } } } else { @@ -377,7 +384,7 @@ where // Second round let (prover_second_msg, prover_second_oracles, prover_state) = - AHPForR1CS::prover_second_round(&verifier_first_msg, prover_state, zk_rng, hiding); + AHPForR1CS::prover_second_round(&verifier_first_msg, prover_state, hiding); let second_round_comm_time = start_timer!(|| "Committing to second round polys"); let (second_comms, second_comm_rands) = PC::commit( @@ -389,11 +396,12 @@ where end_timer!(second_round_comm_time); if for_recursion { - fs_rng.absorb_native_field_elements(&second_comms); + fs_rng.absorb_native(&second_comms); + match prover_second_msg.clone() { ProverMsg::EmptyMessage => (), ProverMsg::FieldElements(v) => { - fs_rng.absorb_nonnative_field_elements(&v, OptimizationType::Weight) + fs_rng.absorb_nonnative(&v, OptimizationType::Weight) } } } else { @@ -407,7 +415,7 @@ where // -------------------------------------------------------------------- // Third round let (prover_third_msg, prover_third_oracles) = - AHPForR1CS::prover_third_round(&verifier_second_msg, prover_state, zk_rng)?; + AHPForR1CS::prover_third_round(&verifier_second_msg, prover_state)?; let third_round_comm_time = start_timer!(|| "Committing to third round polys"); let (third_comms, third_comm_rands) = PC::commit( @@ -419,11 +427,12 @@ where end_timer!(third_round_comm_time); if for_recursion { - fs_rng.absorb_native_field_elements(&third_comms); + fs_rng.absorb_native(&third_comms); + match prover_third_msg.clone() { ProverMsg::EmptyMessage => (), ProverMsg::FieldElements(v) => { - fs_rng.absorb_nonnative_field_elements(&v, OptimizationType::Weight) + fs_rng.absorb_nonnative(&v, OptimizationType::Weight) } } } else { @@ -506,7 +515,7 @@ where // Compute the AHP verifier's query set. let (query_set, verifier_state) = - AHPForR1CS::verifier_query_set(verifier_state, &mut fs_rng, for_recursion); + AHPForR1CS::verifier_query_set::(verifier_state, for_recursion); let lc_s = AHPForR1CS::construct_linear_combinations( &public_input, &polynomials, @@ -532,46 +541,24 @@ where end_timer!(eval_time); if for_recursion { - fs_rng.absorb_nonnative_field_elements(&evaluations, OptimizationType::Weight); + fs_rng.absorb_nonnative(&evaluations, OptimizationType::Weight); } else { fs_rng.absorb_bytes(&to_bytes![&evaluations].unwrap()); } - let pc_proof = if for_recursion { - let num_open_challenges: usize = 7; - - let mut opening_challenges = Vec::::new(); - opening_challenges - .append(&mut fs_rng.squeeze_128_bits_nonnative_field_elements(num_open_challenges)); - - let opening_challenges_f = |i| opening_challenges[i as usize]; - - PC::open_combinations_individual_opening_challenges( - &index_pk.committer_key, - &lc_s, - polynomials, - &labeled_comms, - &query_set, - &opening_challenges_f, - &comm_rands, - Some(zk_rng), - ) - .map_err(Error::from_pc_err)? - } else { - let opening_challenge: F = fs_rng.squeeze_128_bits_nonnative_field_elements(1)[0]; - - PC::open_combinations( - &index_pk.committer_key, - &lc_s, - polynomials, - &labeled_comms, - &query_set, - opening_challenge, - &comm_rands, - Some(zk_rng), - ) - .map_err(Error::from_pc_err)? - }; + let mut opening_challenges = ChallengeGenerator::::new_multivariate(fs_rng); + + let pc_proof = PC::open_combinations( + &index_pk.committer_key, + &lc_s, + polynomials, + &labeled_comms, + &query_set, + &mut opening_challenges, + &comm_rands, + Some(zk_rng), + ) + .map_err(Error::from_pc_err)?; // Gather prover messages together. let prover_messages = vec![prover_first_msg, prover_second_msg, prover_third_msg]; @@ -585,9 +572,9 @@ where /// Verify that a proof for the constrain system defined by `C` asserts that /// all constraints are satisfied. pub fn verify( - index_vk: &IndexVerifierKey, + index_vk: &IndexVerifierKey, public_input: &[F], - proof: &Proof, + proof: &Proof, ) -> Result> { let verifier_time = start_timer!(|| "Marlin::Verify"); @@ -605,12 +592,12 @@ where let for_recursion = MC::FOR_RECURSION; - let mut fs_rng = FS::new(); + let mut fs_rng = FS::default(); if for_recursion { fs_rng.absorb_bytes(&to_bytes![&Self::PROTOCOL_NAME].unwrap()); - fs_rng.absorb_native_field_elements(&compute_vk_hash::(index_vk)); - fs_rng.absorb_nonnative_field_elements(&public_input, OptimizationType::Weight); + fs_rng.absorb_native(&compute_vk_hash::(index_vk)); + fs_rng.absorb_nonnative(&public_input, OptimizationType::Weight); } else { fs_rng .absorb_bytes(&to_bytes![&Self::PROTOCOL_NAME, &index_vk, &public_input].unwrap()); @@ -620,11 +607,11 @@ where // First round let first_comms = &proof.commitments[0]; if for_recursion { - fs_rng.absorb_native_field_elements(&first_comms); + fs_rng.absorb_native(&first_comms); match proof.prover_messages[0].clone() { ProverMsg::EmptyMessage => (), ProverMsg::FieldElements(v) => { - fs_rng.absorb_nonnative_field_elements(&v, OptimizationType::Weight) + fs_rng.absorb_nonnative(&v, OptimizationType::Weight) } }; } else { @@ -640,11 +627,11 @@ where let second_comms = &proof.commitments[1]; if for_recursion { - fs_rng.absorb_native_field_elements(&second_comms); + fs_rng.absorb_native(&second_comms); match proof.prover_messages[1].clone() { ProverMsg::EmptyMessage => (), ProverMsg::FieldElements(v) => { - fs_rng.absorb_nonnative_field_elements(&v, OptimizationType::Weight) + fs_rng.absorb_nonnative(&v, OptimizationType::Weight) } }; } else { @@ -659,11 +646,11 @@ where let third_comms = &proof.commitments[2]; if for_recursion { - fs_rng.absorb_native_field_elements(&third_comms); + fs_rng.absorb_native(&third_comms); match proof.prover_messages[2].clone() { ProverMsg::EmptyMessage => (), ProverMsg::FieldElements(v) => { - fs_rng.absorb_nonnative_field_elements(&v, OptimizationType::Weight) + fs_rng.absorb_nonnative(&v, OptimizationType::Weight) } }; } else { @@ -703,10 +690,10 @@ where .collect(); let (query_set, verifier_state) = - AHPForR1CS::verifier_query_set(verifier_state, &mut fs_rng, for_recursion); + AHPForR1CS::verifier_query_set::(verifier_state, for_recursion); if for_recursion { - fs_rng.absorb_nonnative_field_elements(&proof.evaluations, OptimizationType::Weight); + fs_rng.absorb_nonnative(&proof.evaluations, OptimizationType::Weight); } else { fs_rng.absorb_bytes(&to_bytes![&proof.evaluations].unwrap()); } @@ -734,56 +721,36 @@ where for_recursion, )?; - let evaluations_are_correct = if for_recursion { - let num_open_challenges: usize = 7; - - let mut opening_challenges = Vec::::new(); - opening_challenges - .append(&mut fs_rng.squeeze_128_bits_nonnative_field_elements(num_open_challenges)); - - let opening_challenges_f = |i| opening_challenges[i as usize]; - - PC::check_combinations_individual_opening_challenges( - &index_vk.verifier_key, - &lc_s, - &commitments, - &query_set, - &evaluations, - &proof.pc_proof, - &opening_challenges_f, - &mut fs_rng, - ) - .map_err(Error::from_pc_err)? - } else { - let opening_challenge: F = fs_rng.squeeze_128_bits_nonnative_field_elements(1)[0]; - - PC::check_combinations( - &index_vk.verifier_key, - &lc_s, - &commitments, - &query_set, - &evaluations, - &proof.pc_proof, - opening_challenge, - &mut fs_rng, - ) - .map_err(Error::from_pc_err)? - }; + let mut opening_challenges = ChallengeGenerator::::new_multivariate(fs_rng.clone()); + + let evaluations_are_correct = PC::check_combinations( + &index_vk.verifier_key, + &lc_s, + &commitments, + &query_set, + &evaluations, + &proof.pc_proof, + &mut opening_challenges, + &mut fs_rng, + ) + .map_err(Error::from_pc_err)?; if !evaluations_are_correct { eprintln!("PC::Check failed"); } + end_timer!(verifier_time, || format!( " PC::Check for AHP Verifier linear equations: {}", evaluations_are_correct )); + Ok(evaluations_are_correct) } pub fn prepared_verify( - prepared_vk: &PreparedIndexVerifierKey, + prepared_vk: &PreparedIndexVerifierKey, public_input: &[F], - proof: &Proof, + proof: &Proof, ) -> Result> { Self::verify(&prepared_vk.orig_vk, public_input, proof) } diff --git a/src/sponge/mod.rs b/src/sponge/mod.rs new file mode 100644 index 0000000..40414f9 --- /dev/null +++ b/src/sponge/mod.rs @@ -0,0 +1,62 @@ +use ark_ff::PrimeField; +use ark_nonnative_field::{params::OptimizationType, NonNativeFieldVar}; +use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError}; +use ark_sponge::{constraints::CryptographicSpongeVar, CryptographicSponge}; + +pub mod poseidon; + +pub trait CryptographicSpongeParameters { + fn from_rate(rate: usize) -> Self; +} + +pub trait CryptographicSpongeWithRate: CryptographicSponge +where + ::Parameters: CryptographicSpongeParameters, +{ + fn default_rate() -> usize; + + fn with_default_rate() -> Self { + let rate = Self::default_rate(); + + Self::from_rate(rate) + } + + fn from_rate(rate: usize) -> Self { + let params = + <::Parameters as CryptographicSpongeParameters>::from_rate( + rate, + ); + + ::new(¶ms) + } +} + +pub trait CryptographicSpongeVarNonNative: + CryptographicSpongeVar +where + >::Parameters: CryptographicSpongeParameters, +{ + fn default_rate() -> usize; + + fn with_default_rate(cs: ConstraintSystemRef) -> Self { + let rate = Self::default_rate(); + + Self::from_rate(cs, rate) + } + + fn from_rate(cs: ConstraintSystemRef, rate: usize) -> Self { + let params = + <>::Parameters as CryptographicSpongeParameters>::from_rate( + rate, + ); + + >::new(cs, ¶ms) + } + + /// Absorb non native elements + fn absorb_nonnative( + &mut self, + input: &[NonNativeFieldVar], + ty: OptimizationType, + ) -> Result<(), SynthesisError>; +} diff --git a/src/sponge/poseidon.rs b/src/sponge/poseidon.rs new file mode 100644 index 0000000..b1ff915 --- /dev/null +++ b/src/sponge/poseidon.rs @@ -0,0 +1,302 @@ +use core::marker::PhantomData; + +use ark_ff::{FpParameters, PrimeField}; +use ark_nonnative_field::{ + params::OptimizationType, AllocatedNonNativeFieldVar, NonNativeFieldVar, +}; +use ark_r1cs_std::fields::fp::FpVar; +use ark_r1cs_std::prelude::UInt8; +use ark_r1cs_std::{alloc::AllocVar, boolean::Boolean}; +use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError}; +use ark_sponge::constraints::AbsorbGadget; +use ark_sponge::{ + constraints::CryptographicSpongeVar, + poseidon::{constraints::PoseidonSpongeVar, PoseidonParameters, PoseidonSponge}, + CryptographicSponge, +}; + +use super::{CryptographicSpongeParameters, CryptographicSpongeVarNonNative}; +use crate::{overhead, CryptographicSpongeWithRate}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct PoseidonArguments { + pub prime_bits: u64, + pub full_rounds: u32, + pub partial_rounds: u32, + pub skip_matrices: u64, + + _field: PhantomData, +} + +impl PoseidonArguments { + pub const DEFAULT: Self = Self { + prime_bits: F::Params::MODULUS_BITS as u64, + full_rounds: 8, + partial_rounds: 60, + skip_matrices: 0, + _field: PhantomData, + }; +} + +impl CryptographicSpongeWithRate for PoseidonSponge { + fn default_rate() -> usize { + PoseidonParametersWithDefaultRate::::DEFAULT_RATE + } +} + +impl CryptographicSpongeParameters for PoseidonParameters { + fn from_rate(rate: usize) -> Self { + PoseidonParametersWithDefaultRate::from_rate(rate).params + } +} + +impl + CryptographicSpongeVarNonNative for PoseidonSpongeVar +where + PoseidonSpongeVar: CryptographicSpongeVar, + >::Parameters: CryptographicSpongeParameters, +{ + fn default_rate() -> usize { + PoseidonParametersWithDefaultRate::::DEFAULT_RATE + } + + fn absorb_nonnative( + &mut self, + input: &[NonNativeFieldVar], + ty: OptimizationType, + ) -> Result<(), SynthesisError> { + let mut src_limbs: Vec<(FpVar, CF)> = Vec::new(); + + for elem in input.iter() { + match elem { + NonNativeFieldVar::Constant(c) => { + let v = AllocatedNonNativeFieldVar::::new_constant(self.cs(), c)?; + + for limb in v.limbs.iter() { + let num_of_additions_over_normal_form = + if v.num_of_additions_over_normal_form == CF::zero() { + CF::one() + } else { + v.num_of_additions_over_normal_form + }; + + src_limbs.push((limb.clone(), num_of_additions_over_normal_form)); + } + } + NonNativeFieldVar::Var(v) => { + for limb in v.limbs.iter() { + let num_of_additions_over_normal_form = + if v.num_of_additions_over_normal_form == CF::zero() { + CF::one() + } else { + v.num_of_additions_over_normal_form + }; + + src_limbs.push((limb.clone(), num_of_additions_over_normal_form)); + } + } + } + } + + let capacity = CF::size_in_bits() - 1; + let mut dest_limbs = Vec::>::new(); + + if !src_limbs.is_empty() { + let params = + ark_nonnative_field::params::get_params(F::size_in_bits(), CF::size_in_bits(), ty); + + let adjustment_factor_lookup_table = { + let mut table = Vec::::new(); + + let mut cur = CF::one(); + for _ in 1..=capacity { + table.push(cur); + cur.double_in_place(); + } + + table + }; + + let mut i: usize = 0; + let src_len = src_limbs.len(); + while i < src_len { + let first = &src_limbs[i]; + let second = if i + 1 < src_len { + Some(&src_limbs[i + 1]) + } else { + None + }; + + let first_max_bits_per_limb = + params.bits_per_limb + overhead!(first.1 + &CF::one()); + let second_max_bits_per_limb = if second.is_some() { + params.bits_per_limb + overhead!(second.unwrap().1 + &CF::one()) + } else { + 0 + }; + + if second.is_some() + && first_max_bits_per_limb + second_max_bits_per_limb <= capacity + { + let adjustment_factor = + &adjustment_factor_lookup_table[second_max_bits_per_limb]; + + dest_limbs.push(&first.0 * *adjustment_factor + &second.unwrap().0); + i += 2; + } else { + dest_limbs.push(first.0.clone()); + i += 1; + } + } + } + + self.absorb(&dest_limbs)?; + + Ok(()) + } +} + +/// Parameters and RNG used +#[derive(Clone, Debug)] +pub struct PoseidonParametersWithDefaultRate { + pub params: PoseidonParameters, +} + +impl PoseidonParametersWithDefaultRate { + /// Default rate for poseidon + pub const DEFAULT_RATE: usize = 4; +} + +impl From> for PoseidonParametersWithDefaultRate { + fn from(params: PoseidonParameters) -> Self { + Self { params } + } +} + +impl CryptographicSpongeParameters for PoseidonParametersWithDefaultRate { + fn from_rate(rate: usize) -> Self { + let PoseidonArguments { + prime_bits, + full_rounds, + partial_rounds, + skip_matrices, + .. + } = PoseidonArguments::::DEFAULT; + + // TODO consume the arguments + let capacity = 1; + let alpha = 5; + let _ = (rate, prime_bits, skip_matrices); + + // TODO generate secure constants + let ark = F::one(); + let ark = vec![ark; 3]; + let ark = vec![ark; (full_rounds + partial_rounds) as usize]; + + // TODO generate secure matrix + let mds = F::one(); + let mds = vec![mds; rate + capacity]; + let mds = vec![mds; rate + capacity]; + + PoseidonParameters::new(full_rounds, partial_rounds, alpha, mds, ark).into() + } +} + +#[derive(Clone)] +/// Wrapper for [`PoseidonSponge`] +pub struct PoseidonSpongeWithDefaultRate { + pub s: PoseidonSponge, +} + +impl From> for PoseidonSpongeWithDefaultRate { + fn from(s: PoseidonSponge) -> Self { + Self { s } + } +} + +impl CryptographicSponge for PoseidonSpongeWithDefaultRate { + type Parameters = PoseidonParametersWithDefaultRate; + + fn new(p: &Self::Parameters) -> Self { + PoseidonSponge::new(&p.params).into() + } + + fn absorb(&mut self, input: &impl ark_sponge::Absorb) { + self.s.absorb(input) + } + + fn squeeze_bytes(&mut self, num_bytes: usize) -> Vec { + self.s.squeeze_bytes(num_bytes) + } + + fn squeeze_bits(&mut self, num_bits: usize) -> Vec { + self.s.squeeze_bits(num_bits) + } +} + +impl CryptographicSpongeWithRate for PoseidonSpongeWithDefaultRate { + fn default_rate() -> usize { + PoseidonParametersWithDefaultRate::::DEFAULT_RATE + } +} + +#[derive(Clone)] +/// Wrapper for [`PoseidonSpongeVar`] +pub struct PoseidonSpongeVarWithDefaultRate { + pub s: PoseidonSpongeVar, +} + +impl From> for PoseidonSpongeVarWithDefaultRate { + fn from(s: PoseidonSpongeVar) -> Self { + Self { s } + } +} + +impl CryptographicSpongeVar + for PoseidonSpongeVarWithDefaultRate +{ + type Parameters = PoseidonParametersWithDefaultRate; + + fn new(cs: ConstraintSystemRef, p: &Self::Parameters) -> Self { + PoseidonSpongeVar::new(cs, &p.params).into() + } + + fn cs(&self) -> ConstraintSystemRef { + self.s.cs() + } + + fn absorb(&mut self, input: &impl AbsorbGadget) -> Result<(), SynthesisError> { + self.s.absorb(input) + } + + fn squeeze_bytes(&mut self, num_bytes: usize) -> Result>, SynthesisError> { + self.s.squeeze_bytes(num_bytes) + } + + fn squeeze_bits(&mut self, num_bits: usize) -> Result>, SynthesisError> { + self.s.squeeze_bits(num_bits) + } + + fn squeeze_field_elements( + &mut self, + num_elements: usize, + ) -> Result>, SynthesisError> { + self.s.squeeze_field_elements(num_elements) + } +} + +impl + CryptographicSpongeVarNonNative for PoseidonSpongeVarWithDefaultRate +{ + fn default_rate() -> usize { + PoseidonParametersWithDefaultRate::::DEFAULT_RATE + } + + fn absorb_nonnative( + &mut self, + input: &[NonNativeFieldVar], + ty: OptimizationType, + ) -> Result<(), SynthesisError> { + self.s.absorb_nonnative(input, ty) + } +} diff --git a/src/test.rs b/src/test.rs index f8d5d1c..8d9c551 100644 --- a/src/test.rs +++ b/src/test.rs @@ -115,7 +115,7 @@ impl ConstraintSynthesizer for OutlineTestCircuit { mod marlin { use super::*; - use crate::{fiat_shamir::FiatShamirChaChaRng, Marlin, MarlinDefaultConfig}; + use crate::{FiatShamirChaChaRng, Marlin, MarlinDefaultConfig}; use ark_bls12_381::{Bls12_381, Fq, Fr}; use ark_ff::UniformRand; @@ -124,7 +124,8 @@ mod marlin { use ark_std::ops::MulAssign; use blake2::Blake2s; - type MultiPC = MarlinKZG10>; + type MultiPC = + MarlinKZG10, FiatShamirChaChaRng>; type MarlinInst = Marlin, MarlinDefaultConfig>; @@ -204,10 +205,7 @@ mod marlin { mod marlin_recursion { use super::*; - use crate::{ - fiat_shamir::{poseidon::PoseidonSponge, FiatShamirAlgebraicSpongeRng}, - Marlin, MarlinRecursiveConfig, - }; + use crate::{FiatShamirSpongeRng, Marlin, MarlinRecursiveConfig}; use ark_ec::{CurveCycle, PairingEngine, PairingFriendlyCycle}; use ark_ff::UniformRand; @@ -215,14 +213,16 @@ mod marlin_recursion { use ark_mnt6_298::MNT6_298; use ark_poly::polynomial::univariate::DensePolynomial; use ark_poly_commit::marlin_pc::MarlinKZG10; + use ark_sponge::poseidon::PoseidonSponge; use core::ops::MulAssign; - type MultiPC = MarlinKZG10>; + type MultiPC = + MarlinKZG10, FiatShamirSpongeRng>>; type MarlinInst = Marlin< Fr, Fq, MultiPC, - FiatShamirAlgebraicSpongeRng>, + FiatShamirSpongeRng>, MarlinRecursiveConfig, >; @@ -338,20 +338,9 @@ mod marlin_recursion { } mod fiat_shamir { - use crate::fiat_shamir::constraints::FiatShamirRngVar; - use crate::fiat_shamir::{ - constraints::FiatShamirAlgebraicSpongeRngVar, - poseidon::{constraints::PoseidonSpongeVar, PoseidonSponge}, - FiatShamirAlgebraicSpongeRng, FiatShamirChaChaRng, FiatShamirRng, - }; - use ark_ff::PrimeField; + use crate::fiat_shamir::{FiatShamirChaChaRng, FiatShamirRng}; use ark_mnt4_298::{Fq, Fr}; use ark_nonnative_field::params::OptimizationType; - use ark_nonnative_field::NonNativeFieldVar; - use ark_r1cs_std::alloc::AllocVar; - use ark_r1cs_std::bits::uint8::UInt8; - use ark_r1cs_std::R1CSVar; - use ark_relations::r1cs::{ConstraintSystem, ConstraintSystemRef, OptimizationGoal}; use ark_std::UniformRand; use blake2::Blake2s; @@ -380,141 +369,15 @@ mod fiat_shamir { ); } - let mut fs_rng = FiatShamirChaChaRng::::new(); - fs_rng - .absorb_nonnative_field_elements(&absorbed_rand_field_elems, OptimizationType::Weight); + let mut fs_rng = FiatShamirChaChaRng::::default(); + fs_rng.absorb_nonnative(&absorbed_rand_field_elems, OptimizationType::Weight); for absorbed_rand_byte_elem in absorbed_rand_byte_elems { fs_rng.absorb_bytes(&absorbed_rand_byte_elem); } - let _squeezed_fields_elems = fs_rng - .squeeze_nonnative_field_elements(NUM_SQUEEZED_FIELD_ELEMS, OptimizationType::Weight); + let _squeezed_fields_elems = + fs_rng.squeeze_nonnative(NUM_SQUEEZED_FIELD_ELEMS, OptimizationType::Weight); let _squeezed_short_fields_elems = - fs_rng.squeeze_128_bits_nonnative_field_elements(NUM_SQUEEZED_SHORT_FIELD_ELEMS); - } - - #[test] - fn test_poseidon() { - let rng = &mut ark_std::test_rng(); - - let mut absorbed_rand_field_elems = Vec::new(); - for _ in 0..NUM_ABSORBED_RAND_FIELD_ELEMS { - absorbed_rand_field_elems.push(Fr::rand(rng)); - } - - let mut absorbed_rand_byte_elems = Vec::>::new(); - for _ in 0..NUM_ABSORBED_RAND_BYTE_ELEMS { - absorbed_rand_byte_elems.push( - (0..SIZE_ABSORBED_BYTE_ELEM) - .map(|_| u8::rand(rng)) - .collect(), - ); - } - - // fs_rng in the plaintext world - let mut fs_rng = FiatShamirAlgebraicSpongeRng::>::new(); - - fs_rng - .absorb_nonnative_field_elements(&absorbed_rand_field_elems, OptimizationType::Weight); - - for absorbed_rand_byte_elem in &absorbed_rand_byte_elems { - fs_rng.absorb_bytes(absorbed_rand_byte_elem); - } - - let squeezed_fields_elems = fs_rng - .squeeze_nonnative_field_elements(NUM_SQUEEZED_FIELD_ELEMS, OptimizationType::Weight); - let squeezed_short_fields_elems = - fs_rng.squeeze_128_bits_nonnative_field_elements(NUM_SQUEEZED_SHORT_FIELD_ELEMS); - - // fs_rng in the constraint world - let cs_sys = ConstraintSystem::::new(); - let cs = ConstraintSystemRef::new(cs_sys); - cs.set_optimization_goal(OptimizationGoal::Weight); - let mut fs_rng_gadget = FiatShamirAlgebraicSpongeRngVar::< - Fr, - Fq, - PoseidonSponge, - PoseidonSpongeVar, - >::new(ark_relations::ns!(cs, "new").cs()); - - let mut absorbed_rand_field_elems_gadgets = Vec::new(); - for absorbed_rand_field_elem in absorbed_rand_field_elems.iter() { - absorbed_rand_field_elems_gadgets.push( - NonNativeFieldVar::::new_constant( - ark_relations::ns!(cs, "alloc elem"), - absorbed_rand_field_elem, - ) - .unwrap(), - ); - } - fs_rng_gadget - .absorb_nonnative_field_elements( - &absorbed_rand_field_elems_gadgets, - OptimizationType::Weight, - ) - .unwrap(); - - let mut absorbed_rand_byte_elems_gadgets = Vec::>>::new(); - for absorbed_rand_byte_elem in absorbed_rand_byte_elems.iter() { - let mut byte_gadget = Vec::>::new(); - for byte in absorbed_rand_byte_elem.iter() { - byte_gadget - .push(UInt8::new_constant(ark_relations::ns!(cs, "alloc byte"), byte).unwrap()); - } - absorbed_rand_byte_elems_gadgets.push(byte_gadget); - } - for absorbed_rand_byte_elems_gadget in absorbed_rand_byte_elems_gadgets.iter() { - fs_rng_gadget - .absorb_bytes(absorbed_rand_byte_elems_gadget) - .unwrap(); - } - - let squeezed_fields_elems_gadgets = fs_rng_gadget - .squeeze_field_elements(NUM_SQUEEZED_FIELD_ELEMS) - .unwrap(); - - let squeezed_short_fields_elems_gadgets = fs_rng_gadget - .squeeze_128_bits_field_elements(NUM_SQUEEZED_SHORT_FIELD_ELEMS) - .unwrap(); - - // compare elems - for (i, (left, right)) in squeezed_fields_elems - .iter() - .zip(squeezed_fields_elems_gadgets.iter()) - .enumerate() - { - assert_eq!( - left.into_repr(), - right.value().unwrap().into_repr(), - "{}: left = {:?}, right = {:?}", - i, - left.into_repr(), - right.value().unwrap().into_repr() - ); - } - - // compare short elems - for (i, (left, right)) in squeezed_short_fields_elems - .iter() - .zip(squeezed_short_fields_elems_gadgets.iter()) - .enumerate() - { - assert_eq!( - left.into_repr(), - right.value().unwrap().into_repr(), - "{}: left = {:?}, right = {:?}", - i, - left.into_repr(), - right.value().unwrap().into_repr() - ); - } - - if !cs.is_satisfied().unwrap() { - println!("\n========================================================="); - println!("\nUnsatisfied constraints:"); - println!("\n{:?}", cs.which_is_unsatisfied().unwrap()); - println!("\n========================================================="); - } - assert!(cs.is_satisfied().unwrap()); + fs_rng.squeeze_128_bits_nonnative(NUM_SQUEEZED_SHORT_FIELD_ELEMS); } }