Added back the parallel multi-pairings
rozbb committed Oct 26, 2023
1 parent eae6162 commit d21db3a
Showing 2 changed files with 64 additions and 6 deletions.
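Both changes rely on the same identity: a multi-pairing is a product of Miller loops in the target field followed by a single final exponentiation, and the final exponentiation x ↦ x^d is multiplicative, so

    ∏_i e(a_i, b_i) = FinalExp(∏_i ML(a_i, b_i)) = FinalExp(∏_chunks ∏_{i ∈ chunk} ML(a_i, b_i))

The per-chunk Miller loops are independent and can run on separate threads; their outputs are multiplied together in the target field, and only one final exponentiation is paid at the end.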
49 changes: 45 additions & 4 deletions inner_products/src/lib.rs
@@ -1,5 +1,5 @@
 use ark_ec::{
-    pairing::{Pairing, PairingOutput},
+    pairing::{MillerLoopOutput, Pairing, PairingOutput},
     CurveGroup,
 };
 use ark_ff::Field;
@@ -68,12 +68,53 @@ impl<P: Pairing> InnerProduct for PairingInnerProduct<P> {
                 right.len(),
             )));
         };
-        let aff_left = P::G1::normalize_batch(left);
-        let aff_right = P::G2::normalize_batch(right);
-        Ok(P::multi_pairing(&aff_left, &aff_right))
+
+        Ok(cfg_multi_pairing(left, right).unwrap())
     }
 }
 
+/// Equivalent to `P::multi_pairing`, but with more parallelism (if enabled)
+pub fn cfg_multi_pairing<P: Pairing>(left: &[P::G1], right: &[P::G2]) -> Option<PairingOutput<P>> {
+    // We make the input affine, then convert to prepared. We do this for speed, since the
+    // conversion from projective to prepared always goes through affine.
+    let aff_left = P::G1::normalize_batch(left);
+    let aff_right = P::G2::normalize_batch(right);
+
+    let left = cfg_iter!(aff_left)
+        .map(P::G1Prepared::from)
+        .collect::<Vec<_>>();
+    let right = cfg_iter!(aff_right)
+        .map(P::G2Prepared::from)
+        .collect::<Vec<_>>();
+
+    // We want to process N chunks in parallel, where N is the number of available threads
+    #[cfg(feature = "parallel")]
+    let num_chunks = rayon::current_num_threads();
+    #[cfg(not(feature = "parallel"))]
+    let num_chunks = 1;
+
+    let chunk_size = if num_chunks <= left.len() {
+        left.len() / num_chunks
+    } else {
+        // More threads than elements; give each element its own chunk
+        1
+    };
+
+    #[cfg(feature = "parallel")]
+    let (left_chunks, right_chunks) = (left.par_chunks(chunk_size), right.par_chunks(chunk_size));
+    #[cfg(not(feature = "parallel"))]
+    let (left_chunks, right_chunks) = (left.chunks(chunk_size), right.chunks(chunk_size));
+
+    // Compute the partial Miller loops and take their product. We take the product over
+    // P::TargetField because MillerLoopOutput doesn't impl Product
+    let ml_result = left_chunks
+        .zip(right_chunks)
+        .map(|(aa, bb)| P::multi_miller_loop(aa.iter().cloned(), bb.iter().cloned()).0)
+        .product();
+
+    P::final_exponentiation(MillerLoopOutput(ml_result))
+}
+
 #[derive(Copy, Clone)]
 pub struct MultiexponentiationInnerProduct<G: CurveGroup> {
     _projective: PhantomData<G>,
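A minimal usage sketch of the new helper. It assumes the ark-bls12-381 curve crate and that the crate above is imported as ark_inner_products; neither name appears in this commit, so treat both as assumptions.

use ark_bls12_381::{Bls12_381, G1Projective, G2Projective};
use ark_ec::pairing::Pairing;
use ark_std::UniformRand;

// Hypothetical import path; the package name of inner_products/ is assumed here
use ark_inner_products::cfg_multi_pairing;

fn main() {
    let mut rng = ark_std::test_rng();

    // Random projective inputs; cfg_multi_pairing batch-normalizes them internally
    let left: Vec<G1Projective> = (0..16).map(|_| G1Projective::rand(&mut rng)).collect();
    let right: Vec<G2Projective> = (0..16).map(|_| G2Projective::rand(&mut rng)).collect();

    // The chunked-parallel result must agree with the sequential arkworks multi-pairing
    let par = cfg_multi_pairing::<Bls12_381>(&left, &right).unwrap();
    let seq = Bls12_381::multi_pairing(&left, &right);
    assert_eq!(par, seq);
}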
21 changes: 19 additions & 2 deletions sipp/src/lib.rs
@@ -5,7 +5,7 @@
 use std::marker::PhantomData;
 
 use ark_ec::{
-    pairing::{Pairing, PairingOutput},
+    pairing::{MillerLoopOutput, Pairing, PairingOutput},
     scalar_mul::variable_base::VariableBaseMSM,
     CurveGroup,
 };
@@ -196,7 +196,24 @@ pub fn product_of_pairings_with_coeffs<E: Pairing>(
     let a = a.par_iter().map(E::G1Prepared::from).collect::<Vec<_>>();
     let b = b.par_iter().map(E::G2Prepared::from).collect::<Vec<_>>();
 
-    E::multi_pairing(a, b)
+    // We want to process N chunks in parallel, where N is the number of available threads
+    let num_chunks = rayon::current_num_threads();
+    let chunk_size = if num_chunks <= a.len() {
+        a.len() / num_chunks
+    } else {
+        // More threads than elements; give each element its own chunk
+        1
+    };
+
+    // Compute the partial Miller loops and take their product. We take the product over
+    // E::TargetField because MillerLoopOutput doesn't impl Product
+    let ml_result = a
+        .par_chunks(chunk_size)
+        .zip(b.par_chunks(chunk_size))
+        .map(|(aa, bb)| E::multi_miller_loop(aa.iter().cloned(), bb.iter().cloned()).0)
+        .product();
+
+    E::final_exponentiation(MillerLoopOutput(ml_result)).unwrap()
 }
 
 /// Compute the product of pairings of `a` and `b`.
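A note on the chunk_size arithmetic used in both files: with n elements and t rayon threads (t <= n), chunk_size = floor(n / t), which yields between t and 2t - 1 chunks. For example, n = 10 and t = 4 give chunk_size = 2 and therefore 5 chunks; rayon's work stealing absorbs the slight imbalance. With more threads than elements, chunk_size is clamped to 1 so each pairing becomes its own task.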
