diff --git a/Cargo.lock b/Cargo.lock index d601de0..f9d5fa1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -25,17 +25,18 @@ dependencies = [ [[package]] name = "ark-bls12-381" version = "0.3.0" -source = "git+https://github.com/arkworks-rs/curves#363426c1d41aa89c8e64bc4fd265b7cefe24fb03" +source = "git+https://github.com/arkworks-rs/curves#99831650f8021cb6a16481bac674420bc6c1a5a1" dependencies = [ "ark-ec", "ark-ff", + "ark-serialize", "ark-std", ] [[package]] name = "ark-ec" version = "0.3.0" -source = "git+https://github.com/arkworks-rs/algebra#aabe76c584b97c92c07c005e2d5297ce53e9d597" +source = "git+https://github.com/arkworks-rs/algebra#a4362f92f00986bce7e96dc4be45167fe45e6cda" dependencies = [ "ark-ff", "ark-poly", @@ -52,7 +53,7 @@ dependencies = [ [[package]] name = "ark-ff" version = "0.3.0" -source = "git+https://github.com/arkworks-rs/algebra#aabe76c584b97c92c07c005e2d5297ce53e9d597" +source = "git+https://github.com/arkworks-rs/algebra#a4362f92f00986bce7e96dc4be45167fe45e6cda" dependencies = [ "ark-ff-asm", "ark-ff-macros", @@ -60,6 +61,7 @@ dependencies = [ "ark-std", "derivative", "digest", + "itertools", "num-bigint", "num-traits", "paste", @@ -71,7 +73,7 @@ dependencies = [ [[package]] name = "ark-ff-asm" version = "0.3.0" -source = "git+https://github.com/arkworks-rs/algebra#aabe76c584b97c92c07c005e2d5297ce53e9d597" +source = "git+https://github.com/arkworks-rs/algebra#a4362f92f00986bce7e96dc4be45167fe45e6cda" dependencies = [ "quote", "syn", @@ -80,10 +82,11 @@ dependencies = [ [[package]] name = "ark-ff-macros" version = "0.3.0" -source = "git+https://github.com/arkworks-rs/algebra#aabe76c584b97c92c07c005e2d5297ce53e9d597" +source = "git+https://github.com/arkworks-rs/algebra#a4362f92f00986bce7e96dc4be45167fe45e6cda" dependencies = [ "num-bigint", "num-traits", + "proc-macro2", "quote", "syn", ] @@ -99,7 +102,7 @@ dependencies = [ "ark-relations", "ark-serialize", "ark-std", - "clap 3.2.22", + "clap 3.2.23", "criterion", "env_logger", "hashbrown", @@ -115,7 +118,7 @@ dependencies = [ [[package]] name = "ark-poly" version = "0.3.0" -source = "git+https://github.com/arkworks-rs/algebra#aabe76c584b97c92c07c005e2d5297ce53e9d597" +source = "git+https://github.com/arkworks-rs/algebra#a4362f92f00986bce7e96dc4be45167fe45e6cda" dependencies = [ "ark-ff", "ark-serialize", @@ -139,7 +142,7 @@ dependencies = [ [[package]] name = "ark-serialize" version = "0.3.0" -source = "git+https://github.com/arkworks-rs/algebra#aabe76c584b97c92c07c005e2d5297ce53e9d597" +source = "git+https://github.com/arkworks-rs/algebra#a4362f92f00986bce7e96dc4be45167fe45e6cda" dependencies = [ "ark-serialize-derive", "ark-std", @@ -149,7 +152,7 @@ dependencies = [ [[package]] name = "ark-serialize-derive" version = "0.3.0" -source = "git+https://github.com/arkworks-rs/algebra#aabe76c584b97c92c07c005e2d5297ce53e9d597" +source = "git+https://github.com/arkworks-rs/algebra#a4362f92f00986bce7e96dc4be45167fe45e6cda" dependencies = [ "proc-macro2", "quote", @@ -204,9 +207,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.11.0" +version = "3.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1ad822118d20d2c234f427000d5acc36eabe1e29a348c89b63dd60b13f28e5d" +checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" [[package]] name = "byteorder" @@ -239,9 +242,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.22" +version = "3.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"86447ad904c7fb335a790c9d7fe3d0d971dc523b8ccd1561a520de9a85302750" +checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" dependencies = [ "atty", "bitflags", @@ -251,7 +254,7 @@ dependencies = [ "once_cell", "strsim", "termcolor", - "textwrap 0.15.1", + "textwrap 0.16.0", ] [[package]] @@ -346,26 +349,24 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.10" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1" +checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", "memoffset", - "once_cell", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51887d4adc7b564537b15adcfb307936f8075dfcd5f00dde9a9f1d29383682bc" +checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" dependencies = [ "cfg-if", - "once_cell", ] [[package]] @@ -451,9 +452,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ "cfg-if", "libc", @@ -508,9 +509,9 @@ dependencies = [ [[package]] name = "itertools" -version = "0.10.4" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8bf247779e67a9082a4790b45e71ac7cfd1321331a5c856a74a9faebdab78d0" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] @@ -523,9 +524,9 @@ checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "itoa" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8af84674fe1f223a982c933a0ee1086ac4d4052aa0fb8060c12c6ad838e754" +checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" [[package]] name = "js-sys" @@ -550,9 +551,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.132" +version = "0.2.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5" +checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89" [[package]] name = "log" @@ -638,9 +639,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.14.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f7254b99e31cad77da24b08ebf628882739a608578bb1bcdfc1f9c21260d7c0" +checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" [[package]] name = "oorandom" @@ -650,9 +651,9 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "os_str_bytes" -version = "6.3.0" +version = "6.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ff7415e9ae3fff1225851df9e0d9e4e5479f947619774677a63572e55e80eff" +checksum = "3baf96e39c5359d2eb0dd6ccb42c62b91d9678aa68160d261b9e0ccbf9e9dea9" [[package]] name = "paste" @@ -726,9 +727,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.43" +version = "1.0.47" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a2ca2c61bc9f3d74d2886294ab7b9853abd9c1ad903a3ac7815c58989bb7bab" +checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" dependencies = [ "unicode-ident", ] @@ -899,9 +900,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.144" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f747710de3dcd43b88c9168773254e809d8ddbdf9653b84e2554ab219f17860" +checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" [[package]] name = "serde_cbor" @@ -915,9 +916,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.144" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94ed3a816fb1d101812f83e789f888322c34e291f894f19590dc310963e87a00" +checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" dependencies = [ "proc-macro2", "quote", @@ -926,11 +927,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.85" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" +checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" dependencies = [ - "itoa 1.0.3", + "itoa 1.0.4", "ryu", "serde", ] @@ -943,9 +944,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "syn" -version = "1.0.100" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52205623b1b0f064a4e71182c3b18ae902267282930c6d5462c91b859668426e" +checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d" dependencies = [ "proc-macro2", "quote", @@ -984,9 +985,9 @@ dependencies = [ [[package]] name = "textwrap" -version = "0.15.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "949517c0cf1bf4ee812e2e07e08ab448e3ae0d23472aee8a06c985f0c8815b16" +checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "tinytemplate" @@ -1000,9 +1001,9 @@ dependencies = [ [[package]] name = "tracing" -version = "0.1.36" +version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fce9567bd60a67d08a16488756721ba392f24f29006402881e43b19aac64307" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "pin-project-lite", @@ -1011,9 +1012,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.29" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aeea4303076558a00714b823f9ad67d58a3bbda1df83d8827d21193156e22f7" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ "once_cell", "valuable", @@ -1036,9 +1037,9 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "unicode-ident" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcc811dc4066ac62f84f11307873c4850cb653bfa9b1719cee2bd2204a4bc5dd" +checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" [[package]] name = "unicode-width" diff --git a/src/docs/lib_docs.md b/src/docs/lib_docs.md new file mode 100644 index 0000000..0619259 --- /dev/null 
+++ b/src/docs/lib_docs.md
@@ -0,0 +1,66 @@
+Gemini: Elastic Arguments for R1CS.
+
+This library provides essentially two arguments:
+- [`snark::Proof`], for non-preprocessing SNARKs.
+  It provides a non-interactive succinct argument of knowledge for R1CS
+  without an indexer, and where the verifier complexity is linear in the circuit size.
+- [`psnark::Proof`], for preprocessing SNARKs.
+  It provides a non-interactive succinct argument of knowledge for R1CS
+  where the verifier complexity is logarithmic in the circuit size.
+
+The library implements the Kate-Zaverucha-Goldberg protocol [[KZG](https://www.iacr.org/archive/asiacrypt2010/6477178/6477178.pdf)]
+for polynomial commitments.
+
+### KZG Protocol Outline
+
+1. Trusted setup and key generation
+2. Commitment to a polynomial $f(x) \in \FF\[x\]$
+3. Evaluation of the polynomial and proof generation
+4. Verification of the evaluation proof
+
+The `kzg` and [`multikzg`] modules contain implementations of both time-efficient and space-efficient
+versions of KZG polynomial commitments.
+
+The choice of the pairing-friendly elliptic curve is entirely up to the user.
+For example, crate [`ark_bls12_381`] contains the implementation for curve [`Bls12_381`](ark_bls12_381::Bls12_381).
+
+See the module documentation for `multikzg` below for an overview of the protocol math.
+
+Both arguments rely on some sub-protocols, implemented as separate modules in [`subprotocols`]
+and free to use for other protocols.
+
+# Building
+
+This package can be compiled with `cargo build`, and requires rust nightly at least
+until [`Iterator::advance_by`] hits stable.
+Test the package with `cargo test`.
+Compile the package documentation with `cargo rustdoc`, and open it in the default browser with `cargo rustdoc --open`.
+There's a bunch of feature flags that can be turned on:
+
+- `asm`, to turn on the assembly backend within [`ark-ff`](https://docs.rs/ark-ff/);
+- `parallel`, to turn on multi-threading. This requires the additional dependency [`rayon`](https://docs.rs/rayon/latest/rayon/);
+- `std`, to rely on the Rust Standard library;
+- `print-trace`, to print additional information concerning the execution time of the sub-protocols. **This feature must be enabled if you want to print the execution time of the examples.**
+
+# Benchmarking
+
+Micro-benchmarks are available and can be run with:
+
+```bash
+cargo bench
+```
+
+Execution of the (preprocessing-)SNARK for arbitrary instance sizes can be done by running
+the examples with:
+
+```bash
+cargo run --example snark -- -i
+```
+
+# License
+
+This package is licensed under the MIT license.
diff --git a/src/docs/multivariate_protocol.md b/src/docs/multivariate_protocol.md
new file mode 100644
index 0000000..7f91e80
--- /dev/null
+++ b/src/docs/multivariate_protocol.md
@@ -0,0 +1,61 @@
+This module implements multivariate polynomial commitments, while module `kzg` implements the univariate version. \
+As in the univariate version, $\tau \in \Z_p$ denotes a prime field element and $G \in \GG_1$ is a generator for the affine group $\GG_1$. \
+$H$ is a generator for the affine group $\GG_2$. $\GG_1$ and $\GG_2$ are pairing-friendly. \
+The univariate protocol is extended to commitments to multilinear polynomials $f(x_1, x_2, x_3, ... x_n)$. \
+For example, $f(x_1, ... x_n) = f_0 + f_1 \cdot x_1 + f_2 \cdot x_2 + f_3 \cdot x_1 \cdot x_2 + f_4 \cdot x_3 + f_5 \cdot x_3 \cdot x_1 + f_6 \cdot x_3 \cdot x_2 + f_7 \cdot x_3 \cdot x_2 \cdot x_1 + ...$ \
+
+##### Setup
+
+$\tau_i \in \Z_p$ are still prime field elements. \
+The commitment key is now defined as $ck = \(G, \tau_1 G, \tau_2 G, \tau_1 \tau_2 G, \tau_3 G, \tau_3 \tau_1 G ... \)$. \
+The verification key is defined as $vk = \(G, H, \tau_1 H, \tau_2 H, ... \tau_n H\)$. \
+The multilinear polynomial $f$ is represented by its vector of $2^n$ coefficients $\(f_0, ..., f_{2^n - 1}\)$, ordered as in the example above. \
+The commitment key is represented by struct `CommitterKeyMulti`, from which `VerifierKeyMulti` can be produced.
+
+##### Commitment
+
+The commitment step, implemented by `commit`, hides the choice of polynomial using the commitment key $ck$. \
+The commitment step again returns $C = \Sigma_i f_i \cdot ck_i$.
+
+##### Evaluation
+
+The evaluation step is given the committer key, the polynomial, and an evaluation vector $\hat{\alpha} \in \Z_p^n$. \
+`open` returns the evaluation (the output of the polynomial evaluated at $\hat{\alpha}$) and the proof, a set of quotients. \
+Quotients $q_i(x)$ are found by repeatedly dividing $f(x)$ by $\(x_i - \alpha_i\)$, such that $f(x) = \Sigma_i q_i(x) \cdot \(x_i - \alpha_i\) + r$. \
+Thus, the evaluation is $f( \hat{\alpha} ) = r \in \FF_p$. \
+The proof is $\( Q_1, ..., Q_n \)$ with $Q_i = \Sigma_j q_{i,j} \cdot ck_j$, where $q_{i,j}$ are the coefficients of $q_i$.
+
+##### Verification
+
+`verify` verifies that the group pairing equation $\epsilon(C - f(\hat{\alpha})G, H) = \Sigma_i \epsilon(Q_i, \tau_i H - \alpha_i H)$ is true.
+
+### Multivariate Batching
+
+If multiple polynomials are to be opened at one evaluation point, `batched_poly` takes a linear combination of the polynomials scaled by powers of a field element challenge, and then `batch_open_multi_polys` opens the combined polynomial at that point. \
+Say we have a set of $m$ multilinear polynomials $f_i$ of equal dimension to evaluate at $\alpha$. The verifier sends a challenge field element $c \in \FF_p$. \
+Then the scaling vector is computed as $(1, c, c^{2}, c^{3}...)$ and proofs are batched. \
+The statement to verify becomes $(\Sigma_i c^i \cdot f_i)(\alpha) = \Sigma_i c^i \cdot f_i(\alpha)$.
+
+
+# Example Multivariate Protocol Usage
+```
+use ark_bls12_381::Bls12_381;
+use ark_bls12_381::Fr;
+use ark_std::UniformRand;
+use ark_gemini::errors::{VerificationError, VerificationResult};
+use ark_gemini::multikzg::{Commitment, VerifierKeyMulti, CommitterKeyMulti};
+
+let dim = 3;
+let rng = &mut ark_std::test_rng();
+let ck = CommitterKeyMulti::<Bls12_381>::new(dim, rng);
+let vk = VerifierKeyMulti::from(&ck);
+
+let polynomial_flat = (0..1 << dim).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
+
+let alpha = (0..dim).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
+let commitment = ck.commit(&polynomial_flat);
+let (evaluation, proof) = ck.open(&polynomial_flat, &alpha);
+assert!(vk.verify(&commitment, &alpha, &evaluation, &proof).is_ok());
+```
+See also: Package tests.
+
diff --git a/src/docs/old_docs.md b/src/docs/old_docs.md
new file mode 100644
index 0000000..d41db10
--- /dev/null
+++ b/src/docs/old_docs.md
@@ -0,0 +1,77 @@
+An extension of the KZG (or Kate) polynomial commitment for multilinear polynomials, space- and time-efficient.
+
+# Background
+[[KZG](https://www.iacr.org/archive/asiacrypt2010/6477178/6477178.pdf)]
+commitments are described here.
+
+We encode a multilinear polynomial as follows.
+For an \\(n\\) dimensional multilinear polynomial, let \\(i = [b_1, b_2, ... b_n]_2\\). That is,
+\\(b_1, b_2 ... b_n\\) is the binary representation of \\(i\\). Then, multiply \\(f_i\\) by the
+\\(j\\)'th component of \\(x\\) if and only if \\(b_j\\) is 1.
+
+A verification key \\(VK\\) consists of a G1 element \\(G\\), a G2 element \\(H\\),
+and an n dimensional vector of G2 elements \\(H_0, H_1, ... H_{n - 1}\\), where \\(H_i = \tau_i H\\).
+
+- A [`CommitterKeyMulti`](self::CommitterKeyMulti) consists of a sequence
+  \\(\vec PK \defeq G \times (1, \tau_0, \tau_1, \tau_0 \times \tau_1, \dots, \prod_j{\tau_j})\\).
+  - This is the expansion of an n-dimensional vector \\(\tau\\), where components are multiplied in a way
+    mirroring the multilinear expansion described earlier.
+- A [`Commitment`](self::Commitment) to a polynomial \\(f(x)\\) is \\(C \defeq \langle \vec f, \vec PK \rangle \\).
+- An [`EvaluationProofMulti`](self::EvaluationProofMulti)
+for the polynomial \\(f\\)
+at the evaluation point \\(\alpha\\)
+is a commitment to the quotients of the repeated division of \\(f(x)\\) by \\((x_i - \alpha_i)\\).
+After all \\(n\\) divisions, the remainder is the evaluation \\(f(\alpha)\\).
+We refer to the proof as \\(\pi\\).
+
+To verify a proof \\(\pi\\) proving that \\(f(\alpha) = \mu\\), one considers the pairing equation:
+\\[
+e(C - f(\alpha)G, H) = \sum_i{e(Q_i, H_i - \alpha_i H)}
+\\]
+
+
+
+# Examples
+
+When creating a new SRS, one will need to specify only the number of variables (the dimension) of the multilinear polynomial
+to commit to.
+From the SRS, it is possible to derive the verification key
+[`VerifierKeyMulti`](self::VerifierKeyMulti).
+
+```
+use ark_gemini::multikzg::{CommitterKeyMulti, VerifierKeyMulti};
+use ark_bls12_381::{Fr, Bls12_381};
+
+let rng = &mut ark_std::test_rng();
+let dim = 2;
+
+let ck = CommitterKeyMulti::<Bls12_381>::new(dim, rng);
+# // XXX. if you change the following lines,
+# // please note that documentation below might break.
+# let f = vec![Fr::from(1u64), Fr::from(2u64), Fr::from(4u64), Fr::from(8u64)];
+# let commitment = ck.commit(&f);
+# let alpha = vec![Fr::from(42u64), Fr::from(43u64)];
+# let (evaluation, proof) = ck.open(&f, &alpha);
+# let vk = VerifierKeyMulti::from(&ck);
+# assert!(vk.verify(&commitment, &alpha, &evaluation, &proof).is_ok())
+```
+
+Then to commit to a polynomial `f`:
+```ignore
+let f = vec![Fr::from(1u64), Fr::from(2u64), Fr::from(4u64), Fr::from(8u64)];
+let commitment = ck.commit(&f);
+```
+To prove the evaluation of `f` at a point `alpha`:
+
+```ignore
+let alpha = vec![Fr::from(42u64), Fr::from(43u64)];
+let (evaluation, proof) = ck.open(&f, &alpha);
+```
+To verify that an opening is correct:
+```ignore
+use ark_gemini::multikzg::VerifierKeyMulti;
+
+let vk = VerifierKeyMulti::from(&ck);
+assert!(vk.verify(&commitment, &alpha, &evaluation, &proof).is_ok())
+```
diff --git a/src/docs/old_univariate_docs.md b/src/docs/old_univariate_docs.md
new file mode 100644
index 0000000..deb533a
--- /dev/null
+++ b/src/docs/old_univariate_docs.md
@@ -0,0 +1,72 @@
+ The KZG (or Kate) polynomial commitment, space- and time-efficient.
+# Background
+[[KZG](https://www.iacr.org/archive/asiacrypt2010/6477178/6477178.pdf)]
+commitments are pretty simple:
+- A [`CommitterKey`](self::kzg::CommitterKey) consists of a sequence $\vec G \defeq (G, \tau G, \dots, \tau^DG)$.
+- A [`Commitment`](self::kzg::Commitment) to a polynomial $f(x)$ is $C \defeq \langle \vec f, \vec G \rangle $.
+- An [`EvaluationProof`](self::kzg::EvaluationProof)
+for the polynomial $f$
+at the evaluation point $\alpha$
+is a commitment to the quotient of $f(x)$ by $(x - \alpha)$.
+The remainder is the evaluation $f(\alpha)$.
+When evaluating over points $(\alpha_0, \dots, \alpha_m)$,
+we can consider at once the quotient of $f(x)$ by $Z$ (the polynomial whose roots are $\alpha_i$).
+The remainder is a polynomial $r$ such that $r(\alpha_i) = f(\alpha_i)$.
+We refer to the proof as $\pi$.
+To verify a proof $\pi$ proving that $f(\alpha) = \mu$, one considers the pairing equation:
+\\[
+e(C - \mu G, H) = e(\pi, \tau H - \alpha H)
+\\]
+To verify a proof $\pi$ over a set of points $f(\alpha_i) = \mu_i$,
+consider the polynomial $\nu$ such that $\nu(\alpha_i) = \mu_i $, and check
+(where $\nu$ and $Z$ stand for their commitments):
+\\[
+e(C - \nu, H) = e(\pi, Z).
+\\]
+It is also possible to open multiple polynomials $f_0, \dots, f_n$
+ _on the same set of evaluation points_
+by asking the verifier a random challenge $\eta$, and opening instead
+$\sum_i \eta^i f_i $.
+_Nota bene:_ although it is also possible to open multiple polynomials
+over different points [[BDFG20](https://eprint.iacr.org/2020/081.pdf)],
+this is not currently supported by our implementation.
+
+
+# Examples
+When creating a new SRS, one must specify a degree bound `max_degree`
+for the commitment polynomials, and a degree bound `max_evals` for
+the maximum number of opening points.
+From the SRS, it is possible to derive the verification key
+[`VerifierKey`](self::kzg::VerifierKey).
+```
+use ark_gemini::kzg::CommitterKey;
+use ark_bls12_381::{Fr, Bls12_381};
+let rng = &mut ark_std::test_rng();
+let max_degree = 100;
+let max_evals = 10;
+let ck = CommitterKey::<Bls12_381>::new(max_degree, max_evals, rng);
+# // XXX. if you change the following lines,
+# // please note that documentation below might break.
+# let f = vec![Fr::from(1u64), Fr::from(2u64), Fr::from(4u64), Fr::from(8u64)];
+# let commitment = ck.commit(&f);
+# let alpha = Fr::from(42u64);
+# let (evaluation, proof) = ck.open(&f, &alpha);
+# use ark_gemini::kzg::VerifierKey;
+# let vk = VerifierKey::from(&ck);
+# assert!(vk.verify(&commitment, &alpha, &evaluation, &proof).is_ok())
+```
+Then to commit to a polynomial `f`:
+```ignore
+let f = vec![Fr::from(1u64), Fr::from(2u64), Fr::from(4u64), Fr::from(8u64)];
+let commitment = ck.commit(&f);
+```
+To prove the evaluation of `f` at a point `alpha`:
+```ignore
+let alpha = Fr::from(42u64);
+let (evaluation, proof) = ck.open(&f, &alpha);
+```
+To verify that an opening is correct:
+```ignore
+use ark_gemini::kzg::VerifierKey;
+
+let vk = VerifierKey::from(&ck);
+assert!(vk.verify(&commitment, &alpha, &evaluation, &proof).is_ok())
+```
\ No newline at end of file
diff --git a/src/docs/univariate_protocol.md b/src/docs/univariate_protocol.md
new file mode 100644
index 0000000..0fade42
--- /dev/null
+++ b/src/docs/univariate_protocol.md
@@ -0,0 +1,64 @@
+##### Setup
+
+$\tau \in \Z_p$ is a prime field element, and $G \in \GG_1$ is a generator for the affine group $\GG_1$. \
+$H$ is a generator for the affine group $\GG_2$. $\GG_1$ and $\GG_2$ are pairing-friendly. \
+The commitment key is defined as $ck = \(G, \tau G, \tau^2 G, ..., \tau^{n-1} G \)$. \
+The verification key is defined as $vk = \(G, H, \tau H \)$. \
+The polynomial $f$ is a vector of coefficients $f(x) = \Sigma_{i=0}^{n-1} f_i \cdot x^i$.
+
+##### Commitment
+The commitment step hides the choice of polynomial using the commitment key $ck$. \
+The commitment step returns $C = \Sigma_i f_i \cdot ck_i$.
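+
+For intuition, a worked instance of the commitment step: with $n = 3$ and $f(x) = f_0 + f_1 x + f_2 x^2$, \
+$C = f_0 \cdot G + f_1 \cdot (\tau G) + f_2 \cdot (\tau^2 G) = f(\tau) \cdot G$; \
+that is, $C$ is the evaluation of $f$ at the secret point $\tau$, carried out "in the exponent" of $G$.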
+
+##### Evaluation
+
+The evaluation step is given the committer key, the polynomial, and an evaluation point $\alpha \in \Z_p$. \
+It returns the evaluation (the output of the polynomial evaluated at $\alpha$) and the proof, a quotient polynomial. \
+The quotient $q(x)$ is found by dividing $f(x)$ by $\(x - \alpha\)$, such that $f(x) = q(x) \cdot \(x - \alpha\) + r$. \
+Thus, the evaluation is $f( \alpha ) = r \in \FF_p$. \
+The proof is $Q = \Sigma_i q_i \cdot ck_i$, where $q_i$ are the coefficients of $q(x)$.
+
+##### Verification
+
+Verification checks that the group pairing equation $\epsilon(C - f(\alpha)G, H) = \epsilon(Q, \tau H - \alpha H)$ holds. \
+The pairing $\epsilon$ is defined by the user's choice of groups $\GG_1, \GG_2$ and their implied pairing scheme.
+
+# Examples
+When creating a new SRS, one must specify a degree bound `max_degree`
+for the commitment polynomials, and a degree bound `max_evals` for
+the maximum number of opening points.
+From the SRS, it is possible to derive the verification key
+[`VerifierKey`](self::kzg::VerifierKey).
+```
+use ark_gemini::kzg::CommitterKey;
+use ark_bls12_381::{Fr, Bls12_381};
+let rng = &mut ark_std::test_rng();
+let max_degree = 100;
+let max_evals = 10;
+let ck = CommitterKey::<Bls12_381>::new(max_degree, max_evals, rng);
+# // XXX. if you change the following lines,
+# // please note that documentation below might break.
+# let f = vec![Fr::from(1u64), Fr::from(2u64), Fr::from(4u64), Fr::from(8u64)];
+# let commitment = ck.commit(&f);
+# let alpha = Fr::from(42u64);
+# let (evaluation, proof) = ck.open(&f, &alpha);
+# use ark_gemini::kzg::VerifierKey;
+# let vk = VerifierKey::from(&ck);
+# assert!(vk.verify(&commitment, &alpha, &evaluation, &proof).is_ok())
+```
+Then to commit to a polynomial `f`:
+```ignore
+let f = vec![Fr::from(1u64), Fr::from(2u64), Fr::from(4u64), Fr::from(8u64)];
+let commitment = ck.commit(&f);
+```
+To prove the evaluation of `f` at a point `alpha`:
+```ignore
+let alpha = Fr::from(42u64);
+let (evaluation, proof) = ck.open(&f, &alpha);
+```
+To verify that an opening is correct:
+```ignore
+use ark_gemini::kzg::VerifierKey;
+let vk = VerifierKey::from(&ck);
+assert!(vk.verify(&commitment, &alpha, &evaluation, &proof).is_ok())
+```
\ No newline at end of file
diff --git a/src/lib.rs b/src/lib.rs
index 7e69faf..5316a82 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,52 +1,4 @@
-//! Gemini: elastic arguments for R1CS.
-//!
-//! This library provides essentually two arguments:
-//! - [`snark::Proof`], for non-preprocessing SNARKs.
-//! It provides a non-interactive succinct argument of knowledge for R1CS
-//! without indexer, and where the verifier complexity is linear in the circuit size.
-//! - [`psnark::Proof`] for preprocessing SNARKs.
-//! It provides a non-interactive succinct argument of knowledge for R1CS
-//! where the verifier complexity is logarithmic in the circuit size.
-//!
-//! Choice of the pairing-friendly elliptic curve,
-//! is entirely up to the implementor.
-//! All arguments are internally using the [`kzg`](crate::kzg) commitment scheme.
-//! Support for generic univariate or multivariate commitments will is scheduled and will
-//! happen at some point in the future.
-//!
-//! Both arguments rely on some sub-protocols, implemented as separate modules in [`subprotocols`]
-//! and free of use for other protocols.
-//!
-//! # Building
-//!
-//! This package can be compiled with `cargo build`, and requires rust nightly at least
-//! until [`Iterator::advance_by`] hits stable. There's a bunch of feature flags that can be turned
-//! on:
-//!
-//! - `asm`, to turn on the assembly backend within [`ark-ff`](https://docs.rs/ark-ff/); -//! - `parallel`, to turn on multi-threading. This requires the additional dependency [`rayon`](https://docs.rs/rayon/latest/rayon/); -//! - `std`, to rely on the Rust Standard library; -//! - `print-trace`, to print additional information concerning the execution time of the sub-protocols. **This feature must be enabled if you want to print the execution time of the examples.** -//! -//! # Benchmarking -//! -//! Micro-benchmarks aare available and can be fired with: -//! -//! ```bash -//! cargo bench -//! ``` -//! -//! Execution of (preprocessing-)SNARK for arbitrary instance sizes can be done running -//! the examples with: -//! -//! ```bash -//! cargo run --example snark -- -i -//! ``` -//! -//! # License -//! -//! This package is licensed under MIT license. -//! +#![doc = include_str!("docs/lib_docs.md")] #![feature(iter_advance_by)] #![no_std] @@ -74,7 +26,27 @@ const SPACE_TIME_THRESHOLD: usize = 22; pub mod errors; pub mod iterable; + + +/// \ +/// +/// # Multi KZG Protocol Math Overview +/// +/// ### Univariate Polynomial Commitments +/// +#[doc = include_str!("docs/univariate_protocol.md")] pub mod kzg; + +/// \ +/// +/// # Multi KZG Protocol Math Overview +/// +/// ### Multivariate Polynomial Commitments +/// +#[doc = include_str!("docs/multivariate_protocol.md")] +pub mod multikzg; + + pub mod psnark; pub mod snark; pub mod subprotocols; @@ -83,5 +55,6 @@ pub mod subprotocols; #[doc(hidden)] pub mod circuit; -pub mod misc; +mod misc; + mod transcript; diff --git a/src/misc.rs b/src/misc.rs index 48febd2..b078f8e 100644 --- a/src/misc.rs +++ b/src/misc.rs @@ -1,10 +1,13 @@ use ark_ff::Field; use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial}; +use ark_poly::multivariate::SparsePolynomial; +use ark_poly::multivariate::Term; use ark_std::borrow::Borrow; use ark_std::vec::Vec; use crate::circuit::Matrix; -use ark_std::collections::{BTreeMap, BTreeSet}; +use ark_std::collections::{BTreeMap, BTreeSet, HashSet}; +use ark_std::rand::RngCore; pub(crate) const TENSOR_EXPANSION_LOG: usize = 16; pub(crate) const TENSOR_EXPANSION: usize = (1 << TENSOR_EXPANSION_LOG) - 1; @@ -47,6 +50,164 @@ where .unwrap_or_else(|| Vec::new()) } +/// Given as input `elements`, an array of field elements +/// \\(\rho_0, \dots, \rho_{n-1}\\) +/// compute the tensor product +/// \\( \otimes_j (1, \rho_j )\\) +pub fn tensor(elements: &[F]) -> Vec { + if elements.is_empty() { + Vec::new() + } else { + let mut tensor = vec![F::one(); 1 << elements.len()]; + let mut elements_iterator = elements.iter().enumerate(); + + tensor[1] = *elements_iterator + .next() + .expect("Expecting at least one element in the tensor product.") + .1; + // guaranteed to have at least one element. + + for (i, element) in elements_iterator { + for j in 0..1 << i { + tensor[(1 << i) + j] = tensor[j] * element; + } + } + tensor + } +} + +pub(crate) type PartialTensor = Vec>; + +/// Partially expand the tensor product +/// \\(\otimes (1, \rho_j)\\) +/// XXX TODO: This function is pub(crate) as in a previous version of this library, +/// Iterable: Copy and hence couldn't store vectors itself. +/// This is not anymore the case thus it can be moved inside init. 
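+/// For example, with `TENSOR_EXPANSION_LOG = 16`, a 40-element input is split into chunks of
+/// 16, 16, and 8 elements, and each chunk is expanded into its full tensor of `2^k - 1` entries
+/// (the leading `1`, corresponding to the empty product, is removed).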
+pub fn expand_tensor(elements: &[F]) -> PartialTensor { + // expected_len = ceil(tensor_len / N) + let expected_len = ceil_div(elements.len(), TENSOR_EXPANSION_LOG); + let mut expanded_tensor = Vec::with_capacity(expected_len); + + for i in 0..expected_len { + let mut got = if (i + 1) * TENSOR_EXPANSION_LOG <= elements.len() { + tensor(&elements[i * TENSOR_EXPANSION_LOG..(i + 1) * TENSOR_EXPANSION_LOG]) + } else { + tensor(&elements[i * TENSOR_EXPANSION_LOG..]) + }; + // remove the first element (1) that is the tensor with no element. + got.remove(0); + expanded_tensor.push(got); + } + + expanded_tensor +} + +/// Generate a random vector of field elements such that they're all different +pub(crate) fn random_unique_vector(size: usize, rng: &mut impl RngCore) -> Vec +where + F: Field, +{ + let mut result = HashSet::new(); + while result.len() < size { + result.insert(F::rand(rng)); + } + result.into_iter().collect::>() +} + +pub(crate) fn random_vector(size: usize, rng: &mut impl RngCore) -> Vec +where + F: Field, +{ + (0..size).map(|_| F::rand(rng)).collect::>() +} + +/// Evaluation of a multilinear polynomial at point `x`. +/// Consider the i'th coefficient of n-dimensional polynomial, and i's binary representation +/// $i_0, i_1 \cdots i_n$. Multiply the i'th coefficient by $\prod_{j=1}^{n}i_jx_j$ +/// to get a multilinear polynomial. +/// Optimized for speed, not space. +/// +#[cfg(test)] +pub(crate) fn evaluate_multi_poly(polynomial: &[F], x: &[F]) -> F +where + F: Field, +{ + // check that lengths line up + assert_eq!(polynomial.len(), 1 << x.len()); + let polynomial_iterator = polynomial.iter().enumerate(); + let mut result = F::zero(); + let tensor_x = tensor(x); + for (i, polynomial_i) in polynomial_iterator { + result += tensor_x[i] * polynomial_i; + } + result +} + +#[test] +fn test_evaluate_multi_poly() { + use crate::ark_std::One; + use ark_bls12_381::Fr; + let coefs_ones = [Fr::one(); 16]; + let point_ones = [Fr::one(); 4]; + let mut result = evaluate_multi_poly(&coefs_ones, &point_ones); + assert_eq!(result, Fr::from(16)); + println!("result: {}", evaluate_multi_poly(&coefs_ones, &point_ones)); + + // This evaluation should be 112. I chose these + let coefs = [0_u64, 6, 9, 2, 4, 8, 5, 1]; + let point = [4_u64, 2, 1]; + // How can I turn this into a function + let coefs_fr = coefs.into_iter().map(|x| Fr::from(x)).collect::>(); + let point_fr = point.into_iter().map(|x| Fr::from(x)).collect::>(); + + let expected_evaluation = coefs_fr[0] + + coefs_fr[1] * point_fr[0] + + coefs_fr[2] * point_fr[1] + + coefs_fr[3] * point_fr[0] * point_fr[1] + + coefs_fr[4] * point_fr[2] + + coefs_fr[5] * point_fr[2] * point_fr[0] + + coefs_fr[6] * point_fr[2] * point_fr[1] + + coefs_fr[7] * point_fr[2] * point_fr[1] * point_fr[0]; + + result = evaluate_multi_poly(&coefs_fr, &point_fr); + assert_eq!(result, expected_evaluation); +} + +/// divide polynomials +/// for evaluation point \\(alpha_1, alpha_2, ... alpha_n\\) +/// get some polynomials \\(q_1, ... q_n\\) such that polynomial = \\(\sum_i{q_i(x)(x_i - \alpha_i)} + f(\alpha)\\) +/// polynomial is ordered f0, f1, f2, .... f_N +/// Take the binary expansion of i: b0, b1, ... bn. If b_j is 1, then multiply f_i by x_j. 
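+/// For example, in one variable, `polynomial = [f0, f1]` and `eval_point = [a]` decompose as
+/// \\(f_0 + f_1 x = f_1 (x - a) + (f_0 + f_1 a)\\): the single quotient is `[f1, 0]` and the
+/// returned evaluation is `f0 + f1 * a` (cf. `test_multi_poly_decompose` below).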
+/// +pub fn multi_poly_decompose(polynomial: &[F], eval_point: &[F]) -> (Vec>, F) { + let mut results = Vec::new(); + for _i in 0..eval_point.len() { + results.push(vec![F::zero(); polynomial.len()]); + } + let mut evaluation = F::zero(); + for (ind, item) in polynomial.iter().enumerate() { + // if the i'th bit of j is 1, that means it shouldn't be in the quotient + let mut term = *item; + let mut ind_shifted = ind; + let _counter = 0; + for dim in 0..eval_point.len() { + if ind_shifted == 0 { + break; + } + if ind_shifted & 1 == 1 { + let dim_bit_off = (ind_shifted >> 1) << (1 + dim); + results[dim][dim_bit_off] += term; + term *= eval_point[dim]; + } + ind_shifted >>= 1; + } + evaluation += term; + } + + // assert_eq!(evaluate_multi_poly(&remainder, evaluation_point), remainder[0]); + (results, evaluation) +} + /// Helper function for folding single polynomial. #[inline] pub(crate) fn fold_polynomial(f: &[F], r: F) -> Vec { @@ -126,53 +287,7 @@ pub fn product_vector_matrix(z: &[F], matrix: &[Vec<(F, usize)>]) -> V res } -/// Given as input `elements`, an array of field elements -/// \\(\rho_0, \dots, \rho_{n-1}\\) -/// compute the tensor product -/// \\( \otimes_j (1, \rho_j )\\) -pub fn tensor(elements: &[F]) -> Vec { - assert!(!elements.is_empty()); - let mut tensor = vec![F::one(); 1 << elements.len()]; - let mut elements_iterator = elements.iter().enumerate(); - - tensor[1] = *elements_iterator - .next() - .expect("Expecting at lest one element in the tensor product.") - .1; - // guaranteed to have at least one element. - for (i, element) in elements_iterator { - for j in 0..1 << i { - tensor[(1 << i) + j] = tensor[j] * element; - } - } - tensor -} - -pub(crate) type PartialTensor = Vec>; - -/// Partially expand the tensor product -/// \\(\otimes (1, \rho_j)\\) -/// XXX TODO: This function is pub(crate) as in a previous version of this library, -/// Iterable: Copy and hence couldn't store vectors itself. -/// This is not anymore the case thus it can be moved inside init. -pub fn expand_tensor(elements: &[F]) -> PartialTensor { - // expected_len = ceil(tensor_len / N) - let expected_len = ceil_div(elements.len(), TENSOR_EXPANSION_LOG); - let mut expanded_tensor = Vec::with_capacity(expected_len); - - for i in 0..expected_len { - let mut got = if (i + 1) * TENSOR_EXPANSION_LOG <= elements.len() { - tensor(&elements[i * TENSOR_EXPANSION_LOG..(i + 1) * TENSOR_EXPANSION_LOG]) - } else { - tensor(&elements[i * TENSOR_EXPANSION_LOG..]) - }; - // remove the first element (1) that is the tensor with no element. - got.remove(0); - expanded_tensor.push(got); - } - expanded_tensor -} /// Polynomial evaluation, assuming that the /// coeffients are in big-endian. 
@@ -412,3 +527,38 @@ fn test_evaluate_index_poly() { let expected = evaluate_le(&index_polynomial, &x); assert_eq!(got, expected); } + +#[test] +fn test_multi_poly_decompose() { + use crate::ark_std::Zero; + use ark_bls12_381::Fr; + let polynomial_small = vec![Fr::from(2_i32), Fr::from(3_i32)]; + let eval_point_small: Vec = vec![Fr::from(6_i32)]; + let (quotients_small, remainder_small) = + multi_poly_decompose(&polynomial_small, &eval_point_small); + println!("testing small values"); + assert_eq!(quotients_small[0][0], Fr::from(3_i32)); + assert_eq!(remainder_small, Fr::from(20_i32)); + println!("testing random values"); + let reps = 3; + let dim = 12; + let rng = &mut ark_std::test_rng(); + let polynomial: Vec = random_vector(1 << dim, rng); + let eval_point: Vec = random_vector(dim, rng); + + let (quotients, remainder) = multi_poly_decompose(&polynomial, &eval_point); + for _ in 0..reps { + let test_point: Vec = random_unique_vector(dim, rng); + let mut sum = Fr::zero(); + for (idx, quotient) in quotients.iter().enumerate() { + let eval_quotient = evaluate_multi_poly(quotient, &test_point); + + // sum (q_i(x) * (x_i - alpha_i)) + remainder = f(x) + sum += eval_quotient * (test_point[idx] - eval_point[idx]); + } + assert_eq!( + sum + remainder, + evaluate_multi_poly(&polynomial, &test_point) + ); + } +} diff --git a/src/multikzg/division_stream.rs b/src/multikzg/division_stream.rs new file mode 100644 index 0000000..ec68920 --- /dev/null +++ b/src/multikzg/division_stream.rs @@ -0,0 +1,205 @@ +use ark_ff::Field; +use ark_std::borrow::Borrow; +use ark_std::vec::Vec; + +use crate::iterable::Iterable; + +/// A `Streamer` that repeatedly divides an n dimensional multilinear polynomial with binomial terms +/// of the form \\((x_i - \alpha_i)\\), for some n dimensional \\(\alpha\\). +/// Produces a stream that describes \\(q_i\\) where \\(\sum_i{q_i(x)(x_i - \alpha_i)} + f(\alpha)\\) +/// Outputs pairs of the form \\((i, x)\\), where \\(i\\) is which quotient is being referred to, +/// and \\(x\\) is the next nonzero coefficient in that quotient. Coefficients are outputted in order. +/// +/// There is a special case at the end, where \\(i\\) is equal to the dimension of the polynomial. +/// Then, the corresponding \\(x\\) is the evaluation of the polynomial at \\(\alpha\\). +/// +/// The stream can produce all quotient coefficients in the tree with a single pass over the initial stream. +#[derive(Clone, Copy)] +pub struct MultiPolynomialTree<'a, F, S> { + eval_point: &'a [F], + coefficients: &'a S, +} + +impl<'a, F, S> MultiPolynomialTree<'a, F, S> +where + S: Iterable, + F: Field, + S::Item: Borrow, +{ + /// Initialize a new polynomial tree. + pub fn new(coefficients: &'a S, eval_point: &'a [F]) -> Self { + Self { + coefficients, + eval_point, + } + } + + /// Outputs the depth of the polynomial tree. + #[inline] + pub fn depth(&self) -> usize { + self.eval_point.len() + } +} + +impl<'a, F, S> Iterable for MultiPolynomialTree<'a, F, S> +where + S: Iterable, + F: Field, + S::Item: Borrow, +{ + type Item = (usize, F); + + type Iter = MultiPolynomialTreeIter<'a, F, S::Iter>; + + fn iter(&self) -> Self::Iter { + MultiPolynomialTreeIter::new( + self.coefficients.iter(), + self.coefficients.len(), + self.eval_point, + ) + } + + fn len(&self) -> usize { + self.coefficients.len() + } +} + +/// Iterator of the polynomial division tree. 
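+///
+/// The iterator keeps a stack of partially folded coefficients: whenever the two entries on top of
+/// the stack sit at the same level, they are folded (`lhs * eval_point[level] + rhs`) into a single
+/// coefficient one level up, and only every other coefficient produced at each level is emitted as
+/// a quotient coefficient for that level.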
+pub struct MultiPolynomialTreeIter<'a, F, I> { + eval_point: &'a [F], + iterator: I, + stack: Vec<(usize, F)>, + parities: Vec, +} + +fn init_stack(n: usize, dim: usize) -> Vec<(usize, F)> { + let mut stack = Vec::with_capacity(dim); + + // generally we expect the size to be a power of two. + // If not, we are going to fill the stack as if the array was padded to zero up to the expected size. + let chunk_size = 1 << dim; + if n % chunk_size != 0 { + let mut delta = chunk_size - n % chunk_size; + for i in (0..dim).rev() { + if delta >= 1 << i { + stack.push((i, F::zero())); + delta -= 1 << i + } + } + } + stack +} + +impl<'a, F, I> MultiPolynomialTreeIter<'a, F, I> +where + F: Field, + I: Iterator, + I::Item: Borrow, +{ + fn new(iterator: I, n: usize, eval_point: &'a [F]) -> Self { + let stack = init_stack(n, eval_point.len()); + let parities = vec![false; eval_point.len()]; + + Self { + eval_point, + iterator, + stack, + parities, + } + } +} + +/// Each time we call next, a tuple (i, x) means that the next nonzero coefficient in the +/// i'th quotient is x. Note that the 0'th quotient has nonzero coeffients at the 0, 2, 4, ... indices, +/// the 1'th quotient has nonzero coefficients at the 0, 4, 8, ... indices, and so on. +/// +impl<'a, F, I> Iterator for MultiPolynomialTreeIter<'a, F, I> +where + F: Field, + I: Iterator, + I::Item: Borrow, +{ + type Item = (usize, F); + + fn next(&mut self) -> Option<::Item> { + let len = self.stack.len(); + let stack_item = if len > 1 && self.stack[len - 1].0 == self.stack[len - 2].0 { + // pop the last two elements from the stack. + // we could also use .pop() twice but truncate is slightly faster. + let (_level, lhs) = self.stack[len - 1]; + let (level, rhs) = self.stack[len - 2]; + self.stack.truncate(len - 2); + + let folded_coefficient = lhs * self.eval_point[level] + rhs; + (level + 1, folded_coefficient) + } else { + (0, *self.iterator.next()?.borrow()) + }; + + // do not add to the stack the coefficient of the max-depth folded polynomial. + // instead, just return it as is. + if stack_item.0 != self.eval_point.len() { + self.stack.push(stack_item) + } else { + return Some(stack_item); + } + // for each quotient, only yield every other coefficient. + self.parities[stack_item.0] = !self.parities[stack_item.0]; + if self.parities[stack_item.0] { + self.next() + } else { + Some(stack_item) + } + } +} + +/// For a n-dimensional point x, multiply the components of x that correspond to the bits of idx. 
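+/// For example, with `x = [x0, x1, x2]` and `idx = 0b101`, the result is `x0 * x2`.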
+/// +#[cfg(test)] +fn mul_components(x: &[F], idx: usize) -> F +where + F: Field, +{ + let mut result = F::one(); + for (i, x_i) in x.iter().enumerate() { + if (idx >> i) & 1 == 1 { + result = result.mul(x_i); + } + } + result +} + +#[test] +fn test_polynomial_divide_randomized() { + use crate::misc::{evaluate_multi_poly, random_vector}; + use ark_bls12_381::Fr as F; + use ark_ff::Zero; + + let dim = 4; + let rng = &mut ark_std::test_rng(); + let coefficients: Vec = random_vector(1 << dim, rng); + let alpha: Vec = random_vector(dim, rng); // the evaluation point + let test_point: Vec = random_vector(dim, rng); + let coefficients_stream = coefficients.as_slice(); + let foldstream = MultiPolynomialTree::new(&coefficients_stream, alpha.as_slice()); + let mut result = F::zero(); + let alpha_eval = evaluate_multi_poly(&coefficients, &alpha); + let mut quotient_evals: Vec = vec![F::zero(); dim]; + let mut quotient_idxs = vec![0; dim]; + for (quotient_num, quotient_coefficient) in foldstream.iter() { + if quotient_num != dim { + quotient_evals[quotient_num] += + quotient_coefficient * mul_components(&test_point, quotient_idxs[quotient_num]); + quotient_idxs[quotient_num] += 1 << (quotient_num + 1); + } else { + assert_eq!(quotient_coefficient, alpha_eval); + } + } + for i in 0..dim { + result += quotient_evals[i] * (test_point[i] - alpha[i]) + } + assert_eq!( + result, + evaluate_multi_poly(&coefficients, &test_point) - alpha_eval + ); +} diff --git a/src/multikzg/mod.rs b/src/multikzg/mod.rs new file mode 100644 index 0000000..a9c9984 --- /dev/null +++ b/src/multikzg/mod.rs @@ -0,0 +1,118 @@ +mod space; +mod time; +mod division_stream; + +use crate::errors::{VerificationError, VerificationResult}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ark_std::vec::Vec; +pub use space::CommitterKeyMultiStream; +pub use time::CommitterKeyMulti; + +use ark_ff::{One, PrimeField}; +use ark_std::ops::Mul; + +use ark_ec::{ + pairing::Pairing, scalar_mul::variable_base::VariableBaseMSM, AffineRepr, + CurveGroup, +}; + +/// A MultiKZG polynomial commitment over a bilinear group, represented as a single $\GG_1$ element. +/// +/// The `Commitment +/// +/// Given $\Tau \in \Z_p$, a prime field element, and $G \in \GG_1$ is a generator for affine group $\GG_1$. \ +/// Given $H$ is a generator for the affine group $\GG_2$. $\GG_1$ and $\GG_2$ are pairing-friendly. \ +/// The commitment key is defined as $ck = \(G, \tau G, \tau^2 G, ..., \tau^{n-1} G \)$. \ +/// The commitment step returns $C = \Sigma_i f_i \cdot ck_i$ in $\GG_1$. \ +/// +/// Where `polynomial_flat` points to a vector of type `Vec`, `ck.commit(&polynomial_flat)` returns the commitment to $f(x)$. +/// +/// This commitment scheme is homomorphic on elements of $\GG_1$. +#[derive(Debug, Copy, Clone, PartialEq, Eq, CanonicalDeserialize, CanonicalSerialize)] +pub struct Commitment(pub(crate) E::G1Affine); + +#[inline] +fn msm(bases: &[E::G1Affine], scalars: &[E::ScalarField]) -> E::G1Affine { + let scalars = scalars.iter().map(|x| x.into_bigint()).collect::>(); + let sp: E::G1 = VariableBaseMSM::msm_bigint(bases, &scalars); + sp.into_affine() +} + +/// A polynomial evaluation proof, represented as a `dim` dimensional vector of $\GG_1$ elements. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct EvaluationProofMulti(pub(crate) Vec); + +/// The verification key type for the polynomial commitment scheme. +/// +/// `VerifierKeyMulti` implements the verification function `verify` for the evaluation proof. 
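+/// Verification checks the pairing equation
+/// \\(e(C - f(\alpha) G, H) = \prod_i e(Q_i, \tau_i H - \alpha_i H)\\),
+/// where the \\(\tau_i H\\) are the `powers_of_g2` stored in the key.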
+/// +#[derive(Debug, PartialEq, Eq, CanonicalDeserialize, CanonicalSerialize)] +pub struct VerifierKeyMulti { + /// The generator of \\(\GG_1\\) + g: E::G1Affine, + /// The generator of \\(\GG_2\\). + g2: E::G2Affine, + /// generator of G2 multiplied by the trapdoor vector. + powers_of_g2: Vec, +} + +impl VerifierKeyMulti { + /// The verification procedure for the EvaluationProof with a single polynomial evaluated at a single evaluation point. + /// The polynomials are evaluated at the point ``alpha`` and is committed as ``commitment``. + /// The evaluation remainder (`evaluation`) and the respective polynomial quotients (the ``proof``), + /// can be obtained either in a space-efficient or a time-efficient manner. + /// To use the space-efficient implementation, see `CommitterKeyMultiStream` and respective functions. + /// To use the time-efficient implementation, see `CommitterKeyMulti` and respective functions. + pub fn verify( + &self, + commitment: &Commitment, + alpha: &[E::ScalarField], + evaluation: &E::ScalarField, + proof: &EvaluationProofMulti, + ) -> VerificationResult { + let lhs = E::pairing( + (commitment.0.into_group() - self.g.mul(evaluation)).into_affine(), + self.g2, + ) + .0; + // turn this into a fold, so it doesnt have to be mutable? + let mut rhs = E::TargetField::one(); + + assert_eq!(self.powers_of_g2.len(), alpha.len()); + assert_eq!(self.powers_of_g2.len(), proof.0.len()); + for (i, alpha_i) in alpha.iter().enumerate() { + rhs *= E::pairing( + proof.0[i], + (self.powers_of_g2[i].into_group() - self.g2.mul(alpha_i)).into_affine(), + ) + .0; + } + if lhs == rhs { + Ok(()) + } else { + Err(VerificationError) + } + } + + pub fn batch_verify( + &self, + commitments: &[Commitment], + alpha: &[E::ScalarField], + evaluations: &[E::ScalarField], + proof: &EvaluationProofMulti, + batch_chal: &E::ScalarField, + ) -> VerificationResult { + use crate::misc::powers; + + let pows = powers(*batch_chal, commitments.len()); + let commitment = commitments + .iter() + .zip(&pows) + .map(|(&p, &ch)| p.0 * ch) + .reduce(|x, y| x + y) + .map(|x| Commitment::(x.into_affine())) + .unwrap_or_else(|| Commitment::(E::G1Affine::zero())); + let evaluation = evaluations.iter().zip(pows).map(|(&p, e)| p * e).sum(); + self.verify(&commitment, alpha, &evaluation, proof) + } +} diff --git a/src/multikzg/space.rs b/src/multikzg/space.rs new file mode 100644 index 0000000..6277693 --- /dev/null +++ b/src/multikzg/space.rs @@ -0,0 +1,310 @@ +//! An impementation of a space-efficient version of Michele's multilinear extension +//! of Kate et al's polynomial commitment, +//! with optimization from [\[BDFG20\]](https://eprint.iacr.org/2020/081.pdf). +//! 
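+//! A minimal sketch of the streaming flow, mirroring the tests in this module:
+//! ```ignore
+//! // `dim`, `polynomial` (2^dim coefficients) and `alpha` (dim points) as in the tests below.
+//! let rng = &mut ark_std::test_rng();
+//! let ck = CommitterKeyMulti::<Bls12_381>::new(dim, rng);
+//! let ck_stream = CommitterKeyMultiStream::from(&ck);
+//! let commitment = ck_stream.commit(&polynomial.as_slice());
+//! let (evaluation, proof) = ck_stream.open(&polynomial.as_slice(), &alpha);
+//! ```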
+ +use ark_ec::scalar_mul::variable_base::ChunkedPippenger; +use ark_ec::{pairing::Pairing, AffineRepr, CurveGroup}; +use ark_ff::PrimeField; +use ark_std::vec::Vec; +use ark_std::{borrow::Borrow, ops::Mul}; + +use super::EvaluationProofMulti; +use crate::iterable::Iterable; +use crate::multikzg::time::CommitterKeyMulti; +use crate::multikzg::{Commitment, VerifierKeyMulti}; +use ark_serialize::{ + CanonicalDeserialize, CanonicalSerialize, Compress, Read, SerializationError, Valid, Validate, + Write, +}; +// use super::VerificationResult; +// use crate::ark_std::Zero; + +pub struct CommitterKeyMultiStream +where + SG: Iterable, + SG::Item: Borrow, +{ + pub(crate) powers_of_g: SG, + pub(crate) g2: E::G2Affine, + pub(crate) powers_of_g2: Vec, +} + +impl Valid for CommitterKeyMultiStream +where + E: Pairing, + SG: Iterable + CanonicalDeserialize + CanonicalSerialize, +{ + fn check(&self) -> Result<(), SerializationError> { + let powers_of_g_check = self.powers_of_g.check(); + let g2_check = self.g2.check(); + let powers_of_g2_check = self.powers_of_g2.check(); + + if powers_of_g_check.is_err() { + powers_of_g_check + } else if g2_check.is_err() { + g2_check + } else if powers_of_g2_check.is_err() { + powers_of_g2_check + } else { + Ok(()) + } + } +} + +impl CanonicalSerialize for CommitterKeyMultiStream +where + E: Pairing, + SG: Iterable + CanonicalDeserialize + CanonicalSerialize, +{ + fn serialize_with_mode( + &self, + mut writer: W, + compress: Compress, + ) -> Result<(), SerializationError> { + self.powers_of_g + .serialize_with_mode(&mut writer, compress)?; + self.g2.serialize_with_mode(&mut writer, compress)?; + self.powers_of_g2.serialize_with_mode(&mut writer, compress) + } + + fn serialized_size(&self, compress: Compress) -> usize { + self.powers_of_g.serialized_size(compress) + + self.g2.serialized_size(compress) + + self.powers_of_g2.serialized_size(compress) + } +} + +impl CanonicalDeserialize for CommitterKeyMultiStream +where + E: Pairing, + SG: Iterable + CanonicalDeserialize + CanonicalSerialize + Valid, +{ + fn deserialize_with_mode( + mut reader: R, + compress: Compress, + validate: Validate, + ) -> Result { + let powers_of_g = SG::deserialize_with_mode(&mut reader, compress, validate)?; + let g2 = E::G2Affine::deserialize_with_mode(&mut reader, compress, validate)?; + let powers_of_g2 = + Vec::::deserialize_with_mode(&mut reader, compress, validate)?; + + Ok(Self { + powers_of_g, + g2, + powers_of_g2, + }) + } +} + +/// This struct naively and inefficiently implements both Iterable and CanonicalSerialize/Deserialize +/// +#[derive(CanonicalSerialize, CanonicalDeserialize)] +struct TestStreamer(Vec); + +impl Iterable for TestStreamer { + type Item = E::G1Affine; + type Iter = as IntoIterator>::IntoIter; + + fn iter(&self) -> Self::Iter { + self.0.clone().into_iter() + } + + fn len(&self) -> usize { + self.0.len() + } +} + +impl<'a, E: Pairing> From<&'a CommitterKeyMulti> + for CommitterKeyMultiStream, E> +{ + fn from(ck: &'a CommitterKeyMulti) -> Self { + let powers_of_g = TestStreamer(ck.powers_of_g.clone()); + let g2 = ck.g2; + let powers_of_g2 = ck.powers_of_g2.clone(); + + CommitterKeyMultiStream { + powers_of_g, + g2, + powers_of_g2, + } + } +} + +impl From, E>> for CommitterKeyMulti { + fn from(ck_stream: CommitterKeyMultiStream, E>) -> Self { + let powers_of_g = ck_stream.powers_of_g.0; + let g2 = ck_stream.g2; + let powers_of_g2 = ck_stream.powers_of_g2; + + CommitterKeyMulti { + powers_of_g, + g2, + powers_of_g2, + } + } +} + +impl From<&CommitterKeyMultiStream, E>> 
for VerifierKeyMulti { + fn from(ck: &CommitterKeyMultiStream, E>) -> VerifierKeyMulti { + let powers_of_g2 = ck.powers_of_g2.to_vec(); + let g = ck.powers_of_g.iter().next().unwrap(); + let g2 = ck.g2; + + VerifierKeyMulti { + g, + g2, + powers_of_g2, + } + } +} + +impl CommitterKeyMultiStream +where + E: Pairing, + SG: Iterable, + SG::Item: Borrow, +{ + /// Given a polynomial `polynomial` of degree less than `max_degree`, return a commitment to `polynomial`. + pub fn commit(&self, polynomial: &SF) -> Commitment + where + SF: Iterable, + SF::Item: Borrow, + { + Commitment(stream_pippenger::<_, _, E>(&self.powers_of_g, polynomial)) + } + + // / Given a polynomial `polynomial` and an evaluation point `evaluation_point`, + // / return the evaluation of `polynomial in `evaluation_point`, + // / together with an evaluation proof + pub fn open( + &self, + polynomial: &SF, + evaluation_point: &[E::ScalarField], + ) -> (E::ScalarField, EvaluationProofMulti) + where + SF: Iterable, + SF::Item: Borrow, + { + use crate::multikzg::division_stream::MultiPolynomialTree; + use ark_ff::Zero; + + let dim = evaluation_point.len(); + let mut power_of_g_iters = Vec::new(); + for i in 0..dim { + power_of_g_iters.push(self.powers_of_g.iter().step_by(1 << (i + 1))); + } + let mut result = vec![E::G1Affine::zero(); dim]; + let tree = MultiPolynomialTree::new(polynomial, evaluation_point); + for (i, coef) in tree.iter() { + if i == dim { + return (coef, EvaluationProofMulti(result)); + } + let next_power_of_g: E::G1Affine = *power_of_g_iters[i].next().unwrap().borrow(); + + let addend = next_power_of_g.mul(&coef).into_affine(); + result[i] = (result[i] + addend).into_affine(); + } + (E::ScalarField::zero(), EvaluationProofMulti(result)) + } + + /// Evaluate a single polynomial at a set of points `eval_points`, and provide a single evaluation proof. 
+ pub fn open_multi_points( + &self, + _polynomial: &[E::ScalarField], + _eval_points: &[E::ScalarField], + ) -> EvaluationProofMulti { + todo!(); + } +} + +fn stream_pippenger(bases: &SG, scalars: &SF) -> E::G1Affine +where + SF: Iterable, + SF::Item: Borrow, + SG: Iterable, + SG::Item: Borrow, +{ + let mut cp: ChunkedPippenger = ChunkedPippenger::new(1 << 15); + let zipped = bases.iter().zip(scalars.iter()); + zipped.for_each(|(b, s)| cp.add(b, s.borrow().into_bigint())); + cp.finalize().into_affine() +} + +#[test] +fn test_streaming_commit() { + use crate::misc::random_unique_vector; + use ark_bls12_381::Bls12_381; + + let dim = 15; + let rng = &mut ark_std::test_rng(); + let ck = CommitterKeyMulti::::new(dim, rng); + let ck_stream = CommitterKeyMultiStream::from(&ck); + + let poly = random_unique_vector(dim, rng); + assert_eq!(ck.commit(&poly), ck_stream.commit(&poly.as_slice())); +} + +#[test] +fn test_streaming_open() { + use crate::misc::random_vector; + use ark_bls12_381::Bls12_381; + + let dim = 15; + let rng = &mut ark_std::test_rng(); + let ck = CommitterKeyMulti::::new(dim, rng); + let ck_stream = CommitterKeyMultiStream::from(&ck); + + let poly = random_vector(1 << dim, rng); + let evaluation_point = random_vector(dim, rng); + assert_eq!( + ck.open(&poly, &evaluation_point), + ck_stream.open(&poly.as_slice(), &evaluation_point.as_slice()) + ); +} + +#[test] +fn test_end_to_end() { + use crate::misc::evaluate_multi_poly; + use crate::misc::random_vector; + use crate::multikzg::VerifierKeyMulti; + use ark_bls12_381::Bls12_381; + use ark_bls12_381::Fr; + + let dim = 7; + let rng = &mut ark_std::test_rng(); + let ck = CommitterKeyMulti::::new(dim, rng); + let ck_streaming = CommitterKeyMultiStream::from(&ck); + let vk = VerifierKeyMulti::from(&ck_streaming); + + let polynomial: Vec = random_vector(1 << dim, rng); + let polynomial_stream = polynomial.as_slice(); + + let alpha: Vec = random_vector(dim, rng); + let commitment = ck_streaming.commit(&polynomial_stream); + let (evaluation, proof) = ck_streaming.open(&polynomial_stream, &alpha); + let expected_evaluation = evaluate_multi_poly(&polynomial_stream, &alpha); + assert_eq!(evaluation, expected_evaluation); + assert!(vk.verify(&commitment, &alpha, &evaluation, &proof).is_ok()) +} + +#[test] +fn test_serialize() { + use ark_bls12_381::Bls12_381; + + let dim = 11; + let rng = &mut ark_std::test_rng(); + let ck = CommitterKeyMulti::::new(dim, rng); + let ck_streaming = CommitterKeyMultiStream::from(&ck); + + let mut ck_buf = Vec::new(); + assert!(ck_streaming.serialize_compressed(&mut ck_buf).is_ok()); + let deserialized_ck_streaming = + CommitterKeyMultiStream::, Bls12_381>::deserialize_compressed( + ck_buf.as_slice(), + ) + .unwrap(); + let deserialized_ck = CommitterKeyMulti::from(deserialized_ck_streaming); + assert_eq!(deserialized_ck, ck); +} diff --git a/src/multikzg/time.rs b/src/multikzg/time.rs new file mode 100644 index 0000000..3dd720f --- /dev/null +++ b/src/multikzg/time.rs @@ -0,0 +1,273 @@ +//! An impementation of a time-efficient version of Michele's multilinear extension +//! of Kate et al's polynomial commitment, +//! with optimization from [\[BDFG20\]](https://eprint.iacr.org/2020/081.pdf). 
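+//!
+//! A minimal sketch of the time-efficient flow, mirroring the end-to-end test at the bottom of this module:
+//! ```ignore
+//! // `dim`, `polynomial` (2^dim coefficients) and `alpha` (dim points) as in the tests below.
+//! let rng = &mut ark_std::test_rng();
+//! let ck = CommitterKeyMulti::<Bls12_381>::new(dim, rng);
+//! let vk = VerifierKeyMulti::from(&ck);
+//! let commitment = ck.commit(&polynomial);
+//! let (evaluation, proof) = ck.open(&polynomial, &alpha);
+//! assert!(vk.verify(&commitment, &alpha, &evaluation, &proof).is_ok());
+//! ```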
+use ark_ec::pairing::Pairing;
+use ark_ec::scalar_mul::fixed_base::FixedBase;
+use ark_ec::{AffineRepr, CurveGroup as ProjectiveCurve};
+use ark_ff::PrimeField;
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
+use ark_std::borrow::Borrow;
+use ark_std::rand::RngCore;
+use ark_std::UniformRand;
+use ark_std::{ops::Mul, vec::Vec};
+
+use crate::multikzg::{msm, Commitment, EvaluationProofMulti, VerifierKeyMulti};
+
+use crate::misc::{linear_combination, multi_poly_decompose, powers, random_unique_vector, tensor};
+
+/// A time-efficient implementation of Michele's multilinear extension of the KZG polynomial commitment.
+///
+#[derive(PartialEq, Eq, Debug, CanonicalDeserialize, CanonicalSerialize)]
+pub struct CommitterKeyMulti<E: Pairing> {
+    pub(crate) powers_of_g: Vec<E::G1Affine>,
+    pub(crate) g2: E::G2Affine,
+    pub(crate) powers_of_g2: Vec<E::G2Affine>,
+}
+
+// Converts a `CommitterKeyMulti` into a `VerifierKeyMulti`.
+impl<E: Pairing> From<&CommitterKeyMulti<E>> for VerifierKeyMulti<E> {
+    fn from(ck: &CommitterKeyMulti<E>) -> VerifierKeyMulti<E> {
+        let powers_of_g2 = ck.powers_of_g2.to_vec();
+        let g = ck.powers_of_g[0];
+        let g2 = ck.g2;
+
+        VerifierKeyMulti {
+            g,
+            g2,
+            powers_of_g2,
+        }
+    }
+}
+
+impl<E: Pairing> CommitterKeyMulti<E> {
+    /// The setup algorithm for the commitment scheme.
+    ///
+    /// Given a dimension of the polynomial `dim`
+    /// and a cryptographically-secure random number generator `rng`,
+    /// construct the committer key.
+    pub fn new(dim: usize, rng: &mut impl RngCore) -> Self {
+        // Generate n *different* points tau_1, tau_2, ..., tau_n.
+        // Each tau_i is in Fr.
+
+        let tau: Vec<E::ScalarField> = random_unique_vector(dim, rng);
+        let powers_of_tau = tensor(&tau);
+
+        let g = E::G1::rand(rng);
+        let window_size = FixedBase::get_mul_window_size(1 << (dim + 1));
+        let scalar_bits = E::ScalarField::MODULUS_BIT_SIZE as usize;
+        let g_table = FixedBase::get_window_table(scalar_bits, window_size, g);
+        let powers_of_g_proj = FixedBase::msm(scalar_bits, window_size, &g_table, &powers_of_tau);
+        let powers_of_g = E::G1::normalize_batch(&powers_of_g_proj);
+
+        let g2 = E::G2::rand(rng).into_affine();
+        let powers_of_g2 = tau
+            .iter()
+            .map(|t| g2.mul(t).into_affine())
+            .collect::<Vec<_>>();
+
+        CommitterKeyMulti {
+            powers_of_g,
+            g2,
+            powers_of_g2,
+        }
+    }
+
+    /// Return the bound on evaluation points.
+    #[inline]
+    pub fn max_eval_points(&self) -> usize {
+        self.powers_of_g2.len() - 1
+    }
+
+    /// Given a polynomial `polynomial` of degree less than `max_degree`, return a commitment to `polynomial`.
+    pub fn commit(&self, polynomial: &[E::ScalarField]) -> Commitment<E> {
+        Commitment(msm::<E>(&self.powers_of_g, polynomial))
+    }
+
+    /// Obtain a new preprocessed committer key defined by the indices `indices`.
+    ///
+    pub fn index_by(&self, indices: &[usize]) -> Self {
+        let mut indexed_powers_of_g = vec![E::G1Affine::zero(); self.powers_of_g.len()];
+        indices
+            .iter()
+            .zip(self.powers_of_g.iter())
+            .for_each(|(&i, &g)| {
+                indexed_powers_of_g[i] = (indexed_powers_of_g[i] + g).into_affine()
+            });
+        Self {
+            powers_of_g2: self.powers_of_g2.clone(),
+            g2: self.g2,
+            powers_of_g: indexed_powers_of_g,
+        }
+    }
+
+    /// Given an iterator over `polynomials`, expressed as vectors of coefficients, return a vector of commitments to all of them.
+    pub fn batch_commit<J>(&self, polynomials: J) -> Vec<Commitment<E>>
+    where
+        J: IntoIterator,
+        J::Item: Borrow<Vec<E::ScalarField>>,
+    {
+        polynomials
+            .into_iter()
+            .map(|p| self.commit(p.borrow()))
+            .collect::<Vec<_>>()
+    }
+
+    /// Given a polynomial `polynomial` and an evaluation point `evaluation_point`,
+    /// return the evaluation of `polynomial` in `evaluation_point`,
+    /// together with an evaluation proof (the quotient polynomials).
+    pub fn open(
+        &self,
+        polynomial: &[E::ScalarField],
+        eval_point: &[E::ScalarField],
+    ) -> (E::ScalarField, EvaluationProofMulti<E>) {
+        let (quotients, remainder) = multi_poly_decompose(polynomial, eval_point);
+        let proof = quotients
+            .into_iter()
+            .map(|quotient| msm::<E>(&self.powers_of_g, &quotient))
+            .collect::<Vec<_>>();
+        (remainder, EvaluationProofMulti(proof))
+    }
+
+    /// Evaluate multiple polynomials at a single point `eval_point`, and provide a single evaluation proof.
+    pub fn batch_open(
+        &self,
+        _polynomials: &[&[E::ScalarField]],
+        _eval_point: &[E::ScalarField],
+        _eval_chal: E::ScalarField,
+    ) -> EvaluationProofMulti<E> {
+        todo!();
+    }
+
+    /// Evaluate a single polynomial at a set of points `eval_points`, and provide a single evaluation proof.
+    pub fn open_multi_points(
+        &self,
+        _polynomial: &[E::ScalarField],
+        _eval_points: &[E::ScalarField],
+    ) -> EvaluationProofMulti<E> {
+        todo!();
+    }
+
+    /// Scale multiple multilinear polynomials by powers of `eval_chal`, and linearly combine them.
+    pub fn batched_poly(
+        &self,
+        polynomials: &[Vec<E::ScalarField>],
+        eval_chal: &E::ScalarField,
+    ) -> Vec<E::ScalarField> {
+        let pows = powers(*eval_chal, polynomials.len());
+        linear_combination(polynomials, &pows)
+    }
+
+    /// Evaluate a set of multilinear polynomials at a single point `eval_point`, and provide a single batched evaluation proof.
+    /// `eval_chal` is the random challenge for batching evaluation proofs across different polynomials.
+    pub fn batch_open_multi_polys(
+        &self,
+        polynomials: &[Vec<E::ScalarField>],
+        eval_point: &[E::ScalarField],
+        eval_chal: &E::ScalarField,
+    ) -> (E::ScalarField, EvaluationProofMulti<E>) {
+        self.open(&self.batched_poly(polynomials, eval_chal), eval_point)
+    }
+
+    /// Evaluate a set of polynomials at a set of points `eval_points`, and provide a single batched evaluation proof.
+    /// `eval_chal` is the random challenge for batching evaluation proofs across different polynomials.
+    pub fn batch_open_multi_points(
+        &self,
+        _polynomials: &[&Vec<E::ScalarField>],
+        _eval_points: &[E::ScalarField],
+        _eval_chal: &[E::ScalarField],
+    ) -> EvaluationProofMulti<E> {
+        todo!();
+    }
+}
+
+#[test]
+fn test_time_open() {
+    use crate::misc::random_vector;
+    use ark_bls12_381::Bls12_381;
+
+    let dim = 15;
+    let rng = &mut ark_std::test_rng();
+    let ck = CommitterKeyMulti::<Bls12_381>::new(dim, rng);
+
+    let poly = random_vector(1 << dim, rng);
+    let evaluation_point = random_vector(dim, rng);
+    assert_eq!(
+        ck.open(&poly, &evaluation_point),
+        ck.open(&poly.as_slice(), &evaluation_point.as_slice())
+    );
+}
+
+/// An end-to-end test for the commitment scheme:
+/// generates a random polynomial and a random evaluation point, and ensures that a correct
+/// evaluation proof verifies.
+///
+#[test]
+fn test_end_to_end() {
+    use crate::misc::{evaluate_multi_poly, random_vector};
+    use ark_bls12_381::Bls12_381;
+    use ark_bls12_381::Fr;
+
+    let dim = 11;
+    let rng = &mut ark_std::test_rng();
+    let ck = CommitterKeyMulti::<Bls12_381>::new(dim, rng);
+    let vk = VerifierKeyMulti::from(&ck);
+
+    let polynomial_flat: Vec<Fr> = random_vector(1 << dim, rng);
+
+    let alpha: Vec<Fr> = random_vector(dim, rng);
+    let commitment = ck.commit(&polynomial_flat);
+    let (evaluation, proof) = ck.open(&polynomial_flat, &alpha);
+    let expected_evaluation = evaluate_multi_poly(&polynomial_flat, &alpha);
+    assert_eq!(evaluation, expected_evaluation);
+    assert!(vk.verify(&commitment, &alpha, &evaluation, &proof).is_ok())
+}
+
+#[test]
+fn test_serialize() {
+    use ark_bls12_381::Bls12_381;
+
+    let dim = 11;
+    let rng = &mut ark_std::test_rng();
+    let ck = CommitterKeyMulti::<Bls12_381>::new(dim, rng);
+    let vk = VerifierKeyMulti::from(&ck);
+
+    let mut ck_buf = Vec::new();
+    assert!(ck.serialize_compressed(&mut ck_buf).is_ok());
+    let deserialized_ck = CommitterKeyMulti::deserialize_compressed(ck_buf.as_slice()).unwrap();
+    assert_eq!(deserialized_ck, ck);
+
+    let mut vk_buf = Vec::new();
+    assert!(vk.serialize_compressed(&mut vk_buf).is_ok());
+    let deserialized_vk = VerifierKeyMulti::deserialize_compressed(vk_buf.as_slice()).unwrap();
+    assert_eq!(deserialized_vk, vk)
+}
+
+#[test]
+fn test_batched_polys() {
+    use crate::misc::{evaluate_multi_poly, random_vector};
+    use ark_bls12_381::Bls12_381;
+    use ark_bls12_381::Fr;
+
+    let dim = 11;
+    let rng = &mut ark_std::test_rng();
+    let ck = CommitterKeyMulti::<Bls12_381>::new(dim, rng);
+    let vk = VerifierKeyMulti::from(&ck);
+
+    let num_polys = 5;
+    let polynomials: Vec<Vec<Fr>> = (0..num_polys)
+        .map(|_| random_vector(1 << dim, rng))
+        .collect::<Vec<_>>();
+    let chal = Fr::rand(rng);
+    let alpha: Vec<Fr> = random_vector(dim, rng);
+
+    let batched_proof = ck.batch_open_multi_polys(&polynomials, &alpha, &chal);
+
+    let commitments = ck.batch_commit(&polynomials);
+    let evaluations = polynomials
+        .iter()
+        .map(|p| evaluate_multi_poly(p, &alpha))
+        .collect::<Vec<_>>();
+    assert!(vk
+        .batch_verify(&commitments, &alpha, &evaluations, &batched_proof.1, &chal)
+        .is_ok())
}
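+
+// A worked note on `open` (a sketch): `multi_poly_decompose` is assumed to compute the
+// standard multilinear decomposition
+//
+//     f(x) - f(alpha) = sum_{i=1}^{dim} (x_i - alpha_i) * q_i(x),
+//
+// so `open` returns the remainder f(alpha) together with commitments to the quotients
+// q_1, ..., q_dim. A verifier holding `powers_of_g2 = [tau_i * g2]` can then check a
+// pairing equation of the form
+//
+//     e(C - f(alpha) * g, g2) == prod_i e(proof_i, (tau_i - alpha_i) * g2).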