From 2858a224c878de47cfd40e66ce4c8963a44b5463 Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Thu, 10 Oct 2024 11:50:41 -0500 Subject: [PATCH 01/25] remove zombie code: rand in InitPhase1 --- backend/groth16/bn254/mpcsetup/phase1.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/backend/groth16/bn254/mpcsetup/phase1.go b/backend/groth16/bn254/mpcsetup/phase1.go index a912a473aa..e09e34b0ce 100644 --- a/backend/groth16/bn254/mpcsetup/phase1.go +++ b/backend/groth16/bn254/mpcsetup/phase1.go @@ -52,15 +52,6 @@ type Phase1 struct { func InitPhase1(power int) (phase1 Phase1) { N := int(math.Pow(2, float64(power))) - // Generate key pairs - var tau, alpha, beta fr.Element - tau.SetOne() - alpha.SetOne() - beta.SetOne() - phase1.PublicKeys.Tau = newPublicKey(tau, nil, 1) - phase1.PublicKeys.Alpha = newPublicKey(alpha, nil, 2) - phase1.PublicKeys.Beta = newPublicKey(beta, nil, 3) - // First contribution use generators _, _, g1, g2 := curve.Generators() phase1.Parameters.G2.Beta.Set(&g2) From 3477517d49ad686696e3b8d8511fa677870bc11e Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Thu, 10 Oct 2024 12:13:12 -0500 Subject: [PATCH 02/25] docs comment genR --- backend/groth16/bn254/mpcsetup/utils.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/backend/groth16/bn254/mpcsetup/utils.go b/backend/groth16/bn254/mpcsetup/utils.go index e3b47d1121..f455173232 100644 --- a/backend/groth16/bn254/mpcsetup/utils.go +++ b/backend/groth16/bn254/mpcsetup/utils.go @@ -156,6 +156,8 @@ func linearCombinationG2(A []curve.G2Affine) (L1, L2 curve.G2Affine) { } // Generate R in G₂ as Hash(gˢ, gˢˣ, challenge, dst) +// it is to be used as a challenge for generating a proof of knowledge to x +// π ≔ x.r; e([1]₁, π) =﹖ e([x]₁, r) func genR(sG1, sxG1 curve.G1Affine, challenge []byte, dst byte) curve.G2Affine { var buf bytes.Buffer buf.Grow(len(challenge) + curve.SizeOfG1AffineUncompressed*2) From 2e425fc40bf766f68931393b0b073a2fb06e92fb Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Fri, 11 Oct 2024 14:29:30 -0500 Subject: [PATCH 03/25] revert bring init back --- backend/groth16/bn254/mpcsetup/phase1.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/backend/groth16/bn254/mpcsetup/phase1.go b/backend/groth16/bn254/mpcsetup/phase1.go index e09e34b0ce..a912a473aa 100644 --- a/backend/groth16/bn254/mpcsetup/phase1.go +++ b/backend/groth16/bn254/mpcsetup/phase1.go @@ -52,6 +52,15 @@ type Phase1 struct { func InitPhase1(power int) (phase1 Phase1) { N := int(math.Pow(2, float64(power))) + // Generate key pairs + var tau, alpha, beta fr.Element + tau.SetOne() + alpha.SetOne() + beta.SetOne() + phase1.PublicKeys.Tau = newPublicKey(tau, nil, 1) + phase1.PublicKeys.Alpha = newPublicKey(alpha, nil, 2) + phase1.PublicKeys.Beta = newPublicKey(beta, nil, 3) + // First contribution use generators _, _, g1, g2 := curve.Generators() phase1.Parameters.G2.Beta.Set(&g2) From 2719edb918720627d29d27fca45189d478971352 Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Fri, 11 Oct 2024 15:29:14 -0500 Subject: [PATCH 04/25] refactor: updateValue and verify --- backend/groth16/bn254/mpcsetup/utils.go | 121 +++++++++++++++++++++--- 1 file changed, 110 insertions(+), 11 deletions(-) diff --git a/backend/groth16/bn254/mpcsetup/utils.go b/backend/groth16/bn254/mpcsetup/utils.go index f455173232..e4bd05fa0d 100644 --- a/backend/groth16/bn254/mpcsetup/utils.go 
+++ b/backend/groth16/bn254/mpcsetup/utils.go @@ -18,9 +18,11 @@ package mpcsetup import ( "bytes" + "crypto/rand" "math/big" "math/bits" "runtime" + "time" "github.com/consensys/gnark-crypto/ecc" curve "github.com/consensys/gnark-crypto/ecc/bn254" @@ -31,7 +33,7 @@ import ( type PublicKey struct { SG curve.G1Affine SXG curve.G1Affine - XR curve.G2Affine + XR curve.G2Affine // XR = X.R ∈ 𝔾₂ proof of knowledge } func newPublicKey(x fr.Element, challenge []byte, dst byte) PublicKey { @@ -69,7 +71,7 @@ func bitReverse[T any](a []T) { } } -// Returns [1, a, a², ..., aⁿ⁻¹ ] in Montgomery form +// Returns [1, a, a², ..., aⁿ⁻¹ ] func powers(a fr.Element, n int) []fr.Element { result := make([]fr.Element, n) result[0] = fr.NewElement(1) @@ -79,7 +81,7 @@ func powers(a fr.Element, n int) []fr.Element { return result } -// Returns [aᵢAᵢ, ...] in G1 +// Returns [aᵢAᵢ, ...]∈𝔾₁ func scaleG1InPlace(A []curve.G1Affine, a []fr.Element) { utils.Parallelize(len(A), func(start, end int) { var tmp big.Int @@ -90,7 +92,7 @@ func scaleG1InPlace(A []curve.G1Affine, a []fr.Element) { }) } -// Returns [aᵢAᵢ, ...] in G2 +// Returns [aᵢAᵢ, ...]∈𝔾₂ func scaleG2InPlace(A []curve.G2Affine, a []fr.Element) { utils.Parallelize(len(A), func(start, end int) { var tmp big.Int @@ -101,16 +103,22 @@ func scaleG2InPlace(A []curve.G2Affine, a []fr.Element) { }) } +/* // Check e(a₁, a₂) = e(b₁, b₂) func sameRatio(a1, b1 curve.G1Affine, a2, b2 curve.G2Affine) bool { if !a1.IsInSubGroup() || !b1.IsInSubGroup() || !a2.IsInSubGroup() || !b2.IsInSubGroup() { panic("invalid point not in subgroup") } - var na2 curve.G2Affine - na2.Neg(&a2) + return sameRatioUnsafe(a1, b1, a2, b2) +}*/ + +// Check n₁/d₁ = n₂/d₂ i.e. e(n₁, d₂) = e(d₁, n₂). No subgroup checks. +func sameRatioUnsafe(n1, d1 curve.G1Affine, n2, d2 curve.G2Affine) bool { + var nd1 curve.G1Affine + nd1.Neg(&d1) res, err := curve.PairingCheck( - []curve.G1Affine{a1, b1}, - []curve.G2Affine{na2, b2}) + []curve.G1Affine{n1, nd1}, + []curve.G2Affine{d2, n2}) if err != nil { panic(err) } @@ -129,7 +137,7 @@ func merge(A, B []curve.G1Affine) (a, b curve.G1Affine) { return } -// L1 = ∑ rᵢAᵢ, L2 = ∑ rᵢAᵢ₊₁ in G1 +// L1 = ∑ rᵢAᵢ, L2 = ∑ rᵢAᵢ₊₁∈𝔾₁ func linearCombinationG1(A []curve.G1Affine) (L1, L2 curve.G1Affine) { nc := runtime.NumCPU() n := len(A) @@ -142,7 +150,7 @@ func linearCombinationG1(A []curve.G1Affine) (L1, L2 curve.G1Affine) { return } -// L1 = ∑ rᵢAᵢ, L2 = ∑ rᵢAᵢ₊₁ in G2 +// L1 = ∑ rᵢAᵢ, L2 = ∑ rᵢAᵢ₊₁∈𝔾₂ func linearCombinationG2(A []curve.G2Affine) (L1, L2 curve.G2Affine) { nc := runtime.NumCPU() n := len(A) @@ -155,7 +163,7 @@ func linearCombinationG2(A []curve.G2Affine) (L1, L2 curve.G2Affine) { return } -// Generate R in G₂ as Hash(gˢ, gˢˣ, challenge, dst) +// Generate R∈𝔾₂ as Hash(gˢ, gˢˣ, challenge, dst) // it is to be used as a challenge for generating a proof of knowledge to x // π ≔ x.r; e([1]₁, π) =﹖ e([x]₁, r) func genR(sG1, sxG1 curve.G1Affine, challenge []byte, dst byte) curve.G2Affine { @@ -170,3 +178,94 @@ func genR(sG1, sxG1 curve.G1Affine, challenge []byte, dst byte) curve.G2Affine { } return spG2 } + +type RandomBeacon func(time.Time) []byte + +// func (rb RandomBeacon) GenerateChallenge(...) []byte {} + +type pair struct { + g1 curve.G1Affine + g2 *curve.G2Affine // optional; some values expect to have a 𝔾₂ representation, some don't. +} + +// check that g1, g2 are valid as updated values, i.e. in their subgroups, and non-zero +func (p *pair) validUpdate() bool { + // if the contribution is 0 the product is doomed to be 0. 
+ // no need to check this for g2 independently because if g1 is 0 and g2 is not, consistency checks will fail + return !p.g1.IsInfinity() && p.g1.IsInSubGroup() && (p.g2 == nil || p.g2.IsInSubGroup()) +} + +type valueUpdate struct { + contributionCommitment curve.G1Affine // x or [Xⱼ]₁ + contributionPok curve.G2Affine // π ≔ x.r ∈ 𝔾₂ + updatedCommitment pair // [X₁..Xⱼ] +} + +// updateValue produces values associated with contribution to an existing value. +// if prevCommitment contains only a 𝔾₁ value, then so will updatedCommitment +func updateValue(prevCommitment pair, challenge []byte, dst byte) valueUpdate { + var x valueUpdate + contributionValue, err := rand.Int(rand.Reader, fr.Modulus()) + + eraseToxicWaste := func() { + if contributionValue == nil { + return + } + for i := range contributionValue.Bits() { // TODO check that this works + contributionValue.Bits()[i] = 0 + } + } + defer eraseToxicWaste() + + if err != nil { + panic(err) + } + + _, _, g1, _ := curve.Generators() + x.contributionCommitment.ScalarMultiplication(&g1, contributionValue) + x.updatedCommitment.g1.ScalarMultiplication(&prevCommitment.g1, contributionValue) + if prevCommitment.g2 != nil { // TODO make sure this is correct + x.updatedCommitment.g2 = new(curve.G2Affine).ScalarMultiplication(prevCommitment.g2, contributionValue) + } + + // proof of knowledge to commitment. Algorithm 3 from section 3.7 + pokBase := genR(x.contributionCommitment, x.updatedCommitment.g1, challenge, dst) // r + x.contributionPok.ScalarMultiplication(&pokBase, contributionValue) + + return x +} + +// verify corresponds with verification steps {i, i+3} with 1 ≤ i ≤ 3 in section 7.1 of Bowe-Gabizon17 +// it checks the proof of knowledge of the contribution, and the fact that the product of the contribution +// and previous commitment makes the new commitment. +// prevCommitment is assumed to be valid. No subgroup check and the like. +func (x *valueUpdate) verify(prevCommitment pair, challenge []byte, dst byte) bool { + noG2 := prevCommitment.g2 == nil + if noG2 != (x.updatedCommitment.g2 == nil) { // no erasing or creating g2 values + return false + } + + if !x.contributionPok.IsInSubGroup() || !x.contributionCommitment.IsInSubGroup() || !x.updatedCommitment.validUpdate() { + return false + } + + // verify commitment proof of knowledge. CheckPOK, algorithm 4 from section 3.7 + r := genR(x.contributionCommitment, x.updatedCommitment.g1, challenge, dst) // verification challenge in the form of a g2 base + _, _, g1, _ := curve.Generators() + if !sameRatioUnsafe(x.contributionCommitment, g1, x.contributionPok, r) { // π ?= x.r i.e. x/g1 =? π/r + return false + } + + // check that the updated/previous ratio is consistent between the 𝔾₁ and 𝔾₂ representations. Based on CONSISTENT, algorithm 2 in Section 3.6. 
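
// Aside: a sketch (not part of the original patch) of the algebra behind the
// three pairing checks in this verify, writing U for prevCommitment,
// U' = x·U for updatedCommitment, r = genR(...) and π = x·r:
//
//	CheckPOK:   e([x]₁, r) = e([1]₁, π)   accept π as proof of knowledge of x
//	CONSISTENT: e(U'₁, U₂) = e(U₁, U'₂)   𝔾₁ and 𝔾₂ views agree on the update
//	update:     e(U'₁, r)  = e(U₁, π)     U'₁/U₁ = π/r = x, so U'₁ = x·U₁
//
// Each line is a single sameRatioUnsafe call, and an honest updateValue
// output satisfies all three by construction.
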
+	if !noG2 && !sameRatioUnsafe(x.updatedCommitment.g1, prevCommitment.g1, *x.updatedCommitment.g2, *prevCommitment.g2) {
+		return false
+	}
+
+	// now verify that updated₁/previous₁ = x ( = x/g1 = π/r )
+	// have to use the latter value for the RHS because sameRatio needs both 𝔾₁ and 𝔾₂ values
+	if !sameRatioUnsafe(x.updatedCommitment.g1, prevCommitment.g1, x.contributionPok, r) {
+		return false
+	}
+
+	return true
+}

From 7c93ec0e415cd8982ebf13db3c375767e02ef833 Mon Sep 17 00:00:00 2001
From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com>
Date: Fri, 11 Oct 2024 15:43:56 -0500
Subject: [PATCH 05/25] feat new phase struct

---
 backend/groth16/bn254/mpcsetup/phase1.go | 3 +++
 backend/groth16/bn254/mpcsetup/utils.go  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/backend/groth16/bn254/mpcsetup/phase1.go b/backend/groth16/bn254/mpcsetup/phase1.go
index a912a473aa..1095392b36 100644
--- a/backend/groth16/bn254/mpcsetup/phase1.go
+++ b/backend/groth16/bn254/mpcsetup/phase1.go
@@ -25,6 +25,9 @@ import (
 	"math/big"
 )
 
+type phase1 struct {
+}
+
 // Phase1 represents the Phase1 of the MPC described in
 // https://eprint.iacr.org/2017/1050.pdf
 //
diff --git a/backend/groth16/bn254/mpcsetup/utils.go b/backend/groth16/bn254/mpcsetup/utils.go
index e4bd05fa0d..e45ea811a2 100644
--- a/backend/groth16/bn254/mpcsetup/utils.go
+++ b/backend/groth16/bn254/mpcsetup/utils.go
@@ -252,7 +252,7 @@ func (x *valueUpdate) verify(prevCommitment pair, challenge []byte, dst byte) bo
 	// verify commitment proof of knowledge. CheckPOK, algorithm 4 from section 3.7
 	r := genR(x.contributionCommitment, x.updatedCommitment.g1, challenge, dst) // verification challenge in the form of a g2 base
 	_, _, g1, _ := curve.Generators()
-	if !sameRatioUnsafe(x.contributionCommitment, g1, x.contributionPok, r) { // π ?= x.r i.e. x/g1 =? π/r
+	if !sameRatioUnsafe(x.contributionCommitment, g1, x.contributionPok, r) { // π =? x.r i.e. x/g1 =?
π/r return false } From c35033691005ec06fd4f0f422a5a39117e566feb Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Fri, 11 Oct 2024 18:04:06 -0500 Subject: [PATCH 06/25] refactor phase1 --- backend/groth16/bn254/mpcsetup/phase1.go | 204 +++++++++++++++++++---- backend/groth16/bn254/mpcsetup/utils.go | 101 ++++++----- 2 files changed, 231 insertions(+), 74 deletions(-) diff --git a/backend/groth16/bn254/mpcsetup/phase1.go b/backend/groth16/bn254/mpcsetup/phase1.go index 1095392b36..afed0e31a6 100644 --- a/backend/groth16/bn254/mpcsetup/phase1.go +++ b/backend/groth16/bn254/mpcsetup/phase1.go @@ -19,13 +19,89 @@ package mpcsetup import ( "crypto/sha256" "errors" + "fmt" + "github.com/consensys/gnark-crypto/ecc" curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fr" "math" "math/big" + "runtime" + "sync" ) +// Phase1 represents the Phase1 of the MPC described in +// https://eprint.iacr.org/2017/1050.pdf +// +// Also known as "Powers of Tau" type phase1 struct { + Principal struct { // "main" contributions + Tau, Alpha, Beta valueUpdate + } + G1Derived struct { + Tau []curve.G1Affine // {[τ⁰]₁, [τ¹]₁, [τ²]₁, …, [τ²ⁿ⁻²]₁} + AlphaTau []curve.G1Affine // {α[τ⁰]₁, α[τ¹]₁, α[τ²]₁, …, α[τⁿ⁻¹]₁} + BetaTau []curve.G1Affine // {β[τ⁰]₁, β[τ¹]₁, β[τ²]₁, …, β[τⁿ⁻¹]₁} + } + G2Derived struct { + Tau []curve.G2Affine // {[τ⁰]₂, [τ¹]₂, [τ²]₂, …, [τⁿ⁻¹]₂} + } + Challenge []byte // Hash of the transcript PRIOR to this participant +} + +func eraseBigInts(i ...*big.Int) { + for _, i := range i { + if i != nil { + for j := range i.Bits() { + i.Bits()[j] = 0 + } + } + } +} + +func eraseFrVectors(v ...[]fr.Element) { + for _, v := range v { + for i := range v { + v[i].SetZero() + } + } +} + +// Contribute contributes randomness to the phase1 object. This mutates phase1. +// p is trusted to be well-formed. The ReadFrom function performs such basic sanity checks. +func (p *phase1) Contribute() { + N := len(p.G2Derived.Tau) + challenge := p.hash() + + // Generate main value updates + var tau, alpha, beta *big.Int + p.Principal.Tau, tau = updateValue(p.Principal.Tau.updatedCommitment, challenge, 1) + p.Principal.Alpha, alpha = updateValue(p.Principal.Alpha.updatedCommitment, challenge, 2) + p.Principal.Beta, beta = updateValue(p.Principal.Beta.updatedCommitment, challenge, 3) + + defer eraseBigInts(tau, alpha, beta) + + // Compute τ, ατ, and βτ + taus := powers(tau, 2*N-1) + alphaTau := make([]fr.Element, N) + betaTau := make([]fr.Element, N) + + defer eraseFrVectors(taus, alphaTau, betaTau) + + alphaTau[0].SetBigInt(alpha) + betaTau[0].SetBigInt(beta) + for i := 1; i < N; i++ { + alphaTau[i].Mul(&taus[i], &alphaTau[0]) + betaTau[i].Mul(&taus[i], &betaTau[0]) + } + + // Update using previous parameters + // TODO @gbotrel working with jacobian points here will help with perf. + scaleG1InPlace(p.G1Derived.Tau, taus) + scaleG2InPlace(p.G2Derived.Tau, taus[0:N]) + scaleG1InPlace(p.G1Derived.AlphaTau, alphaTau) + scaleG1InPlace(p.G1Derived.BetaTau, betaTau) + + p.Challenge = challenge } // Phase1 represents the Phase1 of the MPC described in @@ -88,42 +164,6 @@ func InitPhase1(power int) (phase1 Phase1) { return } -// Contribute contributes randomness to the phase1 object. This mutates phase1. 
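
// Aside: a minimal sketch, not from the patch, of why the eraseBigInts helper
// above actually scrubs its inputs: big.Int.Bits returns the little-endian
// word slice sharing the Int's underlying array, so zeroing it overwrites the
// secret limbs in place, assuming no copies of the value were made earlier:
//
//	secret, _ := rand.Int(rand.Reader, fr.Modulus())
//	w := secret.Bits() // w aliases secret's storage, no copy
//	for i := range w {
//		w[i] = 0 // the backing memory now holds zeros
//	}
//
// eraseFrVectors relies on the same idea, element-wise via fr.Element.SetZero.
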
-func (phase1 *Phase1) Contribute() { - N := len(phase1.Parameters.G2.Tau) - - // Generate key pairs - var tau, alpha, beta fr.Element - tau.SetRandom() - alpha.SetRandom() - beta.SetRandom() - phase1.PublicKeys.Tau = newPublicKey(tau, phase1.Hash[:], 1) - phase1.PublicKeys.Alpha = newPublicKey(alpha, phase1.Hash[:], 2) - phase1.PublicKeys.Beta = newPublicKey(beta, phase1.Hash[:], 3) - - // Compute powers of τ, ατ, and βτ - taus := powers(tau, 2*N-1) - alphaTau := make([]fr.Element, N) - betaTau := make([]fr.Element, N) - for i := 0; i < N; i++ { - alphaTau[i].Mul(&taus[i], &alpha) - betaTau[i].Mul(&taus[i], &beta) - } - - // Update using previous parameters - // TODO @gbotrel working with jacobian points here will help with perf. - scaleG1InPlace(phase1.Parameters.G1.Tau, taus) - scaleG2InPlace(phase1.Parameters.G2.Tau, taus[0:N]) - scaleG1InPlace(phase1.Parameters.G1.AlphaTau, alphaTau) - scaleG1InPlace(phase1.Parameters.G1.BetaTau, betaTau) - var betaBI big.Int - beta.BigInt(&betaBI) - phase1.Parameters.G2.Beta.ScalarMultiplication(&phase1.Parameters.G2.Beta, &betaBI) - - // Compute hash of Contribution - phase1.Hash = phase1.hash() -} - func VerifyPhase1(c0, c1 *Phase1, c ...*Phase1) error { contribs := append([]*Phase1{c0, c1}, c...) for i := 0; i < len(contribs)-1; i++ { @@ -134,6 +174,97 @@ func VerifyPhase1(c0, c1 *Phase1, c ...*Phase1) error { return nil } +// Verify assumes previous is correct +func (p *phase1) Verify(previous *phase1) error { + + if err := p.Principal.Tau.verify(previous.Principal.Tau.updatedCommitment, p.Challenge, 1); err != nil { + return fmt.Errorf("failed to verify contribution to τ: %w", err) + } + if err := p.Principal.Alpha.verify(previous.Principal.Alpha.updatedCommitment, p.Challenge, 2); err != nil { + return fmt.Errorf("failed to verify contribution to α: %w", err) + } + if err := p.Principal.Beta.verify(previous.Principal.Beta.updatedCommitment, p.Challenge, 3); err != nil { + return fmt.Errorf("failed to verify contribution to β: %w", err) + } + + if !areInSubGroupG1(p.G1Derived.Tau) || !areInSubGroupG1(p.G1Derived.BetaTau) || !areInSubGroupG1(p.G1Derived.AlphaTau) { + return errors.New("derived values 𝔾₁ subgroup check failed") + } + if !areInSubGroupG2(p.G2Derived.Tau) { + return errors.New("derived values 𝔾₂ subgroup check failed") + } + + _, _, g1, g2 := curve.Generators() + + // for 1 ≤ i ≤ 2N-3 we want to check τⁱ⁺¹/τⁱ = τ + // i.e. e(τⁱ⁺¹,[1]₂) = e(τⁱ,[τ]₂). Due to bi-linearity we can instead check + // e(∑rⁱ⁻¹τⁱ⁺¹,[1]₂) = e(∑rⁱ⁻¹τⁱ,[τ]₂), which is tantamount to the check + // ∑rⁱ⁻¹τⁱ⁺¹ / ∑rⁱ⁻¹τⁱ = τ + r := linearCombCoeffs(len(p.G1Derived.Tau) - 1) // the longest of all lengths + // will be reusing the coefficient TODO @Tabaie make sure that's okay + nc := runtime.NumCPU() + var ( + tauT1, tauS1, alphaTT, alphaTS, betaTT, betaTS curve.G1Affine + tauT2, tauS2 curve.G2Affine + wg sync.WaitGroup + ) + + mulExpG1 := func(v *curve.G1Affine, points []curve.G1Affine, nbTasks int) { + if _, err := v.MultiExp(points, r[:len(points)], ecc.MultiExpConfig{NbTasks: nbTasks}); err != nil { + panic(err) + } + wg.Done() + } + + mulExpG2 := func(v *curve.G2Affine, points []curve.G2Affine, nbTasks int) { + if _, err := v.MultiExp(points, r[:len(points)], ecc.MultiExpConfig{NbTasks: nbTasks}); err != nil { + panic(err) + } + wg.Done() + } + + if nc < 2 { + mulExpG1(&tauT1, truncate(p.G1Derived.Tau), nc) + mulExpG1(&tauS1, p.G1Derived.Tau[1:], nc) + } else { + // larger tasks than the others. 
better get them done together + wg.Add(2) + go mulExpG1(&tauT1, truncate(p.G1Derived.Tau), nc/2) // truncated: smaller powers + mulExpG1(&tauS1, p.G1Derived.Tau[1:], nc - nc/2) // shifted: larger powers + wg.Wait() + } + + if nc < 4 { + mulExpG1(&alphaTT, truncate(p.G1Derived.AlphaTau), nc) + mulExpG1(&alphaTS, p.G1Derived.AlphaTau[1:], nc) + mulExpG1(&betaTT, truncate(p.G1Derived.BetaTau), nc) + mulExpG1(&betaTS, p.G1Derived.BetaTau[1:], nc) + } else { + wg.Add(4) + go mulExpG1(&alphaTT, truncate(p.G1Derived.AlphaTau), nc/4) + go mulExpG1(&alphaTS, p.G1Derived.AlphaTau[1:], nc/2 - nc/4) + go mulExpG1(&betaTT, truncate(p.G1Derived.BetaTau), nc/4) + mulExpG1(&betaTS, p.G1Derived.BetaTau[1:], nc - nc/2 - nc/4) + wg.Wait() + } + + + + if err := tauT1.MultiExp.G1Derived.Tau[:len(p.G1Derived.Tau)-1], r, ecc.MultiExpConfig{NbTasks: nc/2}) + + tauT1, tauS1 := linearCombinationG1(r, p.G1Derived.Tau[1:]) // at this point we should already know that tau[0] = infty and tau[1] = τ. ReadFrom is in charge of ensuring that. + + + if !sameRatioUnsafe(tauS1, tauT1, *p.Principal.Tau.updatedCommitment.g2, g2) { + return errors.New("couldn't verify 𝔾₁ representations of the τⁱ") + } + tauT2, tauS2 := linearCombinationG2(r, p.G2Derived.Tau[1:]) + if !sameRatioUnsafe(p.Principal.Tau.updatedCommitment.g1, g1, tauS2, tauT2) { + return errors.New("couldn't verify 𝔾₂ representations of the τⁱ") + } + +} + // verifyPhase1 checks that a contribution is based on a known previous Phase1 state. func verifyPhase1(current, contribution *Phase1) error { // Compute R for τ, α, β @@ -153,6 +284,7 @@ func verifyPhase1(current, contribution *Phase1) error { } // Check for valid updates using previous parameters + // if !sameRatio(contribution.Parameters.G1.Tau[1], current.Parameters.G1.Tau[1], tauR, contribution.PublicKeys.Tau.XR) { return errors.New("couldn't verify that [τ]₁ is based on previous contribution") } diff --git a/backend/groth16/bn254/mpcsetup/utils.go b/backend/groth16/bn254/mpcsetup/utils.go index e45ea811a2..499fda08d1 100644 --- a/backend/groth16/bn254/mpcsetup/utils.go +++ b/backend/groth16/bn254/mpcsetup/utils.go @@ -19,6 +19,7 @@ package mpcsetup import ( "bytes" "crypto/rand" + "errors" "math/big" "math/bits" "runtime" @@ -71,12 +72,22 @@ func bitReverse[T any](a []T) { } } +func linearCombCoeffs(n int) []fr.Element { + a, err := rand.Int(rand.Reader, fr.Modulus()) + if err != nil { + panic(err) + } + return powers(a, n) +} + // Returns [1, a, a², ..., aⁿ⁻¹ ] -func powers(a fr.Element, n int) []fr.Element { +func powers(a *big.Int, n int) []fr.Element { + var aMont fr.Element + aMont.SetBigInt(a) result := make([]fr.Element, n) result[0] = fr.NewElement(1) for i := 1; i < n; i++ { - result[i].Mul(&result[i-1], &a) + result[i].Mul(&result[i-1], &aMont) } return result } @@ -137,29 +148,23 @@ func merge(A, B []curve.G1Affine) (a, b curve.G1Affine) { return } -// L1 = ∑ rᵢAᵢ, L2 = ∑ rᵢAᵢ₊₁∈𝔾₁ -func linearCombinationG1(A []curve.G1Affine) (L1, L2 curve.G1Affine) { - nc := runtime.NumCPU() +// truncated = ∑ rᵢAᵢ, shifted = ∑ rᵢAᵢ₊₁∈𝔾₁ +func linearCombinationG1(r []fr.Element, A []curve.G1Affine, nbTasks int) curve.G1Affine { n := len(A) - r := make([]fr.Element, n-1) - for i := 0; i < n-1; i++ { - r[i].SetRandom() - } - L1.MultiExp(A[:n-1], r, ecc.MultiExpConfig{NbTasks: nc / 2}) - L2.MultiExp(A[1:], r, ecc.MultiExpConfig{NbTasks: nc / 2}) + r = r[:n-1] + var res curve.G1Affine + res.MultiExp(A[:n-1], r, ecc.MultiExpConfig{NbTasks: nc / 2}) + shifted.MultiExp(A[1:], r, ecc.MultiExpConfig{NbTasks: nc / 2}) 
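
// Aside: a sketch, not from the patch, of the identity these truncated/shifted
// combinations exploit. For Aᵢ = [τⁱ]₁ and powers of a random scalar r,
//
//	shifted = ∑ᵢ rⁱ·A[i+1] = τ · ∑ᵢ rⁱ·A[i] = τ · truncated
//
// so the single check e(shifted, [1]₂) = e(truncated, [τ]₂) certifies every
// consecutive ratio A[i+1]/A[i] at once; a malformed sequence passes with
// probability at most about len(A)/|Fr| over the choice of r (Schwartz-Zippel).
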
return } -// L1 = ∑ rᵢAᵢ, L2 = ∑ rᵢAᵢ₊₁∈𝔾₂ -func linearCombinationG2(A []curve.G2Affine) (L1, L2 curve.G2Affine) { +// truncated = ∑ rᵢAᵢ, shifted = ∑ rᵢAᵢ₊₁∈𝔾₂ +func linearCombinationG2(r []fr.Element, A []curve.G2Affine) (truncated, shifted curve.G2Affine) { nc := runtime.NumCPU() n := len(A) - r := make([]fr.Element, n-1) - for i := 0; i < n-1; i++ { - r[i].SetRandom() - } - L1.MultiExp(A[:n-1], r, ecc.MultiExpConfig{NbTasks: nc / 2}) - L2.MultiExp(A[1:], r, ecc.MultiExpConfig{NbTasks: nc / 2}) + r = r[:n-1] + truncated.MultiExp(A[:n-1], r, ecc.MultiExpConfig{NbTasks: nc / 2}) + shifted.MultiExp(A[1:], r, ecc.MultiExpConfig{NbTasks: nc / 2}) return } @@ -203,20 +208,11 @@ type valueUpdate struct { // updateValue produces values associated with contribution to an existing value. // if prevCommitment contains only a 𝔾₁ value, then so will updatedCommitment -func updateValue(prevCommitment pair, challenge []byte, dst byte) valueUpdate { +// the second output is toxic waste. It is the caller's responsibility to safely "dispose" of it. +func updateValue(prevCommitment pair, challenge []byte, dst byte) (valueUpdate, *big.Int) { var x valueUpdate contributionValue, err := rand.Int(rand.Reader, fr.Modulus()) - eraseToxicWaste := func() { - if contributionValue == nil { - return - } - for i := range contributionValue.Bits() { // TODO check that this works - contributionValue.Bits()[i] = 0 - } - } - defer eraseToxicWaste() - if err != nil { panic(err) } @@ -232,40 +228,69 @@ func updateValue(prevCommitment pair, challenge []byte, dst byte) valueUpdate { pokBase := genR(x.contributionCommitment, x.updatedCommitment.g1, challenge, dst) // r x.contributionPok.ScalarMultiplication(&pokBase, contributionValue) - return x + return x, contributionValue } // verify corresponds with verification steps {i, i+3} with 1 ≤ i ≤ 3 in section 7.1 of Bowe-Gabizon17 // it checks the proof of knowledge of the contribution, and the fact that the product of the contribution // and previous commitment makes the new commitment. // prevCommitment is assumed to be valid. No subgroup check and the like. -func (x *valueUpdate) verify(prevCommitment pair, challenge []byte, dst byte) bool { +func (x *valueUpdate) verify(prevCommitment pair, challenge []byte, dst byte) error { noG2 := prevCommitment.g2 == nil - if noG2 != (x.updatedCommitment.g2 == nil) { // no erasing or creating g2 values - return false + if noG2 != (x.updatedCommitment.g2 == nil) { + return errors.New("erasing or creating g2 values") } if !x.contributionPok.IsInSubGroup() || !x.contributionCommitment.IsInSubGroup() || !x.updatedCommitment.validUpdate() { - return false + return errors.New("contribution values subgroup check failed") } // verify commitment proof of knowledge. CheckPOK, algorithm 4 from section 3.7 r := genR(x.contributionCommitment, x.updatedCommitment.g1, challenge, dst) // verification challenge in the form of a g2 base _, _, g1, _ := curve.Generators() if !sameRatioUnsafe(x.contributionCommitment, g1, x.contributionPok, r) { // π =? x.r i.e. x/g1 =? π/r - return false + return errors.New("contribution proof of knowledge verification failed") } // check that the updated/previous ratio is consistent between the 𝔾₁ and 𝔾₂ representations. Based on CONSISTENT, algorithm 2 in Section 3.6. 
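
// Aside: a hypothetical caller of the new updateValue signature, mirroring
// what Contribute in phase1.go does for τ, α and β (names are from this
// series, the driver is invented for illustration):
//
//	proof, secret := updateValue(prev.updatedCommitment, challenge, 1)
//	defer eraseBigInts(secret) // scrub the toxic waste once it is spent
//	// ... scale the derived parameters by secret, then publish proof ...
//
// Note that verify never needs secret: it works from the commitments and π alone.
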
 	if !noG2 && !sameRatioUnsafe(x.updatedCommitment.g1, prevCommitment.g1, *x.updatedCommitment.g2, *prevCommitment.g2) {
-		return false
+		return errors.New("g2 update inconsistent")
 	}
 
 	// now verify that updated₁/previous₁ = x ( = x/g1 = π/r )
 	// have to use the latter value for the RHS because sameRatio needs both 𝔾₁ and 𝔾₂ values
 	if !sameRatioUnsafe(x.updatedCommitment.g1, prevCommitment.g1, x.contributionPok, r) {
-		return false
+		return errors.New("g1 update inconsistent")
 	}
+	return nil
+}
+
+func toRefs[T any](s []T) []*T {
+	res := make([]*T, len(s))
+	for i := range s {
+		res[i] = &s[i]
+	}
+	return res
+}
+
+func areInSubGroup[T interface{ IsInSubGroup() bool }](s []T) bool {
+	for i := range s {
+		if !s[i].IsInSubGroup() {
+			return false
+		}
+	}
 	return true
 }
+
+func areInSubGroupG1(s []curve.G1Affine) bool {
+	return areInSubGroup(toRefs(s))
+}
+
+func areInSubGroupG2(s []curve.G2Affine) bool {
+	return areInSubGroup(toRefs(s))
+}
+
+func truncate[T any](s []T) []T {
+	return s[:len(s)-1]
+}

From fcde6650dd764c5161e0153f6b9864f55b8caf79 Mon Sep 17 00:00:00 2001
From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com>
Date: Mon, 14 Oct 2024 16:55:43 -0500
Subject: [PATCH 07/25] feat complete phase1 verification

---
 backend/groth16/bn254/mpcsetup/phase1.go | 63 ++++++++++--------------
 backend/groth16/bn254/mpcsetup/utils.go  | 34 +++++++++++++
 2 files changed, 59 insertions(+), 38 deletions(-)

diff --git a/backend/groth16/bn254/mpcsetup/phase1.go b/backend/groth16/bn254/mpcsetup/phase1.go
index afed0e31a6..34a157411f 100644
--- a/backend/groth16/bn254/mpcsetup/phase1.go
+++ b/backend/groth16/bn254/mpcsetup/phase1.go
@@ -202,11 +202,14 @@ func (p *phase1) Verify(previous *phase1) error {
 	// ∑rⁱ⁻¹τⁱ⁺¹ / ∑rⁱ⁻¹τⁱ = τ
 	r := linearCombCoeffs(len(p.G1Derived.Tau) - 1) // the longest of all lengths
 	// will be reusing the coefficient TODO @Tabaie make sure that's okay
+
+	tauT1, tauS1 := linearCombinationsG1(r, p.G1Derived.Tau)
+	tauT2, tauS2 := linearCombinationsG2(r, p.G2Derived.Tau)
+
 	nc := runtime.NumCPU()
 	var (
-		tauT1, tauS1, alphaTT, alphaTS, betaTT, betaTS curve.G1Affine
-		tauT2, tauS2                                   curve.G2Affine
-		wg                                             sync.WaitGroup
+		alphaTS, betaTS curve.G1Affine
+		wg              sync.WaitGroup
 	)
 
 	mulExpG1 := func(v *curve.G1Affine, points []curve.G1Affine, nbTasks int) {
 		if _, err := v.MultiExp(points, r[:len(points)], ecc.MultiExpConfig{NbTasks: nbTasks}); err != nil {
 			panic(err)
 		}
 		wg.Done()
 	}
 
-	mulExpG2 := func(v *curve.G2Affine, points []curve.G2Affine, nbTasks int) {
-		if _, err := v.MultiExp(points, r[:len(points)], ecc.MultiExpConfig{NbTasks: nbTasks}); err != nil {
-			panic(err)
-		}
-		wg.Done()
-	}
-
-	if nc < 2 {
-		mulExpG1(&tauT1, truncate(p.G1Derived.Tau), nc)
-		mulExpG1(&tauS1, p.G1Derived.Tau[1:], nc)
-	} else {
-		// larger tasks than the others.
better get them done together - wg.Add(2) - go mulExpG1(&tauT1, truncate(p.G1Derived.Tau), nc/2) // truncated: smaller powers - mulExpG1(&tauS1, p.G1Derived.Tau[1:], nc - nc/2) // shifted: larger powers + if nc >= 2 { + wg.Add(2) // small tasks over 𝔾₁ + go mulExpG1(&alphaTS, p.G1Derived.AlphaTau[1:], nc/2) + mulExpG1(&betaTS, p.G1Derived.BetaTau[1:], nc-nc/2) wg.Wait() - } - - if nc < 4 { - mulExpG1(&alphaTT, truncate(p.G1Derived.AlphaTau), nc) + } else { mulExpG1(&alphaTS, p.G1Derived.AlphaTau[1:], nc) - mulExpG1(&betaTT, truncate(p.G1Derived.BetaTau), nc) mulExpG1(&betaTS, p.G1Derived.BetaTau[1:], nc) - } else { - wg.Add(4) - go mulExpG1(&alphaTT, truncate(p.G1Derived.AlphaTau), nc/4) - go mulExpG1(&alphaTS, p.G1Derived.AlphaTau[1:], nc/2 - nc/4) - go mulExpG1(&betaTT, truncate(p.G1Derived.BetaTau), nc/4) - mulExpG1(&betaTS, p.G1Derived.BetaTau[1:], nc - nc/2 - nc/4) - wg.Wait() } - - - if err := tauT1.MultiExp.G1Derived.Tau[:len(p.G1Derived.Tau)-1], r, ecc.MultiExpConfig{NbTasks: nc/2}) - - tauT1, tauS1 := linearCombinationG1(r, p.G1Derived.Tau[1:]) // at this point we should already know that tau[0] = infty and tau[1] = τ. ReadFrom is in charge of ensuring that. - - if !sameRatioUnsafe(tauS1, tauT1, *p.Principal.Tau.updatedCommitment.g2, g2) { return errors.New("couldn't verify 𝔾₁ representations of the τⁱ") } - tauT2, tauS2 := linearCombinationG2(r, p.G2Derived.Tau[1:]) + if !sameRatioUnsafe(p.Principal.Tau.updatedCommitment.g1, g1, tauS2, tauT2) { return errors.New("couldn't verify 𝔾₂ representations of the τⁱ") } + // for 1 ≤ i < N we want to check ατⁱ/τⁱ = α + // with a similar bi-linearity argument as above we can do this with a single pairing check + // Note that the check at i = 0 is part of the well-formedness requirement and is not checked here, + // but guaranteed by ReadFrom. + + if !sameRatioUnsafe(alphaTS, tauS1, *p.Principal.Alpha.updatedCommitment.g2, g2) { + return errors.New("couldn't verify the ατⁱ") + } + if !sameRatioUnsafe(betaTS, tauS1, *p.Principal.Beta.updatedCommitment.g2, g2) { + return errors.New("couldn't verify the βτⁱ") + } + + return nil } // verifyPhase1 checks that a contribution is based on a known previous Phase1 state. diff --git a/backend/groth16/bn254/mpcsetup/utils.go b/backend/groth16/bn254/mpcsetup/utils.go index 499fda08d1..6a446e3236 100644 --- a/backend/groth16/bn254/mpcsetup/utils.go +++ b/backend/groth16/bn254/mpcsetup/utils.go @@ -168,6 +168,40 @@ func linearCombinationG2(r []fr.Element, A []curve.G2Affine) (truncated, shifted return } +// linearCombinationsG1 assumes, and does not check, that rPowers[i+1] = rPowers[1].rPowers[i] for all applicable i +// Also assumed that 3 ≤ N ≔ len(A) ≤ len(rPowers) +func linearCombinationsG1(rPowers []fr.Element, A []curve.G1Affine) (truncated, shifted curve.G1Affine) { + // the common section, 1 to N-2 + var common curve.G1Affine + common.MultiExp(A[1:len(A)-1], rPowers[:len(A)-2], ecc.MultiExpConfig{NbTasks: runtime.NumCPU()}) // A[1] + r.A[2] + ... + rᴺ⁻³.A[N-2] + + var c big.Int + rPowers[1].BigInt(&c) + truncated.ScalarMultiplication(&common, &c).Add(&truncated, &A[0]) // A[0] + r.A[1] + r².A[2] + ... 
+ rᴺ⁻².A[N-2] + + rPowers[len(A)-1].BigInt(&c) + shifted.ScalarMultiplication(&A[len(A)-1], &c).Add(&shifted, &common) + + return +} + +// linearCombinationsG2 assumes, and does not check, that rPowers[i+1] = rPowers[1].rPowers[i] for all applicable i +// Also assumed that 3 ≤ N ≔ len(A) ≤ len(rPowers) +func linearCombinationsG2(rPowers []fr.Element, A []curve.G2Affine) (truncated, shifted curve.G2Affine) { + // the common section, 1 to N-2 + var common curve.G2Affine + common.MultiExp(A[1:len(A)-1], rPowers[:len(A)-2], ecc.MultiExpConfig{NbTasks: runtime.NumCPU()}) // A[1] + r.A[2] + ... + rᴺ⁻³.A[N-2] + + var c big.Int + rPowers[1].BigInt(&c) + truncated.ScalarMultiplication(&common, &c).Add(&truncated, &A[0]) // A[0] + r.A[1] + r².A[2] + ... + rᴺ⁻².A[N-2] + + rPowers[len(A)-1].BigInt(&c) + shifted.ScalarMultiplication(&A[len(A)-1], &c).Add(&shifted, &common) + + return +} + // Generate R∈𝔾₂ as Hash(gˢ, gˢˣ, challenge, dst) // it is to be used as a challenge for generating a proof of knowledge to x // π ≔ x.r; e([1]₁, π) =﹖ e([x]₁, r) From 6fc1ffe2e25596cd19e0936fbfcb2611c310c7d7 Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Mon, 14 Oct 2024 17:30:13 -0500 Subject: [PATCH 08/25] fix check without assuming alpha, beta in G2 --- backend/groth16/bn254/mpcsetup/marshal.go | 70 ++++---- backend/groth16/bn254/mpcsetup/phase1.go | 172 ++++--------------- backend/groth16/bn254/mpcsetup/setup_test.go | 16 +- backend/groth16/bn254/mpcsetup/utils.go | 27 +++ 4 files changed, 99 insertions(+), 186 deletions(-) diff --git a/backend/groth16/bn254/mpcsetup/marshal.go b/backend/groth16/bn254/mpcsetup/marshal.go index 08cb2ae3d1..2985022bbd 100644 --- a/backend/groth16/bn254/mpcsetup/marshal.go +++ b/backend/groth16/bn254/mpcsetup/marshal.go @@ -22,31 +22,31 @@ import ( ) // WriteTo implements io.WriterTo -func (phase1 *Phase1) WriteTo(writer io.Writer) (int64, error) { - n, err := phase1.writeTo(writer) +func (p *Phase1) WriteTo(writer io.Writer) (int64, error) { + n, err := p.writeTo(writer) if err != nil { return n, err } - nBytes, err := writer.Write(phase1.Hash) + nBytes, err := writer.Write(p.Hash) return int64(nBytes) + n, err } -func (phase1 *Phase1) writeTo(writer io.Writer) (int64, error) { +func (p *Phase1) writeTo(writer io.Writer) (int64, error) { toEncode := []interface{}{ - &phase1.PublicKeys.Tau.SG, - &phase1.PublicKeys.Tau.SXG, - &phase1.PublicKeys.Tau.XR, - &phase1.PublicKeys.Alpha.SG, - &phase1.PublicKeys.Alpha.SXG, - &phase1.PublicKeys.Alpha.XR, - &phase1.PublicKeys.Beta.SG, - &phase1.PublicKeys.Beta.SXG, - &phase1.PublicKeys.Beta.XR, - phase1.Parameters.G1.Tau, - phase1.Parameters.G1.AlphaTau, - phase1.Parameters.G1.BetaTau, - phase1.Parameters.G2.Tau, - &phase1.Parameters.G2.Beta, + &p.PublicKeys.Tau.SG, + &p.PublicKeys.Tau.SXG, + &p.PublicKeys.Tau.XR, + &p.PublicKeys.Alpha.SG, + &p.PublicKeys.Alpha.SXG, + &p.PublicKeys.Alpha.XR, + &p.PublicKeys.Beta.SG, + &p.PublicKeys.Beta.SXG, + &p.PublicKeys.Beta.XR, + p.Parameters.G1.Tau, + p.Parameters.G1.AlphaTau, + p.Parameters.G1.BetaTau, + p.Parameters.G2.Tau, + &p.Parameters.G2.Beta, } enc := curve.NewEncoder(writer) @@ -59,22 +59,22 @@ func (phase1 *Phase1) writeTo(writer io.Writer) (int64, error) { } // ReadFrom implements io.ReaderFrom -func (phase1 *Phase1) ReadFrom(reader io.Reader) (int64, error) { +func (p *Phase1) ReadFrom(reader io.Reader) (int64, error) { toEncode := []interface{}{ - &phase1.PublicKeys.Tau.SG, - &phase1.PublicKeys.Tau.SXG, - &phase1.PublicKeys.Tau.XR, - 
&phase1.PublicKeys.Alpha.SG, - &phase1.PublicKeys.Alpha.SXG, - &phase1.PublicKeys.Alpha.XR, - &phase1.PublicKeys.Beta.SG, - &phase1.PublicKeys.Beta.SXG, - &phase1.PublicKeys.Beta.XR, - &phase1.Parameters.G1.Tau, - &phase1.Parameters.G1.AlphaTau, - &phase1.Parameters.G1.BetaTau, - &phase1.Parameters.G2.Tau, - &phase1.Parameters.G2.Beta, + &p.PublicKeys.Tau.SG, + &p.PublicKeys.Tau.SXG, + &p.PublicKeys.Tau.XR, + &p.PublicKeys.Alpha.SG, + &p.PublicKeys.Alpha.SXG, + &p.PublicKeys.Alpha.XR, + &p.PublicKeys.Beta.SG, + &p.PublicKeys.Beta.SXG, + &p.PublicKeys.Beta.XR, + &p.Parameters.G1.Tau, + &p.Parameters.G1.AlphaTau, + &p.Parameters.G1.BetaTau, + &p.Parameters.G2.Tau, + &p.Parameters.G2.Beta, } dec := curve.NewDecoder(reader) @@ -83,8 +83,8 @@ func (phase1 *Phase1) ReadFrom(reader io.Reader) (int64, error) { return dec.BytesRead(), err } } - phase1.Hash = make([]byte, 32) - nBytes, err := reader.Read(phase1.Hash) + p.Hash = make([]byte, 32) + nBytes, err := reader.Read(p.Hash) return dec.BytesRead() + int64(nBytes), err } diff --git a/backend/groth16/bn254/mpcsetup/phase1.go b/backend/groth16/bn254/mpcsetup/phase1.go index 34a157411f..1099d727c2 100644 --- a/backend/groth16/bn254/mpcsetup/phase1.go +++ b/backend/groth16/bn254/mpcsetup/phase1.go @@ -17,23 +17,21 @@ package mpcsetup import ( + "bytes" "crypto/sha256" "errors" "fmt" - "github.com/consensys/gnark-crypto/ecc" curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fr" "math" "math/big" - "runtime" - "sync" ) // Phase1 represents the Phase1 of the MPC described in // https://eprint.iacr.org/2017/1050.pdf // // Also known as "Powers of Tau" -type phase1 struct { +type Phase1 struct { Principal struct { // "main" contributions Tau, Alpha, Beta valueUpdate } @@ -48,27 +46,9 @@ type phase1 struct { Challenge []byte // Hash of the transcript PRIOR to this participant } -func eraseBigInts(i ...*big.Int) { - for _, i := range i { - if i != nil { - for j := range i.Bits() { - i.Bits()[j] = 0 - } - } - } -} - -func eraseFrVectors(v ...[]fr.Element) { - for _, v := range v { - for i := range v { - v[i].SetZero() - } - } -} - -// Contribute contributes randomness to the phase1 object. This mutates phase1. +// Contribute contributes randomness to the Phase1 object. This mutates Phase1. // p is trusted to be well-formed. The ReadFrom function performs such basic sanity checks. -func (p *phase1) Contribute() { +func (p *Phase1) Contribute() { N := len(p.G2Derived.Tau) challenge := p.hash() @@ -104,33 +84,16 @@ func (p *phase1) Contribute() { p.Challenge = challenge } -// Phase1 represents the Phase1 of the MPC described in -// https://eprint.iacr.org/2017/1050.pdf -// -// Also known as "Powers of Tau" -type Phase1 struct { - Parameters struct { - G1 struct { - Tau []curve.G1Affine // {[τ⁰]₁, [τ¹]₁, [τ²]₁, …, [τ²ⁿ⁻²]₁} - AlphaTau []curve.G1Affine // {α[τ⁰]₁, α[τ¹]₁, α[τ²]₁, …, α[τⁿ⁻¹]₁} - BetaTau []curve.G1Affine // {β[τ⁰]₁, β[τ¹]₁, β[τ²]₁, …, β[τⁿ⁻¹]₁} - } - G2 struct { - Tau []curve.G2Affine // {[τ⁰]₂, [τ¹]₂, [τ²]₂, …, [τⁿ⁻¹]₂} - Beta curve.G2Affine // [β]₂ - } - } - PublicKeys struct { - Tau, Alpha, Beta PublicKey - } - Hash []byte // sha256 hash -} - // InitPhase1 initialize phase 1 of the MPC. This is called once by the coordinator before // any randomness contribution is made (see Contribute()). 
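
// Aside: a minimal sketch, not from the patch, of the intended ceremony flow
// with this API; serialization between participants is elided:
//
//	prev := InitPhase1(power) // coordinator
//	cur := prev               // NOTE: a real driver must deep-copy here,
//	                          // e.g. like clone in setup_test.go
//	cur.Contribute()          // inject fresh secrets for τ, α, β
//	if err := cur.Verify(&prev); err != nil {
//		// reject this contribution and keep prev
//	}
//
// When a whole chain is verified at once, Verify can also back-fill
// cur.Challenge from the previous transcript hash.
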
func InitPhase1(power int) (phase1 Phase1) { N := int(math.Pow(2, float64(power))) + _, _, g1, g2 := curve.Generators() + + phase1.Challenge = []byte{0} + phase1.Principal.Alpha.setEmpty() + // Generate key pairs var tau, alpha, beta fr.Element tau.SetOne() @@ -167,7 +130,7 @@ func InitPhase1(power int) (phase1 Phase1) { func VerifyPhase1(c0, c1 *Phase1, c ...*Phase1) error { contribs := append([]*Phase1{c0, c1}, c...) for i := 0; i < len(contribs)-1; i++ { - if err := verifyPhase1(contribs[i], contribs[i+1]); err != nil { + if err := contribs[i].Verify(contribs[i+1]); err != nil { return err } } @@ -175,7 +138,14 @@ func VerifyPhase1(c0, c1 *Phase1, c ...*Phase1) error { } // Verify assumes previous is correct -func (p *phase1) Verify(previous *phase1) error { +func (p *Phase1) Verify(previous *Phase1) error { + + if prevHash := previous.hash(); !bytes.Equal(p.Challenge, previous.hash()) { // if chain-verifying contributions, challenge fields are optional as they can be computed as we go + if len(p.Challenge) != 0 { + return errors.New("the challenge does not match the previous phase's hash") + } + p.Challenge = prevHash + } if err := p.Principal.Tau.verify(previous.Principal.Tau.updatedCommitment, p.Challenge, 1); err != nil { return fmt.Errorf("failed to verify contribution to τ: %w", err) @@ -205,29 +175,8 @@ func (p *phase1) Verify(previous *phase1) error { tauT1, tauS1 := linearCombinationsG1(r, p.G1Derived.Tau) tauT2, tauS2 := linearCombinationsG2(r, p.G2Derived.Tau) - - nc := runtime.NumCPU() - var ( - alphaTS, betaTS curve.G1Affine - wg sync.WaitGroup - ) - - mulExpG1 := func(v *curve.G1Affine, points []curve.G1Affine, nbTasks int) { - if _, err := v.MultiExp(points, r[:len(points)], ecc.MultiExpConfig{NbTasks: nbTasks}); err != nil { - panic(err) - } - wg.Done() - } - - if nc >= 2 { - wg.Add(2) // small tasks over 𝔾₁ - go mulExpG1(&alphaTS, p.G1Derived.AlphaTau[1:], nc/2) - mulExpG1(&betaTS, p.G1Derived.BetaTau[1:], nc-nc/2) - wg.Wait() - } else { - mulExpG1(&alphaTS, p.G1Derived.AlphaTau[1:], nc) - mulExpG1(&betaTS, p.G1Derived.BetaTau[1:], nc) - } + alphaTT, alphaTS := linearCombinationsG1(r, p.G1Derived.AlphaTau) + betaTT, betaTS := linearCombinationsG1(r, p.G1Derived.BetaTau) if !sameRatioUnsafe(tauS1, tauT1, *p.Principal.Tau.updatedCommitment.g2, g2) { return errors.New("couldn't verify 𝔾₁ representations of the τⁱ") @@ -237,89 +186,26 @@ func (p *phase1) Verify(previous *phase1) error { return errors.New("couldn't verify 𝔾₂ representations of the τⁱ") } - // for 1 ≤ i < N we want to check ατⁱ/τⁱ = α + // for 0 ≤ i < N we want to check the ατⁱ + // By well-formedness checked by ReadFrom, we assume that ατ⁰ = α + // For 0 < i < N we check that ατⁱ/ατⁱ⁻¹ = τ, since we have a representation of τ in 𝔾₂ // with a similar bi-linearity argument as above we can do this with a single pairing check - // Note that the check at i = 0 is part of the well-formedness requirement and is not checked here, - // but guaranteed by ReadFrom. - if !sameRatioUnsafe(alphaTS, tauS1, *p.Principal.Alpha.updatedCommitment.g2, g2) { + if !sameRatioUnsafe(alphaTS, alphaTT, *p.Principal.Tau.updatedCommitment.g2, g2) { return errors.New("couldn't verify the ατⁱ") } - if !sameRatioUnsafe(betaTS, tauS1, *p.Principal.Beta.updatedCommitment.g2, g2) { + if !sameRatioUnsafe(betaTS, betaTT, *p.Principal.Tau.updatedCommitment.g2, g2) { return errors.New("couldn't verify the βτⁱ") } return nil } -// verifyPhase1 checks that a contribution is based on a known previous Phase1 state. 
-func verifyPhase1(current, contribution *Phase1) error { - // Compute R for τ, α, β - tauR := genR(contribution.PublicKeys.Tau.SG, contribution.PublicKeys.Tau.SXG, current.Hash[:], 1) - alphaR := genR(contribution.PublicKeys.Alpha.SG, contribution.PublicKeys.Alpha.SXG, current.Hash[:], 2) - betaR := genR(contribution.PublicKeys.Beta.SG, contribution.PublicKeys.Beta.SXG, current.Hash[:], 3) - - // Check for knowledge of toxic parameters - if !sameRatio(contribution.PublicKeys.Tau.SG, contribution.PublicKeys.Tau.SXG, contribution.PublicKeys.Tau.XR, tauR) { - return errors.New("couldn't verify public key of τ") - } - if !sameRatio(contribution.PublicKeys.Alpha.SG, contribution.PublicKeys.Alpha.SXG, contribution.PublicKeys.Alpha.XR, alphaR) { - return errors.New("couldn't verify public key of α") - } - if !sameRatio(contribution.PublicKeys.Beta.SG, contribution.PublicKeys.Beta.SXG, contribution.PublicKeys.Beta.XR, betaR) { - return errors.New("couldn't verify public key of β") - } - - // Check for valid updates using previous parameters - // - if !sameRatio(contribution.Parameters.G1.Tau[1], current.Parameters.G1.Tau[1], tauR, contribution.PublicKeys.Tau.XR) { - return errors.New("couldn't verify that [τ]₁ is based on previous contribution") - } - if !sameRatio(contribution.Parameters.G1.AlphaTau[0], current.Parameters.G1.AlphaTau[0], alphaR, contribution.PublicKeys.Alpha.XR) { - return errors.New("couldn't verify that [α]₁ is based on previous contribution") - } - if !sameRatio(contribution.Parameters.G1.BetaTau[0], current.Parameters.G1.BetaTau[0], betaR, contribution.PublicKeys.Beta.XR) { - return errors.New("couldn't verify that [β]₁ is based on previous contribution") - } - if !sameRatio(contribution.PublicKeys.Tau.SG, contribution.PublicKeys.Tau.SXG, contribution.Parameters.G2.Tau[1], current.Parameters.G2.Tau[1]) { - return errors.New("couldn't verify that [τ]₂ is based on previous contribution") - } - if !sameRatio(contribution.PublicKeys.Beta.SG, contribution.PublicKeys.Beta.SXG, contribution.Parameters.G2.Beta, current.Parameters.G2.Beta) { - return errors.New("couldn't verify that [β]₂ is based on previous contribution") +func (p *Phase1) hash() []byte { + if len(p.Challenge) == 0 { + panic("challenge field missing") } - - // Check for valid updates using powers of τ - _, _, g1, g2 := curve.Generators() - tauL1, tauL2 := linearCombinationG1(contribution.Parameters.G1.Tau) - if !sameRatio(tauL1, tauL2, contribution.Parameters.G2.Tau[1], g2) { - return errors.New("couldn't verify valid powers of τ in G₁") - } - alphaL1, alphaL2 := linearCombinationG1(contribution.Parameters.G1.AlphaTau) - if !sameRatio(alphaL1, alphaL2, contribution.Parameters.G2.Tau[1], g2) { - return errors.New("couldn't verify valid powers of α(τ) in G₁") - } - betaL1, betaL2 := linearCombinationG1(contribution.Parameters.G1.BetaTau) - if !sameRatio(betaL1, betaL2, contribution.Parameters.G2.Tau[1], g2) { - return errors.New("couldn't verify valid powers of α(τ) in G₁") - } - tau2L1, tau2L2 := linearCombinationG2(contribution.Parameters.G2.Tau) - if !sameRatio(contribution.Parameters.G1.Tau[1], g1, tau2L1, tau2L2) { - return errors.New("couldn't verify valid powers of τ in G₂") - } - - // Check hash of the contribution - h := contribution.hash() - for i := 0; i < len(h); i++ { - if h[i] != contribution.Hash[i] { - return errors.New("couldn't verify hash of contribution") - } - } - - return nil -} - -func (phase1 *Phase1) hash() []byte { sha := sha256.New() - phase1.writeTo(sha) + p.writeTo(sha) return sha.Sum(nil) 
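
// Aside: unrolled, this makes the transcript a hash chain. With sᵢ the
// serialized state after contribution i and c₀ = {0x00} as set by InitPhase1,
//
//	cᵢ₊₁ = SHA256(sᵢ ‖ cᵢ)
//
// so each challenge commits to every prior contribution, and tampering with
// any earlier state changes all later challenges.
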
} diff --git a/backend/groth16/bn254/mpcsetup/setup_test.go b/backend/groth16/bn254/mpcsetup/setup_test.go index 63b717cac4..797af8119b 100644 --- a/backend/groth16/bn254/mpcsetup/setup_test.go +++ b/backend/groth16/bn254/mpcsetup/setup_test.go @@ -168,17 +168,17 @@ func (circuit *Circuit) Define(api frontend.API) error { return nil } -func (phase1 *Phase1) clone() Phase1 { +func (p *Phase1) clone() Phase1 { r := Phase1{} - r.Parameters.G1.Tau = append(r.Parameters.G1.Tau, phase1.Parameters.G1.Tau...) - r.Parameters.G1.AlphaTau = append(r.Parameters.G1.AlphaTau, phase1.Parameters.G1.AlphaTau...) - r.Parameters.G1.BetaTau = append(r.Parameters.G1.BetaTau, phase1.Parameters.G1.BetaTau...) + r.Parameters.G1.Tau = append(r.Parameters.G1.Tau, p.Parameters.G1.Tau...) + r.Parameters.G1.AlphaTau = append(r.Parameters.G1.AlphaTau, p.Parameters.G1.AlphaTau...) + r.Parameters.G1.BetaTau = append(r.Parameters.G1.BetaTau, p.Parameters.G1.BetaTau...) - r.Parameters.G2.Tau = append(r.Parameters.G2.Tau, phase1.Parameters.G2.Tau...) - r.Parameters.G2.Beta = phase1.Parameters.G2.Beta + r.Parameters.G2.Tau = append(r.Parameters.G2.Tau, p.Parameters.G2.Tau...) + r.Parameters.G2.Beta = p.Parameters.G2.Beta - r.PublicKeys = phase1.PublicKeys - r.Hash = append(r.Hash, phase1.Hash...) + r.PublicKeys = p.PublicKeys + r.Hash = append(r.Hash, p.Hash...) return r } diff --git a/backend/groth16/bn254/mpcsetup/utils.go b/backend/groth16/bn254/mpcsetup/utils.go index 6a446e3236..c1a334b739 100644 --- a/backend/groth16/bn254/mpcsetup/utils.go +++ b/backend/groth16/bn254/mpcsetup/utils.go @@ -300,6 +300,15 @@ func (x *valueUpdate) verify(prevCommitment pair, challenge []byte, dst byte) er return nil } +// setEmpty does not provide proofs, only sets the value to [1] +func (x *valueUpdate) setEmpty(g1Only bool) { + _, _, g1, g2 := curve.Generators() + x.updatedCommitment.g1.Set(&g1) + if !g1Only { + x.updatedCommitment.g2 = &g2 + } +} + func toRefs[T any](s []T) []*T { res := make([]*T, len(s)) for i := range s { @@ -328,3 +337,21 @@ func areInSubGroupG2(s []curve.G2Affine) bool { func truncate[T any](s []T) []T { return s[:len(s)-1] } + +func eraseBigInts(i ...*big.Int) { + for _, i := range i { + if i != nil { + for j := range i.Bits() { + i.Bits()[j] = 0 + } + } + } +} + +func eraseFrVectors(v ...[]fr.Element) { + for _, v := range v { + for i := range v { + v[i].SetZero() + } + } +} From 4601ed7be8b5acdeff97deee173ff1bd380bca07 Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Mon, 14 Oct 2024 17:34:51 -0500 Subject: [PATCH 09/25] refactor initPhase1 --- .../groth16/bn254/mpcsetup/marshal_test.go | 2 +- backend/groth16/bn254/mpcsetup/phase1.go | 49 +++++++------------ backend/groth16/bn254/mpcsetup/setup_test.go | 8 +-- 3 files changed, 22 insertions(+), 37 deletions(-) diff --git a/backend/groth16/bn254/mpcsetup/marshal_test.go b/backend/groth16/bn254/mpcsetup/marshal_test.go index 386e3faf66..c8d7a6b004 100644 --- a/backend/groth16/bn254/mpcsetup/marshal_test.go +++ b/backend/groth16/bn254/mpcsetup/marshal_test.go @@ -34,7 +34,7 @@ func TestContributionSerialization(t *testing.T) { assert := require.New(t) // Phase 1 - srs1 := InitPhase1(9) + srs1 := NewPhase1(9) srs1.Contribute() assert.NoError(gnarkio.RoundTripCheck(&srs1, func() interface{} { return new(Phase1) })) diff --git a/backend/groth16/bn254/mpcsetup/phase1.go b/backend/groth16/bn254/mpcsetup/phase1.go index 1099d727c2..7eddd73e78 100644 --- a/backend/groth16/bn254/mpcsetup/phase1.go +++ 
b/backend/groth16/bn254/mpcsetup/phase1.go @@ -84,46 +84,31 @@ func (p *Phase1) Contribute() { p.Challenge = challenge } -// InitPhase1 initialize phase 1 of the MPC. This is called once by the coordinator before +// NewPhase1 initialize phase 1 of the MPC. This is called once by the coordinator before // any randomness contribution is made (see Contribute()). -func InitPhase1(power int) (phase1 Phase1) { +func NewPhase1(power int) (phase1 Phase1) { N := int(math.Pow(2, float64(power))) _, _, g1, g2 := curve.Generators() phase1.Challenge = []byte{0} - phase1.Principal.Alpha.setEmpty() - - // Generate key pairs - var tau, alpha, beta fr.Element - tau.SetOne() - alpha.SetOne() - beta.SetOne() - phase1.PublicKeys.Tau = newPublicKey(tau, nil, 1) - phase1.PublicKeys.Alpha = newPublicKey(alpha, nil, 2) - phase1.PublicKeys.Beta = newPublicKey(beta, nil, 3) - - // First contribution use generators - _, _, g1, g2 := curve.Generators() - phase1.Parameters.G2.Beta.Set(&g2) - phase1.Parameters.G1.Tau = make([]curve.G1Affine, 2*N-1) - phase1.Parameters.G2.Tau = make([]curve.G2Affine, N) - phase1.Parameters.G1.AlphaTau = make([]curve.G1Affine, N) - phase1.Parameters.G1.BetaTau = make([]curve.G1Affine, N) - for i := 0; i < len(phase1.Parameters.G1.Tau); i++ { - phase1.Parameters.G1.Tau[i].Set(&g1) - } - for i := 0; i < len(phase1.Parameters.G2.Tau); i++ { - phase1.Parameters.G2.Tau[i].Set(&g2) - phase1.Parameters.G1.AlphaTau[i].Set(&g1) - phase1.Parameters.G1.BetaTau[i].Set(&g1) + phase1.Principal.Alpha.setEmpty(true) + phase1.Principal.Beta.setEmpty(true) + phase1.Principal.Tau.setEmpty(false) + + phase1.G1Derived.Tau = make([]curve.G1Affine, 2*N-1) + phase1.G2Derived.Tau = make([]curve.G2Affine, N) + phase1.G1Derived.AlphaTau = make([]curve.G1Affine, N) + phase1.G1Derived.BetaTau = make([]curve.G1Affine, N) + for i := range phase1.G1Derived.Tau { + phase1.G1Derived.Tau[i].Set(&g1) + } + for i := range phase1.G2Derived.Tau { + phase1.G2Derived.Tau[i].Set(&g2) + phase1.G1Derived.AlphaTau[i].Set(&g1) + phase1.G1Derived.BetaTau[i].Set(&g1) } - phase1.Parameters.G2.Beta.Set(&g2) - - // Compute hash of Contribution - phase1.Hash = phase1.hash() - return } diff --git a/backend/groth16/bn254/mpcsetup/setup_test.go b/backend/groth16/bn254/mpcsetup/setup_test.go index 797af8119b..3689969c69 100644 --- a/backend/groth16/bn254/mpcsetup/setup_test.go +++ b/backend/groth16/bn254/mpcsetup/setup_test.go @@ -40,7 +40,7 @@ func TestSetupCircuit(t *testing.T) { assert := require.New(t) - srs1 := InitPhase1(power) + srs1 := NewPhase1(power) // Make and verify contributions for phase1 for i := 1; i < nContributionsPhase1; i++ { @@ -104,12 +104,12 @@ func BenchmarkPhase1(b *testing.B) { b.Run("init", func(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - _ = InitPhase1(power) + _ = NewPhase1(power) } }) b.Run("contrib", func(b *testing.B) { - srs1 := InitPhase1(power) + srs1 := NewPhase1(power) b.ResetTimer() for i := 0; i < b.N; i++ { srs1.Contribute() @@ -120,7 +120,7 @@ func BenchmarkPhase1(b *testing.B) { func BenchmarkPhase2(b *testing.B) { const power = 14 - srs1 := InitPhase1(power) + srs1 := NewPhase1(power) srs1.Contribute() var myCircuit Circuit From 9b82466305117a832de6fb3ee88981fd9683d24d Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Tue, 15 Oct 2024 15:45:13 -0500 Subject: [PATCH 10/25] feat phase1 marshal --- backend/groth16/bn254/mpcsetup/marshal.go | 98 ++++++++++--------- .../groth16/bn254/mpcsetup/marshal_test.go | 3 +- 
backend/groth16/bn254/mpcsetup/phase1.go | 51 +++++----- backend/groth16/bn254/mpcsetup/utils.go | 24 +---- 4 files changed, 82 insertions(+), 94 deletions(-) diff --git a/backend/groth16/bn254/mpcsetup/marshal.go b/backend/groth16/bn254/mpcsetup/marshal.go index 2985022bbd..34c7b95a23 100644 --- a/backend/groth16/bn254/mpcsetup/marshal.go +++ b/backend/groth16/bn254/mpcsetup/marshal.go @@ -21,36 +21,48 @@ import ( "io" ) -// WriteTo implements io.WriterTo -func (p *Phase1) WriteTo(writer io.Writer) (int64, error) { - n, err := p.writeTo(writer) - if err != nil { - return n, err +func appendToSlice[T any](s []interface{}, v []T) []interface{} { + for i := range v { + s = append(s, v[i]) } - nBytes, err := writer.Write(p.Hash) - return int64(nBytes) + n, err + return s } -func (p *Phase1) writeTo(writer io.Writer) (int64, error) { - toEncode := []interface{}{ - &p.PublicKeys.Tau.SG, - &p.PublicKeys.Tau.SXG, - &p.PublicKeys.Tau.XR, - &p.PublicKeys.Alpha.SG, - &p.PublicKeys.Alpha.SXG, - &p.PublicKeys.Alpha.XR, - &p.PublicKeys.Beta.SG, - &p.PublicKeys.Beta.SXG, - &p.PublicKeys.Beta.XR, - p.Parameters.G1.Tau, - p.Parameters.G1.AlphaTau, - p.Parameters.G1.BetaTau, - p.Parameters.G2.Tau, - &p.Parameters.G2.Beta, +func (p *Phase1) toSlice() []interface{} { + N := len(p.G2Derived.Tau) + estimatedNbElems := 5*N + 5 + // size N 1 + // commitment, proof of knowledge, and 𝔾₁ representation for τ, α, and β 9 + // 𝔾₂ representation for τ and β 2 + // [τⁱ]₁ for 2 ≤ i ≤ 2N-2 2N-3 + // [τⁱ]₂ for 2 ≤ i ≤ N-1 N-2 + // [ατⁱ]₁ for 1 ≤ i ≤ N-1 N-1 + // [βτⁱ]₁ for 1 ≤ i ≤ N-1 N-1 + + toEncode := make([]interface{}, 1, estimatedNbElems) + + toEncode[0] = N + toEncode = p.Principal.Tau.appendRefsToSlice(toEncode) + toEncode = p.Principal.Alpha.appendRefsToSlice(toEncode) + toEncode = p.Principal.Beta.appendRefsToSlice(toEncode) + + toEncode = appendToSlice(toEncode, p.G1Derived.Tau[2:]) + toEncode = appendToSlice(toEncode, p.G2Derived.Tau[2:]) + toEncode = appendToSlice(toEncode, p.G1Derived.BetaTau[1:]) + toEncode = appendToSlice(toEncode, p.G1Derived.AlphaTau[1:]) + + if len(toEncode) != estimatedNbElems { + panic("incorrect length estimate") } + return toEncode +} + +// WriteTo implements io.WriterTo +func (p *Phase1) WriteTo(writer io.Writer) (int64, error) { + enc := curve.NewEncoder(writer) - for _, v := range toEncode { + for _, v := range p.toSlice() { if err := enc.Encode(v); err != nil { return enc.BytesWritten(), err } @@ -60,32 +72,21 @@ func (p *Phase1) writeTo(writer io.Writer) (int64, error) { // ReadFrom implements io.ReaderFrom func (p *Phase1) ReadFrom(reader io.Reader) (int64, error) { - toEncode := []interface{}{ - &p.PublicKeys.Tau.SG, - &p.PublicKeys.Tau.SXG, - &p.PublicKeys.Tau.XR, - &p.PublicKeys.Alpha.SG, - &p.PublicKeys.Alpha.SXG, - &p.PublicKeys.Alpha.XR, - &p.PublicKeys.Beta.SG, - &p.PublicKeys.Beta.SXG, - &p.PublicKeys.Beta.XR, - &p.Parameters.G1.Tau, - &p.Parameters.G1.AlphaTau, - &p.Parameters.G1.BetaTau, - &p.Parameters.G2.Tau, - &p.Parameters.G2.Beta, + var N uint64 + dec := curve.NewDecoder(reader) + if err := dec.Decode(&N); err != nil { + return dec.BytesRead(), err } - dec := curve.NewDecoder(reader) - for _, v := range toEncode { + p.Initialize(N) + toDecode := p.toSlice() + + for _, v := range toDecode[1:] { // we've already decoded N if err := dec.Decode(v); err != nil { return dec.BytesRead(), err } } - p.Hash = make([]byte, 32) - nBytes, err := reader.Read(p.Hash) - return dec.BytesRead() + int64(nBytes), err + return dec.BytesRead(), nil } // WriteTo implements io.WriterTo 
@@ -179,3 +180,12 @@ func (c *Phase2Evaluations) ReadFrom(reader io.Reader) (int64, error) { return dec.BytesRead(), nil } + +// appendRefsToSlice appends references to values in x to s +func (x *valueUpdate) appendRefsToSlice(s []interface{}) []interface{} { + s = append(s, &x.contributionCommitment, &x.contributionPok, &x.updatedCommitment.g1) + if x.updatedCommitment.g2 != nil { + return append(s, x.updatedCommitment.g2) + } + return s +} diff --git a/backend/groth16/bn254/mpcsetup/marshal_test.go b/backend/groth16/bn254/mpcsetup/marshal_test.go index c8d7a6b004..9b69bb8e91 100644 --- a/backend/groth16/bn254/mpcsetup/marshal_test.go +++ b/backend/groth16/bn254/mpcsetup/marshal_test.go @@ -34,7 +34,8 @@ func TestContributionSerialization(t *testing.T) { assert := require.New(t) // Phase 1 - srs1 := NewPhase1(9) + var srs1 Phase1 + srs1.Initialize(1 << 9) srs1.Contribute() assert.NoError(gnarkio.RoundTripCheck(&srs1, func() interface{} { return new(Phase1) })) diff --git a/backend/groth16/bn254/mpcsetup/phase1.go b/backend/groth16/bn254/mpcsetup/phase1.go index 7eddd73e78..ba2c082902 100644 --- a/backend/groth16/bn254/mpcsetup/phase1.go +++ b/backend/groth16/bn254/mpcsetup/phase1.go @@ -23,7 +23,6 @@ import ( "fmt" curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fr" - "math" "math/big" ) @@ -37,8 +36,8 @@ type Phase1 struct { } G1Derived struct { Tau []curve.G1Affine // {[τ⁰]₁, [τ¹]₁, [τ²]₁, …, [τ²ⁿ⁻²]₁} - AlphaTau []curve.G1Affine // {α[τ⁰]₁, α[τ¹]₁, α[τ²]₁, …, α[τⁿ⁻¹]₁} - BetaTau []curve.G1Affine // {β[τ⁰]₁, β[τ¹]₁, β[τ²]₁, …, β[τⁿ⁻¹]₁} + AlphaTau []curve.G1Affine // {[ατ⁰]₁, [ατ¹]₁, [ατ²]₁, …, [ατⁿ⁻¹]₁} + BetaTau []curve.G1Affine // {[βτ⁰]₁, [βτ¹]₁, [βτ²]₁, …, [βτⁿ⁻¹]₁} } G2Derived struct { Tau []curve.G2Affine // {[τ⁰]₂, [τ¹]₂, [τ²]₂, …, [τⁿ⁻¹]₂} @@ -84,29 +83,26 @@ func (p *Phase1) Contribute() { p.Challenge = challenge } -// NewPhase1 initialize phase 1 of the MPC. This is called once by the coordinator before -// any randomness contribution is made (see Contribute()). 
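
// Aside: callers now pass the size directly, e.g. srs1.Initialize(1 << 9) in
// marshal_test.go above. For a power-of-two size the shift is exact integer
// arithmetic, unlike the float64 round trip in the constructor removed below:
//
//	int(math.Pow(2, float64(power))) // removed: goes through float64
//	uint64(1) << power               // exact, assuming power < 64
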
-func NewPhase1(power int) (phase1 Phase1) { - N := int(math.Pow(2, float64(power))) - +// Initialize an empty object of size N +func (p *Phase1) Initialize(N uint64) { _, _, g1, g2 := curve.Generators() - phase1.Challenge = []byte{0} - phase1.Principal.Alpha.setEmpty(true) - phase1.Principal.Beta.setEmpty(true) - phase1.Principal.Tau.setEmpty(false) + p.Challenge = []byte{0} + p.Principal.Alpha.setEmpty(true) + p.Principal.Beta.setEmpty(false) + p.Principal.Tau.setEmpty(false) - phase1.G1Derived.Tau = make([]curve.G1Affine, 2*N-1) - phase1.G2Derived.Tau = make([]curve.G2Affine, N) - phase1.G1Derived.AlphaTau = make([]curve.G1Affine, N) - phase1.G1Derived.BetaTau = make([]curve.G1Affine, N) - for i := range phase1.G1Derived.Tau { - phase1.G1Derived.Tau[i].Set(&g1) + p.G1Derived.Tau = make([]curve.G1Affine, 2*N-1) + p.G2Derived.Tau = make([]curve.G2Affine, N) + p.G1Derived.AlphaTau = make([]curve.G1Affine, N) + p.G1Derived.BetaTau = make([]curve.G1Affine, N) + for i := range p.G1Derived.Tau { + p.G1Derived.Tau[i].Set(&g1) } - for i := range phase1.G2Derived.Tau { - phase1.G2Derived.Tau[i].Set(&g2) - phase1.G1Derived.AlphaTau[i].Set(&g1) - phase1.G1Derived.BetaTau[i].Set(&g1) + for i := range p.G2Derived.Tau { + p.G2Derived.Tau[i].Set(&g2) + p.G1Derived.AlphaTau[i].Set(&g1) + p.G1Derived.BetaTau[i].Set(&g1) } return @@ -158,10 +154,10 @@ func (p *Phase1) Verify(previous *Phase1) error { r := linearCombCoeffs(len(p.G1Derived.Tau) - 1) // the longest of all lengths // will be reusing the coefficient TODO @Tabaie make sure that's okay - tauT1, tauS1 := linearCombinationsG1(r, p.G1Derived.Tau) - tauT2, tauS2 := linearCombinationsG2(r, p.G2Derived.Tau) - alphaTT, alphaTS := linearCombinationsG1(r, p.G1Derived.AlphaTau) - betaTT, betaTS := linearCombinationsG1(r, p.G1Derived.BetaTau) + tauT1, tauS1 := linearCombinationsG1(p.G1Derived.Tau[1:], r) + tauT2, tauS2 := linearCombinationsG2(p.G2Derived.Tau[1:], r) + alphaTT, alphaTS := linearCombinationsG1(p.G1Derived.AlphaTau, r) + betaTT, betaTS := linearCombinationsG1(p.G1Derived.BetaTau, r) if !sameRatioUnsafe(tauS1, tauT1, *p.Principal.Tau.updatedCommitment.g2, g2) { return errors.New("couldn't verify 𝔾₁ representations of the τⁱ") @@ -191,6 +187,7 @@ func (p *Phase1) hash() []byte { panic("challenge field missing") } sha := sha256.New() - p.writeTo(sha) + p.WriteTo(sha) + sha.Write(p.Challenge) return sha.Sum(nil) } diff --git a/backend/groth16/bn254/mpcsetup/utils.go b/backend/groth16/bn254/mpcsetup/utils.go index c1a334b739..f6ba644893 100644 --- a/backend/groth16/bn254/mpcsetup/utils.go +++ b/backend/groth16/bn254/mpcsetup/utils.go @@ -148,29 +148,9 @@ func merge(A, B []curve.G1Affine) (a, b curve.G1Affine) { return } -// truncated = ∑ rᵢAᵢ, shifted = ∑ rᵢAᵢ₊₁∈𝔾₁ -func linearCombinationG1(r []fr.Element, A []curve.G1Affine, nbTasks int) curve.G1Affine { - n := len(A) - r = r[:n-1] - var res curve.G1Affine - res.MultiExp(A[:n-1], r, ecc.MultiExpConfig{NbTasks: nc / 2}) - shifted.MultiExp(A[1:], r, ecc.MultiExpConfig{NbTasks: nc / 2}) - return -} - -// truncated = ∑ rᵢAᵢ, shifted = ∑ rᵢAᵢ₊₁∈𝔾₂ -func linearCombinationG2(r []fr.Element, A []curve.G2Affine) (truncated, shifted curve.G2Affine) { - nc := runtime.NumCPU() - n := len(A) - r = r[:n-1] - truncated.MultiExp(A[:n-1], r, ecc.MultiExpConfig{NbTasks: nc / 2}) - shifted.MultiExp(A[1:], r, ecc.MultiExpConfig{NbTasks: nc / 2}) - return -} - // linearCombinationsG1 assumes, and does not check, that rPowers[i+1] = rPowers[1].rPowers[i] for all applicable i // Also assumed that 3 ≤ N ≔ len(A) ≤ 
len(rPowers) -func linearCombinationsG1(rPowers []fr.Element, A []curve.G1Affine) (truncated, shifted curve.G1Affine) { +func linearCombinationsG1(A []curve.G1Affine, rPowers []fr.Element) (truncated, shifted curve.G1Affine) { // the common section, 1 to N-2 var common curve.G1Affine common.MultiExp(A[1:len(A)-1], rPowers[:len(A)-2], ecc.MultiExpConfig{NbTasks: runtime.NumCPU()}) // A[1] + r.A[2] + ... + rᴺ⁻³.A[N-2] @@ -187,7 +167,7 @@ func linearCombinationsG1(rPowers []fr.Element, A []curve.G1Affine) (truncated, // linearCombinationsG2 assumes, and does not check, that rPowers[i+1] = rPowers[1].rPowers[i] for all applicable i // Also assumed that 3 ≤ N ≔ len(A) ≤ len(rPowers) -func linearCombinationsG2(rPowers []fr.Element, A []curve.G2Affine) (truncated, shifted curve.G2Affine) { +func linearCombinationsG2(A []curve.G2Affine, rPowers []fr.Element) (truncated, shifted curve.G2Affine) { // the common section, 1 to N-2 var common curve.G2Affine common.MultiExp(A[1:len(A)-1], rPowers[:len(A)-2], ecc.MultiExpConfig{NbTasks: runtime.NumCPU()}) // A[1] + r.A[2] + ... + rᴺ⁻³.A[N-2] From c352cd9611025c88e65b3c4459b417ef04aac80b Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Fri, 18 Oct 2024 17:16:42 -0500 Subject: [PATCH 11/25] refactor "Parameters" --- backend/groth16/bn254/mpcsetup/marshal.go | 135 ++++++++----- backend/groth16/bn254/mpcsetup/phase1.go | 192 ++++++++++++------- backend/groth16/bn254/mpcsetup/phase2.go | 1 + backend/groth16/bn254/mpcsetup/setup_test.go | 16 +- backend/groth16/bn254/mpcsetup/utils.go | 96 +++++----- 5 files changed, 265 insertions(+), 175 deletions(-) diff --git a/backend/groth16/bn254/mpcsetup/marshal.go b/backend/groth16/bn254/mpcsetup/marshal.go index 34c7b95a23..8f249bbda6 100644 --- a/backend/groth16/bn254/mpcsetup/marshal.go +++ b/backend/groth16/bn254/mpcsetup/marshal.go @@ -21,72 +21,58 @@ import ( "io" ) -func appendToSlice[T any](s []interface{}, v []T) []interface{} { +func appendRefs[T any](s []interface{}, v []T) []interface{} { for i := range v { - s = append(s, v[i]) + s = append(s, &v[i]) } return s } -func (p *Phase1) toSlice() []interface{} { - N := len(p.G2Derived.Tau) - estimatedNbElems := 5*N + 5 - // size N 1 - // commitment, proof of knowledge, and 𝔾₁ representation for τ, α, and β 9 - // 𝔾₂ representation for τ and β 2 - // [τⁱ]₁ for 2 ≤ i ≤ 2N-2 2N-3 - // [τⁱ]₂ for 2 ≤ i ≤ N-1 N-2 - // [ατⁱ]₁ for 1 ≤ i ≤ N-1 N-1 - // [βτⁱ]₁ for 1 ≤ i ≤ N-1 N-1 - - toEncode := make([]interface{}, 1, estimatedNbElems) - - toEncode[0] = N - toEncode = p.Principal.Tau.appendRefsToSlice(toEncode) - toEncode = p.Principal.Alpha.appendRefsToSlice(toEncode) - toEncode = p.Principal.Beta.appendRefsToSlice(toEncode) - - toEncode = appendToSlice(toEncode, p.G1Derived.Tau[2:]) - toEncode = appendToSlice(toEncode, p.G2Derived.Tau[2:]) - toEncode = appendToSlice(toEncode, p.G1Derived.BetaTau[1:]) - toEncode = appendToSlice(toEncode, p.G1Derived.AlphaTau[1:]) - - if len(toEncode) != estimatedNbElems { - panic("incorrect length estimate") +// proofRefsSlice produces a slice consisting of references to all proof sub-elements +// prepended by the size parameter, to be used in WriteTo and ReadFrom functions +func (p *Phase1) proofRefsSlice() []interface{} { + return []interface{}{ + &p.proofs.Tau.contributionCommitment, + &p.proofs.Tau.contributionPok, + &p.proofs.Alpha.contributionCommitment, + &p.proofs.Alpha.contributionPok, + &p.proofs.Beta.contributionCommitment, + &p.proofs.Beta.contributionPok, } - - return toEncode } // 
WriteTo implements io.WriterTo -func (p *Phase1) WriteTo(writer io.Writer) (int64, error) { +// It does not write the Challenge from the previous contribution +func (p *Phase1) WriteTo(writer io.Writer) (n int64, err error) { + + if n, err = p.parameters.WriteTo(writer); err != nil { + return + } enc := curve.NewEncoder(writer) - for _, v := range p.toSlice() { - if err := enc.Encode(v); err != nil { - return enc.BytesWritten(), err + for _, v := range p.proofRefsSlice() { + if err = enc.Encode(v); err != nil { + return n + enc.BytesWritten(), err } } - return enc.BytesWritten(), nil + return n + enc.BytesWritten(), nil } // ReadFrom implements io.ReaderFrom -func (p *Phase1) ReadFrom(reader io.Reader) (int64, error) { - var N uint64 - dec := curve.NewDecoder(reader) - if err := dec.Decode(&N); err != nil { - return dec.BytesRead(), err - } +// It does not read the Challenge from the previous contribution +func (p *Phase1) ReadFrom(reader io.Reader) (n int64, err error) { - p.Initialize(N) - toDecode := p.toSlice() + if n, err = p.parameters.ReadFrom(reader); err != nil { + return + } - for _, v := range toDecode[1:] { // we've already decoded N - if err := dec.Decode(v); err != nil { - return dec.BytesRead(), err + dec := curve.NewDecoder(reader) + for _, v := range p.proofRefsSlice() { // we've already decoded N + if err = dec.Decode(v); err != nil { + return n + dec.BytesRead(), err } } - return dec.BytesRead(), nil + return n + dec.BytesRead(), nil } // WriteTo implements io.WriterTo @@ -181,11 +167,56 @@ func (c *Phase2Evaluations) ReadFrom(reader io.Reader) (int64, error) { return dec.BytesRead(), nil } -// appendRefsToSlice appends references to values in x to s -func (x *valueUpdate) appendRefsToSlice(s []interface{}) []interface{} { - s = append(s, &x.contributionCommitment, &x.contributionPok, &x.updatedCommitment.g1) - if x.updatedCommitment.g2 != nil { - return append(s, x.updatedCommitment.g2) +// refsSlice produces a slice consisting of references to all sub-elements +// prepended by the size parameter, to be used in WriteTo and ReadFrom functions +func (c *SrsCommons) refsSlice() []interface{} { + N := len(c.G2.Tau) + estimatedNbElems := 5*N - 1 + // size N 1 + // 𝔾₂ representation for β 1 + // [τⁱ]₁ for 1 ≤ i ≤ 2N-2 2N-2 + // [τⁱ]₂ for 1 ≤ i ≤ N-1 N-1 + // [ατⁱ]₁ for 0 ≤ i ≤ N-1 N + // [βτⁱ]₁ for 0 ≤ i ≤ N-1 N + refs := make([]interface{}, 1, estimatedNbElems) + refs[0] = N + + refs = appendRefs(refs, c.G1.Tau[1:]) + refs = appendRefs(refs, c.G2.Tau[1:]) + refs = appendRefs(refs, c.G1.BetaTau) + refs = appendRefs(refs, c.G1.AlphaTau) + + if len(refs) != estimatedNbElems { + panic("incorrect length estimate") } - return s + + return refs +} + +func (c *SrsCommons) WriteTo(writer io.Writer) (int64, error) { + enc := curve.NewEncoder(writer) + for _, v := range c.refsSlice() { + if err := enc.Encode(v); err != nil { + return enc.BytesWritten(), err + } + } + return enc.BytesWritten(), nil +} + +// ReadFrom implements io.ReaderFrom +func (c *SrsCommons) ReadFrom(reader io.Reader) (n int64, err error) { + var N uint64 + dec := curve.NewDecoder(reader) + if err = dec.Decode(&N); err != nil { + return dec.BytesRead(), err + } + + c.setZero(N) + + for _, v := range c.refsSlice()[1:] { // we've already decoded N + if err = dec.Decode(v); err != nil { + return dec.BytesRead(), err + } + } + return dec.BytesRead(), nil } diff --git a/backend/groth16/bn254/mpcsetup/phase1.go b/backend/groth16/bn254/mpcsetup/phase1.go index ba2c082902..4b8df877e7 100644 --- 
a/backend/groth16/bn254/mpcsetup/phase1.go +++ b/backend/groth16/bn254/mpcsetup/phase1.go @@ -1,4 +1,4 @@ -// Copyright 2020 ConsenSys Software Inc. +// Copyright 2020 Consensys Software Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -31,81 +31,126 @@ import ( // // Also known as "Powers of Tau" type Phase1 struct { - Principal struct { // "main" contributions + proofs struct { // "main" contributions Tau, Alpha, Beta valueUpdate } - G1Derived struct { - Tau []curve.G1Affine // {[τ⁰]₁, [τ¹]₁, [τ²]₁, …, [τ²ⁿ⁻²]₁} - AlphaTau []curve.G1Affine // {[ατ⁰]₁, [ατ¹]₁, [ατ²]₁, …, [ατⁿ⁻¹]₁} - BetaTau []curve.G1Affine // {[βτ⁰]₁, [βτ¹]₁, [βτ²]₁, …, [βτⁿ⁻¹]₁} - } - G2Derived struct { - Tau []curve.G2Affine // {[τ⁰]₂, [τ¹]₂, [τ²]₂, …, [τⁿ⁻¹]₂} - } - Challenge []byte // Hash of the transcript PRIOR to this participant + parameters SrsCommons + Challenge []byte // Hash of the transcript PRIOR to this participant } // Contribute contributes randomness to the Phase1 object. This mutates Phase1. // p is trusted to be well-formed. The ReadFrom function performs such basic sanity checks. func (p *Phase1) Contribute() { - N := len(p.G2Derived.Tau) - challenge := p.hash() + p.Challenge = p.hash() // Generate main value updates - var tau, alpha, beta *big.Int - p.Principal.Tau, tau = updateValue(p.Principal.Tau.updatedCommitment, challenge, 1) - p.Principal.Alpha, alpha = updateValue(p.Principal.Alpha.updatedCommitment, challenge, 2) - p.Principal.Beta, beta = updateValue(p.Principal.Beta.updatedCommitment, challenge, 3) + var ( + tauContrib, alphaContrib, betaContrib fr.Element + ) + p.proofs.Tau, p.parameters.G1.Tau[1], tauContrib = updateValue(p.parameters.G1.Tau[1], p.Challenge, 1) + p.proofs.Alpha, p.parameters.G1.AlphaTau[0], alphaContrib = updateValue(p.parameters.G1.AlphaTau[0], p.Challenge, 2) + p.proofs.Beta, p.parameters.G1.BetaTau[0], betaContrib = updateValue(p.parameters.G1.BetaTau[0], p.Challenge, 3) + + p.parameters.update(&tauContrib, &alphaContrib, &betaContrib, true) +} - defer eraseBigInts(tau, alpha, beta) +// SrsCommons are the circuit-independent components of the Groth16 SRS, +// computed by the first phase. 
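+// For orientation: with domain size N, G1.Tau holds 2N-1 points, [τ⁰]₁ through [τ²ⁿ⁻²]₁
+// (the powers beyond τⁿ⁻¹ are consumed in phase 2 to build the τⁱ(τⁿ-1) multiples of the
+// vanishing polynomial), while G2.Tau, G1.AlphaTau and G1.BetaTau hold N points each.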
+type SrsCommons struct { + G1 struct { + Tau []curve.G1Affine // {[τ⁰]₁, [τ¹]₁, [τ²]₁, …, [τ²ⁿ⁻²]₁} + AlphaTau []curve.G1Affine // {α[τ⁰]₁, α[τ¹]₁, α[τ²]₁, …, α[τⁿ⁻¹]₁} + BetaTau []curve.G1Affine // {β[τ⁰]₁, β[τ¹]₁, β[τ²]₁, …, β[τⁿ⁻¹]₁} + } + G2 struct { + Tau []curve.G2Affine // {[τ⁰]₂, [τ¹]₂, [τ²]₂, …, [τⁿ⁻¹]₂} + Beta curve.G2Affine // [β]₂ + } +} - // Compute τ, ατ, and βτ - taus := powers(tau, 2*N-1) - alphaTau := make([]fr.Element, N) - betaTau := make([]fr.Element, N) +// setZero instantiates the parameters and sets all contributions to zero +func (c *SrsCommons) setZero(N uint64) { + c.G1.Tau = make([]curve.G1Affine, 2*N-1) + c.G2.Tau = make([]curve.G2Affine, N) + c.G1.AlphaTau = make([]curve.G1Affine, N) + c.G1.BetaTau = make([]curve.G1Affine, N) + _, _, c.G1.Tau[0], c.G2.Tau[0] = curve.Generators() +} - defer eraseFrVectors(taus, alphaTau, betaTau) +// setOne instantiates the parameters and sets all contributions to one +func (c *SrsCommons) setOne(N uint64) { + c.setZero(N) + for i := range c.G1.Tau { + c.G1.Tau[i] = c.G1.Tau[0] + } + for i := range c.G1.AlphaTau { + c.G1.AlphaTau[i] = c.G1.Tau[0] + c.G1.BetaTau[i] = c.G1.Tau[0] + c.G2.Tau[i] = c.G2.Tau[0] + } + c.G2.Beta = c.G2.Tau[0] +} - alphaTau[0].SetBigInt(alpha) - betaTau[0].SetBigInt(beta) - for i := 1; i < N; i++ { - alphaTau[i].Mul(&taus[i], &alphaTau[0]) - betaTau[i].Mul(&taus[i], &betaTau[0]) +// update applies the given contribution values; the fourth argument just gives an opportunity to avoid recomputing some scalar multiplications +func (c *SrsCommons) update(tauUpdate, alphaUpdate, betaUpdate *fr.Element, principalG1sPrecomputed bool) { + i0 := 0 + if principalG1sPrecomputed { + i0 = 1 } - // Update using previous parameters // TODO @gbotrel working with jacobian points here will help with perf. - scaleG1InPlace(p.G1Derived.Tau, taus) - scaleG2InPlace(p.G2Derived.Tau, taus[0:N]) - scaleG1InPlace(p.G1Derived.AlphaTau, alphaTau) - scaleG1InPlace(p.G1Derived.BetaTau, betaTau) - - p.Challenge = challenge -} -// Initialize an empty object of size N -func (p *Phase1) Initialize(N uint64) { - _, _, g1, g2 := curve.Generators() + tauUpdates := powers(tauUpdate, len(c.G1.Tau)) + // saving exactly 3 scalar muls among millions. Not a significant gain but might as well. + scaleG1InPlace(c.G1.Tau[i0+1:], tauUpdates[i0+1:]) // first element remains 1. second element may have been precomputed.
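+ // Invariant behind the scaling: if the previous SRS holds [τⁱ]₁ and this update
+ // contributes u, the new SRS must hold [(uτ)ⁱ]₁ = uⁱ·[τⁱ]₁, so entry i is scaled
+ // by tauUpdates[i] = uⁱ; entry 0 is [1]₁ and is never scaled.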
+ scaleG2InPlace(c.G2.Tau[1:], tauUpdates[1:]) - p.Challenge = []byte{0} - p.Principal.Alpha.setEmpty(true) - p.Principal.Beta.setEmpty(false) - p.Principal.Tau.setEmpty(false) + alphaUpdates := make([]fr.Element, len(c.G1.AlphaTau)) + alphaUpdates[0].Set(alphaUpdate) + for i := i0; i < len(alphaUpdates); i++ { + alphaUpdates[i].Mul(&tauUpdates[i], &alphaUpdates[0]) + } + scaleG1InPlace(c.G1.AlphaTau[i0:], alphaUpdates[i0:]) // first element may have been precomputed - p.G1Derived.Tau = make([]curve.G1Affine, 2*N-1) - p.G2Derived.Tau = make([]curve.G2Affine, N) - p.G1Derived.AlphaTau = make([]curve.G1Affine, N) - p.G1Derived.BetaTau = make([]curve.G1Affine, N) - for i := range p.G1Derived.Tau { - p.G1Derived.Tau[i].Set(&g1) + betaUpdates := make([]fr.Element, len(c.G1.BetaTau)) + betaUpdates[0].Set(betaUpdate) + for i := i0; i < len(betaUpdates); i++ { + betaUpdates[i].Mul(&tauUpdates[i], &betaUpdates[0]) } - for i := range p.G2Derived.Tau { - p.G2Derived.Tau[i].Set(&g2) - p.G1Derived.AlphaTau[i].Set(&g1) - p.G1Derived.BetaTau[i].Set(&g1) + scaleG1InPlace(c.G1.BetaTau[i0:], betaUpdates[i0:]) + + var betaUpdateI big.Int + betaUpdate.BigInt(&betaUpdateI) + c.G2.Beta.ScalarMultiplication(&c.G2.Beta, &betaUpdateI) +} + +// Seal performs the final contribution and outputs the final parameters. +// No randomization is performed at this step. +// A verifier should simply re-run this and check +// that it produces the same values. +// The inner workings of the random beacon are out of scope. +// WARNING: Seal modifies p, just as Contribute does. +// The result will be an INVALID Phase1 object. +func (p *Phase1) Seal(beaconChallenge []byte) SrsCommons { + var ( + bb bytes.Buffer + err error + ) + bb.Write(p.hash()) + bb.Write(beaconChallenge) + + newContribs := make([]fr.Element, 3) + // cryptographically unlikely for this to be run more than once + for newContribs[0].IsZero() || newContribs[1].IsZero() || newContribs[2].IsZero() { + if newContribs, err = fr.Hash(bb.Bytes(), []byte("Groth16 SRS generation ceremony - Phase 1 Final Step"), 3); err != nil { + panic(err) + } + bb.WriteByte('=') // padding just so that the hash is different next time } - return + p.parameters.update(&newContribs[0], &newContribs[1], &newContribs[2], false) + + return p.parameters } func VerifyPhase1(c0, c1 *Phase1, c ...*Phase1) error { @@ -128,20 +173,29 @@ func (p *Phase1) Verify(previous *Phase1) error { p.Challenge = prevHash } - if err := p.Principal.Tau.verify(previous.Principal.Tau.updatedCommitment, p.Challenge, 1); err != nil { + if err := p.proofs.Tau.verify( + pair{previous.parameters.G1.Tau[1], &previous.parameters.G2.Tau[1]}, + pair{p.parameters.G1.Tau[1], &p.parameters.G2.Tau[1]}, + p.Challenge, 1); err != nil { return fmt.Errorf("failed to verify contribution to τ: %w", err) } - if err := p.Principal.Alpha.verify(previous.Principal.Alpha.updatedCommitment, p.Challenge, 2); err != nil { + if err := p.proofs.Alpha.verify( + pair{previous.parameters.G1.AlphaTau[0], nil}, + pair{p.parameters.G1.AlphaTau[0], nil}, + p.Challenge, 2); err != nil { return fmt.Errorf("failed to verify contribution to α: %w", err) } - if err := p.Principal.Beta.verify(previous.Principal.Beta.updatedCommitment, p.Challenge, 3); err != nil { + if err := p.proofs.Beta.verify( + pair{previous.parameters.G1.BetaTau[0], &previous.parameters.G2.Beta}, + pair{p.parameters.G1.BetaTau[0], &p.parameters.G2.Beta}, + p.Challenge, 3); err != nil { return fmt.Errorf("failed to verify contribution to β: %w", err) } - if
!areInSubGroupG1(p.G1Derived.Tau) || !areInSubGroupG1(p.G1Derived.BetaTau) || !areInSubGroupG1(p.G1Derived.AlphaTau) { + if !areInSubGroupG1(p.parameters.G1.Tau[2:]) || !areInSubGroupG1(p.parameters.G1.BetaTau[1:]) || !areInSubGroupG1(p.parameters.G1.AlphaTau[1:]) { return errors.New("derived values 𝔾₁ subgroup check failed") } - if !areInSubGroupG2(p.G2Derived.Tau) { + if !areInSubGroupG2(p.parameters.G2.Tau[2:]) { return errors.New("derived values 𝔾₂ subgroup check failed") } @@ -151,19 +205,19 @@ func (p *Phase1) Verify(previous *Phase1) error { // i.e. e(τⁱ⁺¹,[1]₂) = e(τⁱ,[τ]₂). Due to bi-linearity we can instead check // e(∑rⁱ⁻¹τⁱ⁺¹,[1]₂) = e(∑rⁱ⁻¹τⁱ,[τ]₂), which is tantamount to the check // ∑rⁱ⁻¹τⁱ⁺¹ / ∑rⁱ⁻¹τⁱ = τ - r := linearCombCoeffs(len(p.G1Derived.Tau) - 1) // the longest of all lengths + r := linearCombCoeffs(len(p.parameters.G1.Tau) - 1) // the longest of all lengths // will be reusing the coefficient TODO @Tabaie make sure that's okay - tauT1, tauS1 := linearCombinationsG1(p.G1Derived.Tau[1:], r) - tauT2, tauS2 := linearCombinationsG2(p.G2Derived.Tau[1:], r) - alphaTT, alphaTS := linearCombinationsG1(p.G1Derived.AlphaTau, r) - betaTT, betaTS := linearCombinationsG1(p.G1Derived.BetaTau, r) + tauT1, tauS1 := linearCombinationsG1(p.parameters.G1.Tau[1:], r) + tauT2, tauS2 := linearCombinationsG2(p.parameters.G2.Tau[1:], r) + alphaTT, alphaTS := linearCombinationsG1(p.parameters.G1.AlphaTau, r) + betaTT, betaTS := linearCombinationsG1(p.parameters.G1.BetaTau, r) - if !sameRatioUnsafe(tauS1, tauT1, *p.Principal.Tau.updatedCommitment.g2, g2) { + if !sameRatioUnsafe(tauS1, tauT1, p.parameters.G2.Tau[1], g2) { return errors.New("couldn't verify 𝔾₁ representations of the τⁱ") } - if !sameRatioUnsafe(p.Principal.Tau.updatedCommitment.g1, g1, tauS2, tauT2) { + if !sameRatioUnsafe(p.parameters.G1.Tau[1], g1, tauS2, tauT2) { return errors.New("couldn't verify 𝔾₂ representations of the τⁱ") } @@ -172,13 +226,15 @@ func (p *Phase1) Verify(previous *Phase1) error { // For 0 < i < N we check that ατⁱ/ατⁱ⁻¹ = τ, since we have a representation of τ in 𝔾₂ // with a similar bi-linearity argument as above we can do this with a single pairing check - if !sameRatioUnsafe(alphaTS, alphaTT, *p.Principal.Tau.updatedCommitment.g2, g2) { + if !sameRatioUnsafe(alphaTS, alphaTT, p.parameters.G2.Tau[1], g2) { return errors.New("couldn't verify the ατⁱ") } - if !sameRatioUnsafe(betaTS, betaTT, *p.Principal.Tau.updatedCommitment.g2, g2) { + if !sameRatioUnsafe(betaTS, betaTT, p.parameters.G2.Tau[1], g2) { return errors.New("couldn't verify the βτⁱ") } + // TODO @Tabaie combine all pairing checks except the second one + return nil } diff --git a/backend/groth16/bn254/mpcsetup/phase2.go b/backend/groth16/bn254/mpcsetup/phase2.go index 3fcafb30da..058489e28f 100644 --- a/backend/groth16/bn254/mpcsetup/phase2.go +++ b/backend/groth16/bn254/mpcsetup/phase2.go @@ -51,6 +51,7 @@ type Phase2 struct { } func InitPhase2(r1cs *cs.R1CS, srs1 *Phase1) (Phase2, Phase2Evaluations) { + srs := srs1.Parameters size := len(srs.G1.AlphaTau) if size < r1cs.GetNbConstraints() { diff --git a/backend/groth16/bn254/mpcsetup/setup_test.go b/backend/groth16/bn254/mpcsetup/setup_test.go index 3689969c69..cb71d9ea6a 100644 --- a/backend/groth16/bn254/mpcsetup/setup_test.go +++ b/backend/groth16/bn254/mpcsetup/setup_test.go @@ -40,12 +40,13 @@ func TestSetupCircuit(t *testing.T) { assert := require.New(t) - srs1 := NewPhase1(power) + var srs1 Phase1 + srs1.Initialize(1 << power) // Make and verify contributions for phase1 for i := 1; i 
< nContributionsPhase1; i++ { // we clone test purposes; but in practice, participant will receive a []byte, deserialize it, - // add his contribution and send back to coordinator. + // add its contribution and send back to coordinator. prev := srs1.clone() srs1.Contribute() @@ -66,7 +67,7 @@ func TestSetupCircuit(t *testing.T) { // Make and verify contributions for phase1 for i := 1; i < nContributionsPhase2; i++ { // we clone for test purposes; but in practice, participant will receive a []byte, deserialize it, - // add his contribution and send back to coordinator. + // add its contribution and send back to coordinator. prev := srs2.clone() srs2.Contribute() @@ -103,13 +104,15 @@ func BenchmarkPhase1(b *testing.B) { b.Run("init", func(b *testing.B) { b.ResetTimer() + var srs1 Phase1 for i := 0; i < b.N; i++ { - _ = NewPhase1(power) + srs1.Initialize(1 << power) } }) b.Run("contrib", func(b *testing.B) { - srs1 := NewPhase1(power) + var srs1 Phase1 + srs1.Initialize(1 << power) b.ResetTimer() for i := 0; i < b.N; i++ { srs1.Contribute() @@ -120,7 +123,8 @@ func BenchmarkPhase1(b *testing.B) { func BenchmarkPhase2(b *testing.B) { const power = 14 - srs1 := NewPhase1(power) + var srs1 Phase1 + srs1.Initialize(1 << power) srs1.Contribute() var myCircuit Circuit diff --git a/backend/groth16/bn254/mpcsetup/utils.go b/backend/groth16/bn254/mpcsetup/utils.go index f6ba644893..a868b43f68 100644 --- a/backend/groth16/bn254/mpcsetup/utils.go +++ b/backend/groth16/bn254/mpcsetup/utils.go @@ -18,7 +18,6 @@ package mpcsetup import ( "bytes" - "crypto/rand" "errors" "math/big" "math/bits" @@ -73,27 +72,42 @@ func bitReverse[T any](a []T) { } func linearCombCoeffs(n int) []fr.Element { - a, err := rand.Int(rand.Reader, fr.Modulus()) - if err != nil { + var a fr.Element + if _, err := a.SetRandom(); err != nil { panic(err) } - return powers(a, n) + return powers(&a, n) } -// Returns [1, a, a², ..., aⁿ⁻¹ ] -func powers(a *big.Int, n int) []fr.Element { +func powersI(a *big.Int, n int) []fr.Element { var aMont fr.Element aMont.SetBigInt(a) + return powers(&aMont, n) +} + +// Returns [1, a, a², ..., aⁿ⁻¹ ] +func powers(a *fr.Element, n int) []fr.Element { + result := make([]fr.Element, n) - result[0] = fr.NewElement(1) - for i := 1; i < n; i++ { - result[i].Mul(&result[i-1], &aMont) + if n >= 1 { + result[0] = fr.NewElement(1) + } + if n >= 2 { + result[1].Set(a) + } + for i := 2; i < n; i++ { + result[i].Mul(&result[i-1], a) } return result } // Returns [aᵢAᵢ, ...]∈𝔾₁ +// it assumes len(A) ≤ len(a) func scaleG1InPlace(A []curve.G1Affine, a []fr.Element) { + /*if a[0].IsOne() { + A = A[1:] + a = a[1:] + }*/ utils.Parallelize(len(A), func(start, end int) { var tmp big.Int for i := start; i < end; i++ { @@ -104,7 +118,12 @@ func scaleG1InPlace(A []curve.G1Affine, a []fr.Element) { } // Returns [aᵢAᵢ, ...]∈𝔾₂ +// it assumes len(A) ≤ len(a) func scaleG2InPlace(A []curve.G2Affine, a []fr.Element) { + /*if a[0].IsOne() { + A = A[1:] + a = a[1:] + }*/ utils.Parallelize(len(A), func(start, end int) { var tmp big.Int for i := start; i < end; i++ { @@ -217,69 +236,66 @@ func (p *pair) validUpdate() bool { type valueUpdate struct { contributionCommitment curve.G1Affine // x or [Xⱼ]₁ contributionPok curve.G2Affine // π ≔ x.r ∈ 𝔾₂ - updatedCommitment pair // [X₁..Xⱼ] + //updatedCommitment pair // [X₁..Xⱼ] } // updateValue produces values associated with contribution to an existing value. // if prevCommitment contains only a 𝔾₁ value, then so will updatedCommitment // the second output is toxic waste. 
It is the caller's responsibility to safely "dispose" of it. -func updateValue(prevCommitment pair, challenge []byte, dst byte) (valueUpdate, *big.Int) { - var x valueUpdate - contributionValue, err := rand.Int(rand.Reader, fr.Modulus()) - - if err != nil { +func updateValue(prev curve.G1Affine, challenge []byte, dst byte) (proof valueUpdate, updated curve.G1Affine, contributionValue fr.Element) { + if _, err := contributionValue.SetRandom(); err != nil { panic(err) } + var contributionValueI big.Int + contributionValue.BigInt(&contributionValueI) _, _, g1, _ := curve.Generators() - x.contributionCommitment.ScalarMultiplication(&g1, contributionValue) - x.updatedCommitment.g1.ScalarMultiplication(&prevCommitment.g1, contributionValue) - if prevCommitment.g2 != nil { // TODO make sure this is correct - x.updatedCommitment.g2 = new(curve.G2Affine).ScalarMultiplication(prevCommitment.g2, contributionValue) - } + proof.contributionCommitment.ScalarMultiplication(&g1, &contributionValueI) + updated.ScalarMultiplication(&prev, &contributionValueI) // proof of knowledge to commitment. Algorithm 3 from section 3.7 - pokBase := genR(x.contributionCommitment, x.updatedCommitment.g1, challenge, dst) // r - x.contributionPok.ScalarMultiplication(&pokBase, contributionValue) + pokBase := genR(proof.contributionCommitment, updated, challenge, dst) // r + proof.contributionPok.ScalarMultiplication(&pokBase, &contributionValueI) - return x, contributionValue + return } // verify corresponds with verification steps {i, i+3} with 1 ≤ i ≤ 3 in section 7.1 of Bowe-Gabizon17 // it checks the proof of knowledge of the contribution, and the fact that the product of the contribution // and previous commitment makes the new commitment. // prevCommitment is assumed to be valid. No subgroup check and the like. -func (x *valueUpdate) verify(prevCommitment pair, challenge []byte, dst byte) error { - noG2 := prevCommitment.g2 == nil - if noG2 != (x.updatedCommitment.g2 == nil) { +func (x *valueUpdate) verify(prev, updated pair, challenge []byte, dst byte) error { + noG2 := prev.g2 == nil + if noG2 != (updated.g2 == nil) { return errors.New("erasing or creating g2 values") } - if !x.contributionPok.IsInSubGroup() || !x.contributionCommitment.IsInSubGroup() || !x.updatedCommitment.validUpdate() { + if !x.contributionPok.IsInSubGroup() || !x.contributionCommitment.IsInSubGroup() || !updated.validUpdate() { return errors.New("contribution values subgroup check failed") } // verify commitment proof of knowledge. CheckPOK, algorithm 4 from section 3.7 - r := genR(x.contributionCommitment, x.updatedCommitment.g1, challenge, dst) // verification challenge in the form of a g2 base + r := genR(x.contributionCommitment, updated.g1, challenge, dst) // verification challenge in the form of a g2 base _, _, g1, _ := curve.Generators() if !sameRatioUnsafe(x.contributionCommitment, g1, x.contributionPok, r) { // π =? x.r i.e. x/g1 =? π/r return errors.New("contribution proof of knowledge verification failed") } // check that the updated/previous ratio is consistent between the 𝔾₁ and 𝔾₂ representations. Based on CONSISTENT, algorithm 2 in Section 3.6. 
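// Concretely, sameRatioUnsafe(a, b, c, d) accepts iff e(a, d) = e(b, c), i.e. the 𝔾₁ ratio
// a/b equals the 𝔾₂ ratio c/d. A hedged sketch of that check via gnark-crypto pairings
// (the helper's actual implementation may differ, e.g. by batching the Miller loops):
//
//	lhs, _ := curve.Pair([]curve.G1Affine{a}, []curve.G2Affine{d})
//	rhs, _ := curve.Pair([]curve.G1Affine{b}, []curve.G2Affine{c})
//	consistent := lhs.Equal(&rhs)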
- if !noG2 && !sameRatioUnsafe(x.updatedCommitment.g1, prevCommitment.g1, *x.updatedCommitment.g2, *prevCommitment.g2) { + if !noG2 && !sameRatioUnsafe(updated.g1, prev.g1, *updated.g2, *prev.g2) { return errors.New("g2 update inconsistent") } // now verify that updated₁/previous₁ = x ( = x/g1 = π/r ) // have to use the latter value for the RHS because we sameRatio needs both 𝔾₁ and 𝔾₂ values - if !sameRatioUnsafe(x.updatedCommitment.g1, prevCommitment.g1, x.contributionPok, r) { + if !sameRatioUnsafe(updated.g1, prev.g1, x.contributionPok, r) { return errors.New("g1 update inconsistent") } return nil } +/* // setEmpty does not provide proofs, only sets the value to [1] func (x *valueUpdate) setEmpty(g1Only bool) { _, _, g1, g2 := curve.Generators() @@ -287,7 +303,7 @@ func (x *valueUpdate) setEmpty(g1Only bool) { if !g1Only { x.updatedCommitment.g2 = &g2 } -} +}*/ func toRefs[T any](s []T) []*T { res := make([]*T, len(s)) @@ -317,21 +333,3 @@ func areInSubGroupG2(s []curve.G2Affine) bool { func truncate[T any](s []T) []T { return s[:len(s)-1] } - -func eraseBigInts(i ...*big.Int) { - for _, i := range i { - if i != nil { - for j := range i.Bits() { - i.Bits()[j] = 0 - } - } - } -} - -func eraseFrVectors(v ...[]fr.Element) { - for _, v := range v { - for i := range v { - v[i].SetZero() - } - } -} From 90f4ed78a18b02506cf9421a507c1c8d594ef7df Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Wed, 23 Oct 2024 16:11:56 -0500 Subject: [PATCH 12/25] chore some comments and name changes --- backend/groth16/bn254/mpcsetup/lagrange.go | 4 +- backend/groth16/bn254/mpcsetup/marshal.go | 4 +- backend/groth16/bn254/mpcsetup/phase2.go | 66 ++++++++++++-------- backend/groth16/bn254/mpcsetup/setup.go | 2 +- backend/groth16/bn254/mpcsetup/setup_test.go | 2 +- 5 files changed, 46 insertions(+), 32 deletions(-) diff --git a/backend/groth16/bn254/mpcsetup/lagrange.go b/backend/groth16/bn254/mpcsetup/lagrange.go index 886e489248..54bb692ed1 100644 --- a/backend/groth16/bn254/mpcsetup/lagrange.go +++ b/backend/groth16/bn254/mpcsetup/lagrange.go @@ -86,7 +86,7 @@ func butterflyG2(a *curve.G2Affine, b *curve.G2Affine) { b.Sub(&t, b) } -// kerDIF8 is a kernel that process a FFT of size 8 +// kerDIF8 is a kernel that processes an FFT of size 8 func kerDIF8G1(a []curve.G1Affine, twiddles [][]fr.Element, stage int) { butterflyG1(&a[0], &a[4]) butterflyG1(&a[1], &a[5]) @@ -114,7 +114,7 @@ func kerDIF8G1(a []curve.G1Affine, twiddles [][]fr.Element, stage int) { butterflyG1(&a[6], &a[7]) } -// kerDIF8 is a kernel that process a FFT of size 8 +// kerDIF8 is a kernel that processes an FFT of size 8 func kerDIF8G2(a []curve.G2Affine, twiddles [][]fr.Element, stage int) { butterflyG2(&a[0], &a[4]) butterflyG2(&a[1], &a[5]) diff --git a/backend/groth16/bn254/mpcsetup/marshal.go b/backend/groth16/bn254/mpcsetup/marshal.go index 8f249bbda6..b82c8a44ac 100644 --- a/backend/groth16/bn254/mpcsetup/marshal.go +++ b/backend/groth16/bn254/mpcsetup/marshal.go @@ -92,7 +92,7 @@ func (c *Phase2) writeTo(writer io.Writer) (int64, error) { &c.PublicKey.SXG, &c.PublicKey.XR, &c.Parameters.G1.Delta, - c.Parameters.G1.L, + c.Parameters.G1.PKK, c.Parameters.G1.Z, &c.Parameters.G2.Delta, } @@ -114,7 +114,7 @@ func (c *Phase2) ReadFrom(reader io.Reader) (int64, error) { &c.PublicKey.SXG, &c.PublicKey.XR, &c.Parameters.G1.Delta, - &c.Parameters.G1.L, + &c.Parameters.G1.PKK, &c.Parameters.G1.Z, &c.Parameters.G2.Delta, } diff --git a/backend/groth16/bn254/mpcsetup/phase2.go 
b/backend/groth16/bn254/mpcsetup/phase2.go index 058489e28f..65f12402d2 100644 --- a/backend/groth16/bn254/mpcsetup/phase2.go +++ b/backend/groth16/bn254/mpcsetup/phase2.go @@ -19,6 +19,7 @@ package mpcsetup import ( "crypto/sha256" "errors" + "github.com/consensys/gnark-crypto/ecc/bn254/fr/pedersen" "math/big" curve "github.com/consensys/gnark-crypto/ecc/bn254" @@ -29,36 +30,47 @@ import ( type Phase2Evaluations struct { G1 struct { - A, B, VKK []curve.G1Affine + A []curve.G1Affine // A are the left coefficient polynomials for each witness element, evaluated at τ + B []curve.G1Affine // B are the right coefficient polynomials for each witness element, evaluated at τ + VKK []curve.G1Affine // VKK are the coefficients of the public witness } G2 struct { - B []curve.G2Affine + B []curve.G2Affine // B are the right coefficient polynomials for each witness element, evaluated at τ } } type Phase2 struct { Parameters struct { G1 struct { - Delta curve.G1Affine - L, Z []curve.G1Affine + Delta curve.G1Affine + PKK, Z []curve.G1Affine // Z is the domain vanishing polynomial } G2 struct { Delta curve.G2Affine } + CommitmentKeys pedersen.ProvingKey } - PublicKey PublicKey + Sigmas []valueUpdate // commitment key secrets + PublicKey PublicKey // commitment to delta Hash []byte } +// Init is to be run by the coordinator +// It involves no coin tosses. A verifier should +// simply rerun all the steps +func (p *Phase2) Init(commons SrsCommons) { + +} + func InitPhase2(r1cs *cs.R1CS, srs1 *Phase1) (Phase2, Phase2Evaluations) { - srs := srs1.Parameters + srs := srs1.parameters size := len(srs.G1.AlphaTau) if size < r1cs.GetNbConstraints() { panic("Number of constraints is larger than expected") } - c2 := Phase2{} + var c2 Phase2 accumulateG1 := func(res *curve.G1Affine, t constraint.Term, value *curve.G1Affine) { cID := t.CoeffID() @@ -101,26 +113,28 @@ func InitPhase2(r1cs *cs.R1CS, srs1 *Phase1) (Phase2, Phase2Evaluations) { } // Prepare Lagrange coefficients of [τ...]₁, [τ...]₂, [ατ...]₁, [βτ...]₁ - coeffTau1 := lagrangeCoeffsG1(srs.G1.Tau, size) - coeffTau2 := lagrangeCoeffsG2(srs.G2.Tau, size) - coeffAlphaTau1 := lagrangeCoeffsG1(srs.G1.AlphaTau, size) - coeffBetaTau1 := lagrangeCoeffsG1(srs.G1.BetaTau, size) + coeffTau1 := lagrangeCoeffsG1(srs.G1.Tau, size) // [L_{ω⁰}(τ)]₁, [L_{ω¹}(τ)]₁, ... where ω is a primitive sizeᵗʰ root of unity + coeffTau2 := lagrangeCoeffsG2(srs.G2.Tau, size) // [L_{ω⁰}(τ)]₂, [L_{ω¹}(τ)]₂, ... + coeffAlphaTau1 := lagrangeCoeffsG1(srs.G1.AlphaTau, size) // [L_{ω⁰}(ατ)]₁, [L_{ω¹}(ατ)]₁, ... + coeffBetaTau1 := lagrangeCoeffsG1(srs.G1.BetaTau, size) // [L_{ω⁰}(βτ)]₁, [L_{ω¹}(βτ)]₁, ... internal, secret, public := r1cs.GetNbVariables() nWires := internal + secret + public var evals Phase2Evaluations - evals.G1.A = make([]curve.G1Affine, nWires) - evals.G1.B = make([]curve.G1Affine, nWires) - evals.G2.B = make([]curve.G2Affine, nWires) + evals.G1.A = make([]curve.G1Affine, nWires) // recall: A are the left coefficients in DIZK parlance + evals.G1.B = make([]curve.G1Affine, nWires) // recall: B are the right coefficients in DIZK parlance + evals.G2.B = make([]curve.G2Affine, nWires) // recall: A only appears in 𝔾₁ elements in the proof, but B needs to appear in a 𝔾₂ element so the verifier can compute something resembling (A.x).(B.x) via pairings bA := make([]curve.G1Affine, nWires) aB := make([]curve.G1Affine, nWires) C := make([]curve.G1Affine, nWires) - // TODO @gbotrel use constraint iterator when available. 
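Each wire's columns are assembled incrementally: for constraint number i with a term t = (coeff, wire) on its left-hand side, accumulateG1 above adds coeff·[L_{ωⁱ}(τ)]₁ into evals.G1.A[t.WireID()]. A hedged sketch of the general-case step it performs, ignoring its special-casing of small coefficients (coeff is assumed to be the term's fr.Element coefficient):

	var coeffBI big.Int
	coeff.BigInt(&coeffBI)
	var tmp curve.G1Affine
	tmp.ScalarMultiplication(&coeffTau1[i], &coeffBI) // coeff·[L_{ωⁱ}(τ)]₁
	evals.G1.A[t.WireID()].Add(&evals.G1.A[t.WireID()], &tmp)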
- i := 0 it := r1cs.GetR1CIterator() for c := it.Next(); c != nil; c = it.Next() { + // each constraint is sparse, i.e. involves a small portion of all variables. + // so we iterate over the variables involved and add the constraint's contribution + // to every variable's A, B, and C values + // A for _, t := range c.L { accumulateG1(&evals.G1.A[t.WireID()], t, &coeffTau1[i]) @@ -154,9 +168,9 @@ func InitPhase2(r1cs *cs.R1CS, srs1 *Phase1) (Phase2, Phase2Evaluations) { bitReverse(c2.Parameters.G1.Z) c2.Parameters.G1.Z = c2.Parameters.G1.Z[:n-1] - // Evaluate L + // Evaluate PKK nPrivate := internal + secret - c2.Parameters.G1.L = make([]curve.G1Affine, nPrivate) + c2.Parameters.G1.PKK = make([]curve.G1Affine, nPrivate) evals.G1.VKK = make([]curve.G1Affine, public) offset := public for i := 0; i < nWires; i++ { @@ -166,7 +180,7 @@ func InitPhase2(r1cs *cs.R1CS, srs1 *Phase1) (Phase2, Phase2Evaluations) { if i < public { evals.G1.VKK[i].Set(&tmp) } else { - c2.Parameters.G1.L[i-offset].Set(&tmp) + c2.Parameters.G1.PKK[i-offset].Set(&tmp) } } // Set δ public key @@ -201,9 +215,9 @@ func (c *Phase2) Contribute() { c.Parameters.G1.Z[i].ScalarMultiplication(&c.Parameters.G1.Z[i], &deltaInvBI) } - // Update L using δ⁻¹ - for i := 0; i < len(c.Parameters.G1.L); i++ { - c.Parameters.G1.L[i].ScalarMultiplication(&c.Parameters.G1.L[i], &deltaInvBI) + // Update PKK using δ⁻¹ + for i := 0; i < len(c.Parameters.G1.PKK); i++ { + c.Parameters.G1.PKK[i].ScalarMultiplication(&c.Parameters.G1.PKK[i], &deltaInvBI) } // 4. Hash contribution @@ -237,14 +251,14 @@ func verifyPhase2(current, contribution *Phase2) error { return errors.New("couldn't verify that [δ]₂ is based on previous contribution") } - // Check for valid updates of L and Z using - L, prevL := merge(contribution.Parameters.G1.L, current.Parameters.G1.L) + // Check for valid updates of PKK and Z using + L, prevL := merge(contribution.Parameters.G1.PKK, current.Parameters.G1.PKK) if !sameRatio(L, prevL, contribution.Parameters.G2.Delta, current.Parameters.G2.Delta) { - return errors.New("couldn't verify valid updates of L using δ⁻¹") + return errors.New("couldn't verify valid updates of PKK using δ⁻¹") } Z, prevZ := merge(contribution.Parameters.G1.Z, current.Parameters.G1.Z) if !sameRatio(Z, prevZ, contribution.Parameters.G2.Delta, current.Parameters.G2.Delta) { - return errors.New("couldn't verify valid updates of L using δ⁻¹") + return errors.New("couldn't verify valid updates of PKK using δ⁻¹") } // Check hash of the contribution diff --git a/backend/groth16/bn254/mpcsetup/setup.go b/backend/groth16/bn254/mpcsetup/setup.go index 4946e9f597..0f7ff26d99 100644 --- a/backend/groth16/bn254/mpcsetup/setup.go +++ b/backend/groth16/bn254/mpcsetup/setup.go @@ -33,7 +33,7 @@ func ExtractKeys(srs1 *Phase1, srs2 *Phase2, evals *Phase2Evaluations, nConstrai pk.G1.Z = srs2.Parameters.G1.Z bitReverse(pk.G1.Z) - pk.G1.K = srs2.Parameters.G1.L + pk.G1.K = srs2.Parameters.G1.PKK pk.G2.Beta.Set(&srs1.Parameters.G2.Beta) pk.G2.Delta.Set(&srs2.Parameters.G2.Delta) diff --git a/backend/groth16/bn254/mpcsetup/setup_test.go b/backend/groth16/bn254/mpcsetup/setup_test.go index cb71d9ea6a..b4b013c46b 100644 --- a/backend/groth16/bn254/mpcsetup/setup_test.go +++ b/backend/groth16/bn254/mpcsetup/setup_test.go @@ -190,7 +190,7 @@ func (p *Phase1) clone() Phase1 { func (phase2 *Phase2) clone() Phase2 { r := Phase2{} r.Parameters.G1.Delta = phase2.Parameters.G1.Delta - r.Parameters.G1.L = append(r.Parameters.G1.L, phase2.Parameters.G1.L...) 
+ r.Parameters.G1.PKK = append(r.Parameters.G1.PKK, phase2.Parameters.G1.PKK...) r.Parameters.G1.Z = append(r.Parameters.G1.Z, phase2.Parameters.G1.Z...) r.Parameters.G2.Delta = phase2.Parameters.G2.Delta r.PublicKey = phase2.PublicKey From 7afabbfb1fab2b476774aa8a72f3f2c1c1b82402 Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Thu, 24 Oct 2024 13:09:31 -0500 Subject: [PATCH 13/25] feat phase2 init --- backend/groth16/bn254/mpcsetup/phase2.go | 68 ++++++++++++++++-------- backend/groth16/bn254/setup.go | 26 ++++----- backend/groth16/internal/utils.go | 58 ++++++++++++++++++++ 3 files changed, 114 insertions(+), 38 deletions(-) diff --git a/backend/groth16/bn254/mpcsetup/phase2.go b/backend/groth16/bn254/mpcsetup/phase2.go index 65f12402d2..6d739dbe45 100644 --- a/backend/groth16/bn254/mpcsetup/phase2.go +++ b/backend/groth16/bn254/mpcsetup/phase2.go @@ -20,7 +20,9 @@ import ( "crypto/sha256" "errors" "github.com/consensys/gnark-crypto/ecc/bn254/fr/pedersen" + "github.com/consensys/gnark/backend/groth16/internal" "math/big" + "slices" curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fr" @@ -32,7 +34,7 @@ type Phase2Evaluations struct { G1 struct { A []curve.G1Affine // A are the left coefficient polynomials for each witness element, evaluated at τ B []curve.G1Affine // B are the right coefficient polynomials for each witness element, evaluated at τ - VKK []curve.G1Affine // VKK are the coefficients of the public witness + VKK []curve.G1Affine // VKK are the coefficients of the public witness (and commitments) } G2 struct { B []curve.G2Affine // B are the right coefficient polynomials for each witness element, evaluated at τ @@ -42,17 +44,19 @@ type Phase2Evaluations struct { type Phase2 struct { Parameters struct { G1 struct { - Delta curve.G1Affine - PKK, Z []curve.G1Affine // Z is the domain vanishing polynomial + Delta curve.G1Affine + Z []curve.G1Affine // Z is the domain vanishing polynomial + PKK []curve.G1Affine // PKK are the coefficients of the private witness } G2 struct { Delta curve.G2Affine + Sigma curve.G2Affine } - CommitmentKeys pedersen.ProvingKey + CommitmentKeys []pedersen.ProvingKey } - Sigmas []valueUpdate // commitment key secrets - PublicKey PublicKey // commitment to delta - Hash []byte + Sigmas []valueUpdate // commitment key secrets + Delta valueUpdate // updates to delta + Hash []byte } // Init is to be run by the coordinator @@ -118,8 +122,8 @@ func InitPhase2(r1cs *cs.R1CS, srs1 *Phase1) (Phase2, Phase2Evaluations) { coeffAlphaTau1 := lagrangeCoeffsG1(srs.G1.AlphaTau, size) // [L_{ω⁰}(ατ)]₁, [L_{ω¹}(ατ)]₁, ... coeffBetaTau1 := lagrangeCoeffsG1(srs.G1.BetaTau, size) // [L_{ω⁰}(βτ)]₁, [L_{ω¹}(βτ)]₁, ... 
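Since each coefficient slice fixes a polynomial by its values on the domain, committing to any polynomial given by its domain evaluations is then a single multi-exponentiation. A hedged sketch (vals is an assumed []fr.Element of evaluations; coeffTau1 as computed above):

	var com curve.G1Affine
	if _, err := com.MultiExp(coeffTau1, vals, ecc.MultiExpConfig{}); err != nil {
		panic(err)
	}
	// com = Σᵢ vals[i]·[L_{ωⁱ}(τ)]₁ = [p(τ)]₁ for the unique p of degree < size
	// with p(ωⁱ) = vals[i]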
- internal, secret, public := r1cs.GetNbVariables() - nWires := internal + secret + public + nbInternal, nbSecret, nbPublic := r1cs.GetNbVariables() + nWires := nbInternal + nbSecret + nbPublic var evals Phase2Evaluations evals.G1.A = make([]curve.G1Affine, nWires) // recall: A are the left coefficients in DIZK parlance evals.G1.B = make([]curve.G1Affine, nWires) // recall: B are the right coefficients in DIZK parlance @@ -157,6 +161,7 @@ func InitPhase2(r1cs *cs.R1CS, srs1 *Phase1) (Phase2, Phase2Evaluations) { _, _, g1, g2 := curve.Generators() c2.Parameters.G1.Delta = g1 c2.Parameters.G2.Delta = g2 + c2.Parameters.G2.Sigma = g2 // Build Z in PK as τⁱ(τⁿ - 1) = τ⁽ⁱ⁺ⁿ⁾ - τⁱ for i ∈ [0, n-2] // τⁱ(τⁿ - 1) = τ⁽ⁱ⁺ⁿ⁾ - τⁱ for i ∈ [0, n-2] @@ -168,35 +173,56 @@ func InitPhase2(r1cs *cs.R1CS, srs1 *Phase1) (Phase2, Phase2Evaluations) { bitReverse(c2.Parameters.G1.Z) c2.Parameters.G1.Z = c2.Parameters.G1.Z[:n-1] + commitments := r1cs.CommitmentInfo.(constraint.Groth16Commitments) + c2.Sigmas = make([]valueUpdate, len(commitments)) + c2.Parameters.CommitmentKeys = make([]pedersen.ProvingKey, len(commitments)) + for j := range commitments { + c2.Parameters.CommitmentKeys[i].Basis = make([]curve.G1Affine, 0, len(commitments[j].PrivateCommitted)) + } + nbCommitted := internal.NbElements(commitments.GetPrivateCommitted()) + // Evaluate PKK - nPrivate := internal + secret - c2.Parameters.G1.PKK = make([]curve.G1Affine, nPrivate) - evals.G1.VKK = make([]curve.G1Affine, public) - offset := public + + c2.Parameters.G1.PKK = make([]curve.G1Affine, 0, nbInternal+nbSecret-nbCommitted-len(commitments)) + evals.G1.VKK = make([]curve.G1Affine, 0, nbPublic+len(commitments)) + committedIterator := internal.NewMergeIterator(commitments.GetPrivateCommitted()) + nbCommitmentsSeen := 0 for i := 0; i < nWires; i++ { + // since as yet δ, γ = 1, the VKK and PKK are computed identically, as βA + αB + C var tmp curve.G1Affine tmp.Add(&bA[i], &aB[i]) tmp.Add(&tmp, &C[i]) - if i < public { - evals.G1.VKK[i].Set(&tmp) + commitmentIndex := committedIterator.IndexIfNext(i) + isCommitment := nbCommitmentsSeen < len(commitments) && commitments[nbCommitmentsSeen].CommitmentIndex == i + if commitmentIndex != -1 { + c2.Parameters.CommitmentKeys[commitmentIndex].Basis = append(c2.Parameters.CommitmentKeys[commitmentIndex].Basis, tmp) + } else if i < nbPublic || isCommitment { + evals.G1.VKK = append(evals.G1.VKK, tmp) } else { - c2.Parameters.G1.PKK[i-offset].Set(&tmp) + c2.Parameters.G1.PKK = append(c2.Parameters.G1.PKK, tmp) + } + if isCommitment { + nbCommitmentsSeen++ } } - // Set δ public key - var delta fr.Element - delta.SetOne() - c2.PublicKey = newPublicKey(delta, nil, 1) + + for i := range commitments { + c2.Parameters.CommitmentKeys[i].BasisExpSigma = slices.Clone(c2.Parameters.CommitmentKeys[i].Basis) + } // Hash initial contribution - c2.Hash = c2.hash() + c2.Hash = c2.hash() // TODO remove return c2, evals } func (c *Phase2) Contribute() { // Sample toxic δ var delta, deltaInv fr.Element + var sigma fr.Element var deltaBI, deltaInvBI big.Int + + updateValue() + delta.SetRandom() deltaInv.Inverse(&delta) diff --git a/backend/groth16/bn254/setup.go b/backend/groth16/bn254/setup.go index 13ddcd61d3..6ea996b146 100644 --- a/backend/groth16/bn254/setup.go +++ b/backend/groth16/bn254/setup.go @@ -144,7 +144,7 @@ func Setup(r1cs *cs.R1CS, pk *ProvingKey, vk *VerifyingKey) error { vkK := make([]fr.Element, nbPublicWires) ckK := make([][]fr.Element, len(commitmentInfo)) for i := range commitmentInfo { - ckK[i] = 
make([]fr.Element, len(privateCommitted[i])) + ckK[i] = make([]fr.Element, 0, len(privateCommitted[i])) } var t0, t1 fr.Element @@ -156,37 +156,29 @@ func Setup(r1cs *cs.R1CS, pk *ProvingKey, vk *VerifyingKey) error { Add(&t1, &C[i]). Mul(&t1, coeff) } - vI := 0 // number of public wires seen so far - cI := make([]int, len(commitmentInfo)) // number of private committed wires seen so far for each commitment - nbPrivateCommittedSeen := 0 // = ∑ᵢ cI[i] + vI := 0 // number of public wires seen so far + committedIterator := internal.NewMergeIterator(privateCommitted) + nbPrivateCommittedSeen := 0 // = ∑ᵢ cI[i] nbCommitmentsSeen := 0 for i := range A { - commitment := -1 // index of the commitment that commits to this variable as a private or commitment value - var isCommitment, isPublic bool - if isPublic = i < r1cs.GetNbPublicVariables(); !isPublic { + commitmentIndex := committedIterator.IndexIfNext(i) // the index of the commitment that commits to the wire i. -1 if i is not committed + isCommitment, isPublic := false, i < r1cs.GetNbPublicVariables() + if !isPublic { if nbCommitmentsSeen < len(commitmentWires) && commitmentWires[nbCommitmentsSeen] == i { isCommitment = true nbCommitmentsSeen++ } - - for j := range commitmentInfo { // does commitment j commit to i? - if cI[j] < len(privateCommitted[j]) && privateCommitted[j][cI[j]] == i { - commitment = j - break // frontend guarantees that no private variable is committed to more than once - } - } } - if isPublic || commitment != -1 || isCommitment { + if isPublic || isCommitment || commitmentIndex != -1 { computeK(i, &toxicWaste.gammaInv) if isPublic || isCommitment { vkK[vI] = t1 vI++ } else { // committed and private - ckK[commitment][cI[commitment]] = t1 - cI[commitment]++ + ckK[commitmentIndex] = append(ckK[commitmentIndex], t1) nbPrivateCommittedSeen++ } } else { diff --git a/backend/groth16/internal/utils.go b/backend/groth16/internal/utils.go index 6062ef57ce..67648b2104 100644 --- a/backend/groth16/internal/utils.go +++ b/backend/groth16/internal/utils.go @@ -1,5 +1,10 @@ package internal +import ( + "math" + "slices" +) + func ConcatAll(slices ...[]int) []int { // copyright note: written by GitHub Copilot totalLen := 0 for _, s := range slices { @@ -20,3 +25,56 @@ func NbElements(slices [][]int) int { // copyright note: written by GitHub Copil } return totalLen } + +// NewMergeIterator assumes that all slices in s are sorted +func NewMergeIterator(s [][]int) *MergeIterator { + res := &MergeIterator{slices: slices.Clone(s)} + res.findLeast() + return res +} + +// MergeIterator iterates through a merging of multiple sorted slices +type MergeIterator struct { + slices [][]int + leastIndex int +} + +func (i *MergeIterator) findLeast() { + value := math.MaxInt + i.leastIndex = -1 + for j := range i.slices { + if len(i.slices[j]) == 0 { + continue + } + if v := i.slices[j][0]; v < value { + value = v + i.leastIndex = j + } + } + return +} + +// Peek returns the next smallest value and the index of the slice it came from +// If the iterator is empty, Peek returns (math.MaxInt, -1) +func (i *MergeIterator) Peek() (value, index int) { + return i.slices[i.leastIndex][0], i.leastIndex +} + +// Next returns the next smallest value and the index of the slice it came from, and advances the iterator +// If the iterator is empty, Next returns (math.MaxInt, -1) +func (i *MergeIterator) Next() (value, index int) { + value, index = i.Peek() + i.findLeast() + i.slices[i.leastIndex] = i.slices[i.leastIndex][1:] + return +} + +// IndexIfNext returns the 
index of the slice and advances the iterator if the next value is value, otherwise returns -1 +// If the iterator is empty, IndexIfNext returns -1 +func (i *MergeIterator) IndexIfNext(value int) int { + if v, index := i.Peek(); v == value { + i.Next() + return index + } + return -1 +} From b4bbb2ddbca558adc27402b2b03b5bf8f3a0744f Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Mon, 9 Dec 2024 18:02:28 -0600 Subject: [PATCH 14/25] checkpoint --- backend/groth16/bn254/mpcsetup/marshal.go | 6 +- backend/groth16/bn254/mpcsetup/phase1.go | 79 +++++--- backend/groth16/bn254/mpcsetup/phase2.go | 200 ++++++++++++------- backend/groth16/bn254/mpcsetup/setup_test.go | 2 +- backend/groth16/bn254/mpcsetup/utils.go | 57 +++--- 5 files changed, 206 insertions(+), 138 deletions(-) diff --git a/backend/groth16/bn254/mpcsetup/marshal.go b/backend/groth16/bn254/mpcsetup/marshal.go index b82c8a44ac..b3bd05063c 100644 --- a/backend/groth16/bn254/mpcsetup/marshal.go +++ b/backend/groth16/bn254/mpcsetup/marshal.go @@ -81,7 +81,7 @@ func (phase2 *Phase2) WriteTo(writer io.Writer) (int64, error) { if err != nil { return n, err } - nBytes, err := writer.Write(phase2.Hash) + nBytes, err := writer.Write(phase2.Challenge) return int64(nBytes) + n, err } @@ -125,8 +125,8 @@ func (c *Phase2) ReadFrom(reader io.Reader) (int64, error) { } } - c.Hash = make([]byte, 32) - n, err := reader.Read(c.Hash) + c.Challenge = make([]byte, 32) + n, err := reader.Read(c.Challenge) return int64(n) + dec.BytesRead(), err } diff --git a/backend/groth16/bn254/mpcsetup/phase1.go b/backend/groth16/bn254/mpcsetup/phase1.go index 4b8df877e7..3a6dddc5c1 100644 --- a/backend/groth16/bn254/mpcsetup/phase1.go +++ b/backend/groth16/bn254/mpcsetup/phase1.go @@ -21,9 +21,11 @@ import ( "crypto/sha256" "errors" "fmt" + "github.com/consensys/gnark-crypto/ecc" curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fr" "math/big" + "runtime" ) // Phase1 represents the Phase1 of the MPC described in @@ -47,9 +49,9 @@ func (p *Phase1) Contribute() { var ( tauContrib, alphaContrib, betaContrib fr.Element ) - p.proofs.Tau, p.parameters.G1.Tau[1], tauContrib = updateValue(p.parameters.G1.Tau[1], p.Challenge, 1) - p.proofs.Alpha, p.parameters.G1.AlphaTau[0], alphaContrib = updateValue(p.parameters.G1.AlphaTau[0], p.Challenge, 2) - p.proofs.Beta, p.parameters.G1.BetaTau[0], betaContrib = updateValue(p.parameters.G1.BetaTau[0], p.Challenge, 3) + p.proofs.Tau, tauContrib = updateValue(&p.parameters.G1.Tau[1], p.Challenge, 1) + p.proofs.Alpha, alphaContrib = updateValue(&p.parameters.G1.AlphaTau[0], p.Challenge, 2) + p.proofs.Beta, betaContrib = updateValue(&p.parameters.G1.BetaTau[0], p.Challenge, 3) p.parameters.update(&tauContrib, &alphaContrib, &betaContrib, true) } @@ -164,38 +166,55 @@ func VerifyPhase1(c0, c1 *Phase1, c ...*Phase1) error { } // Verify assumes previous is correct -func (p *Phase1) Verify(previous *Phase1) error { +func (p *Phase1) Verify(next *Phase1) error { - if prevHash := previous.hash(); !bytes.Equal(p.Challenge, previous.hash()) { // if chain-verifying contributions, challenge fields are optional as they can be computed as we go - if len(p.Challenge) != 0 { + if prevHash := p.hash(); !bytes.Equal(next.Challenge, p.hash()) { // if chain-verifying contributions, challenge fields are optional as they can be computed as we go + if len(next.Challenge) != 0 { return errors.New("the challenge does not match the previous phase's hash") } - p.Challenge = 
prevHash + next.Challenge = prevHash } - if err := p.proofs.Tau.verify( - pair{previous.parameters.G1.Tau[1], &previous.parameters.G2.Tau[1]}, + // TODO compare sizes + + r := linearCombCoeffs(len(next.parameters.G1.Tau) - 1) // the longest of all lengths + // will be reusing the coefficients TODO @Tabaie make sure that's okay + + N := len(next.parameters.G2.Tau) + var taus, alphaTaus, betaTaus curve.G1Affine + if _, err := taus.MultiExp(next.parameters.G1.Tau[1:N], r, ecc.MultiExpConfig{NbTasks: runtime.NumCPU()}); err != nil { // τ¹ + r.τ² + … + rᴺ⁻².τⁿ⁻¹ + return err + } + if _, err := alphaTaus.MultiExp(next.parameters.G1.AlphaTau[1:], r, ecc.MultiExpConfig{NbTasks: runtime.NumCPU()}); err != nil { // ατ¹ + r.ατ² + … + rᴺ⁻².ατⁿ⁻¹ + return err + } + if _, err := betaTaus.MultiExp(next.parameters.G1.BetaTau[1:], r, ecc.MultiExpConfig{NbTasks: runtime.NumCPU()}); err != nil { // βτ¹ + r.βτ² + … + rᴺ⁻².βτⁿ⁻¹ + return err + } + + if err := next.proofs.Tau.verify( pair{p.parameters.G1.Tau[1], &p.parameters.G2.Tau[1]}, - p.Challenge, 1); err != nil { + pair{next.parameters.G1.Tau[1], &next.parameters.G2.Tau[1]}, + next.Challenge, 1); err != nil { return fmt.Errorf("failed to verify contribution to τ: %w", err) } - if err := p.proofs.Alpha.verify( - pair{previous.parameters.G1.AlphaTau[0], nil}, - pair{p.parameters.G1.AlphaTau[0], nil}, - p.Challenge, 2); err != nil { + if err := next.proofs.Alpha.verify( // TODO Get ACTUAL updated tau + pair{taus, nil}, + pair{alphaTaus, nil}, + next.Challenge, 2); err != nil { return fmt.Errorf("failed to verify contribution to α: %w", err) } - if err := p.proofs.Beta.verify( - pair{previous.parameters.G1.BetaTau[0], &previous.parameters.G2.Beta}, - pair{p.parameters.G1.BetaTau[0], &p.parameters.G2.Beta}, - p.Challenge, 3); err != nil { + if err := next.proofs.Beta.verify( + pair{p.parameters.G1.BetaTau[0], &p.parameters.G2.Beta}, // TODO @Tabaie combine the verification of all βτⁱ + pair{next.parameters.G1.BetaTau[0], &next.parameters.G2.Beta}, + next.Challenge, 3); err != nil { return fmt.Errorf("failed to verify contribution to β: %w", err) } - if !areInSubGroupG1(p.parameters.G1.Tau[2:]) || !areInSubGroupG1(p.parameters.G1.BetaTau[1:]) || !areInSubGroupG1(p.parameters.G1.AlphaTau[1:]) { + if !areInSubGroupG1(next.parameters.G1.Tau[2:]) || !areInSubGroupG1(next.parameters.G1.BetaTau[1:]) || !areInSubGroupG1(next.parameters.G1.AlphaTau[1:]) { return errors.New("derived values 𝔾₁ subgroup check failed") } - if !areInSubGroupG2(p.parameters.G2.Tau[2:]) { + if !areInSubGroupG2(next.parameters.G2.Tau[2:]) { return errors.New("derived values 𝔾₂ subgroup check failed") } @@ -205,19 +224,17 @@ func (p *Phase1) Verify(previous *Phase1) error { // i.e. e(τⁱ⁺¹,[1]₂) = e(τⁱ,[τ]₂). 
Due to bi-linearity we can instead check // e(∑rⁱ⁻¹τⁱ⁺¹,[1]₂) = e(∑rⁱ⁻¹τⁱ,[τ]₂), which is tantamount to the check // ∑rⁱ⁻¹τⁱ⁺¹ / ∑rⁱ⁻¹τⁱ = τ - r := linearCombCoeffs(len(p.parameters.G1.Tau) - 1) // the longest of all lengths - // will be reusing the coefficient TODO @Tabaie make sure that's okay - tauT1, tauS1 := linearCombinationsG1(p.parameters.G1.Tau[1:], r) - tauT2, tauS2 := linearCombinationsG2(p.parameters.G2.Tau[1:], r) - alphaTT, alphaTS := linearCombinationsG1(p.parameters.G1.AlphaTau, r) - betaTT, betaTS := linearCombinationsG1(p.parameters.G1.BetaTau, r) + tauT1, tauS1 := linearCombinationsG1(next.parameters.G1.Tau[1:], r) + tauT2, tauS2 := linearCombinationsG2(next.parameters.G2.Tau[1:], r) + alphaTT, alphaTS := linearCombinationsG1(next.parameters.G1.AlphaTau, r) + betaTT, betaTS := linearCombinationsG1(next.parameters.G1.BetaTau, r) - if !sameRatioUnsafe(tauS1, tauT1, p.parameters.G2.Tau[1], g2) { + if !sameRatioUnsafe(tauS1, tauT1, next.parameters.G2.Tau[1], g2) { return errors.New("couldn't verify 𝔾₁ representations of the τⁱ") } - if !sameRatioUnsafe(p.parameters.G1.Tau[1], g1, tauS2, tauT2) { + if !sameRatioUnsafe(next.parameters.G1.Tau[1], g1, tauS2, tauT2) { return errors.New("couldn't verify 𝔾₂ representations of the τⁱ") } @@ -226,10 +243,12 @@ func (p *Phase1) Verify(previous *Phase1) error { // For 0 < i < N we check that ατⁱ/ατⁱ⁻¹ = τ, since we have a representation of τ in 𝔾₂ // with a similar bi-linearity argument as above we can do this with a single pairing check - if !sameRatioUnsafe(alphaTS, alphaTT, p.parameters.G2.Tau[1], g2) { + // TODO eliminate these by combining with update checking + + if !sameRatioUnsafe(alphaTS, alphaTT, next.parameters.G2.Tau[1], g2) { return errors.New("couldn't verify the ατⁱ") } - if !sameRatioUnsafe(betaTS, betaTT, p.parameters.G2.Tau[1], g2) { + if !sameRatioUnsafe(betaTS, betaTT, next.parameters.G2.Tau[1], g2) { return errors.New("couldn't verify the βτⁱ") } diff --git a/backend/groth16/bn254/mpcsetup/phase2.go b/backend/groth16/bn254/mpcsetup/phase2.go index 6d739dbe45..6c6c15f7a3 100644 --- a/backend/groth16/bn254/mpcsetup/phase2.go +++ b/backend/groth16/bn254/mpcsetup/phase2.go @@ -17,9 +17,10 @@ package mpcsetup import ( + "bytes" "crypto/sha256" "errors" - "github.com/consensys/gnark-crypto/ecc/bn254/fr/pedersen" + "fmt" "github.com/consensys/gnark/backend/groth16/internal" "math/big" "slices" @@ -32,9 +33,10 @@ import ( type Phase2Evaluations struct { G1 struct { - A []curve.G1Affine // A are the left coefficient polynomials for each witness element, evaluated at τ - B []curve.G1Affine // B are the right coefficient polynomials for each witness element, evaluated at τ - VKK []curve.G1Affine // VKK are the coefficients of the public witness (and commitments) + A []curve.G1Affine // A are the left coefficient polynomials for each witness element, evaluated at τ + B []curve.G1Affine // B are the right coefficient polynomials for each witness element, evaluated at τ + VKK []curve.G1Affine // VKK are the coefficients of the public witness and commitments + CKK [][]curve.G1Affine // CKK are the coefficients of the committed values } G2 struct { B []curve.G2Affine // B are the right coefficient polynomials for each witness element, evaluated at τ @@ -44,19 +46,111 @@ type Phase2Evaluations struct { type Phase2 struct { Parameters struct { G1 struct { - Delta curve.G1Affine - Z []curve.G1Affine // Z is the domain vanishing polynomial - PKK []curve.G1Affine // PKK are the coefficients of the private witness + Delta curve.G1Affine 
+			Z        []curve.G1Affine   // Z are multiples of the domain vanishing polynomial
+			PKK      []curve.G1Affine   // PKK are the coefficients of the private witness
+			SigmaCKK [][]curve.G1Affine // Commitment bases
 		}
 		G2 struct {
 			Delta curve.G2Affine
-			Sigma curve.G2Affine
+			Sigma []curve.G2Affine
 		}
-		CommitmentKeys []pedersen.ProvingKey
 	}
-	Sigmas []valueUpdate // commitment key secrets
-	Delta  valueUpdate   // updates to delta
-	Hash   []byte
+
+	// Proofs of update correctness
+	Sigmas []valueUpdate
+	Delta  valueUpdate
+
+	// Challenge is the hash of the PREVIOUS contribution
+	Challenge []byte
+}
+
+func (c *Phase2) Verify(next *Phase2) error {
+	if challenge := c.hash(); len(next.Challenge) != 0 && !bytes.Equal(next.Challenge, challenge) {
+		return errors.New("the challenge does not match the previous phase's hash")
+	}
+
+	if err := next.Delta.verify(
+		pair{c.Parameters.G1.Delta, &c.Parameters.G2.Delta},
+		pair{next.Parameters.G1.Delta, &next.Parameters.G2.Delta},
+		next.Challenge, 1); err != nil {
+		return fmt.Errorf("failed to verify contribution to δ: %w", err)
+	}
+
+	for i := range c.Sigmas {
+		if err := next.Sigmas[i].verify(
+			pair{c.Parameters.G1.SigmaCKK[i][0], &c.Parameters.G2.Sigma[i]},
+			pair{next.Parameters.G1.SigmaCKK[i][0], &next.Parameters.G2.Sigma[i]},
+			next.Challenge, byte(2+i)); err != nil {
+			return fmt.Errorf("failed to verify contribution to σ[%d]: %w", i, err)
+		}
+		if !areInSubGroupG1(next.Parameters.G1.SigmaCKK[i][1:]) {
+			return errors.New("commitment proving key subgroup check failed")
+		}
+	}
+
+	if !areInSubGroupG1(next.Parameters.G1.Z) || !areInSubGroupG1(next.Parameters.G1.PKK) {
+		return errors.New("derived values 𝔾₁ subgroup check failed")
+	}
+
+	r := linearCombCoeffs(len(next.Parameters.G1.Z))
+
+	for i := range c.Sigmas {
+		prevComb, nextComb := linearCombination(c.Parameters.G1.SigmaCKK[i], next.Parameters.G1.SigmaCKK[i], r)
+		if !sameRatioUnsafe(nextComb, prevComb, next.Parameters.G2.Sigma[i], c.Parameters.G2.Sigma[i]) {
+			return fmt.Errorf("failed to verify contribution to σ[%d]", i)
+		}
+	}
+
+	linearCombination()
+}
+
+func (c *Phase2) Contribute() {
+	// Sample toxic δ
+	var delta, deltaInv fr.Element
+	var deltaBI, deltaInvBI big.Int
+
+	c.Challenge = c.hash()
+
+	if len(c.Parameters.G1.SigmaCKK) > 255 {
+		panic("too many commitments") // DST collision
+	}
+	for i := range c.Parameters.G1.SigmaCKK {
+		var (
+			sigmaContribution  fr.Element
+			sigmaContributionI big.Int
+		)
+
+		pk := c.Parameters.G1.SigmaCKK[i]
+		c.Sigmas[i], sigmaContribution = updateValue(&pk[0], c.Challenge, byte(2+i))
+		sigmaContribution.BigInt(&sigmaContributionI)
+		for j := 1; j < len(pk); j++ {
+			pk[j].ScalarMultiplication(&pk[j], &sigmaContributionI)
+		}
+		c.Parameters.G2.Sigma[i].ScalarMultiplication(&c.Parameters.G2.Sigma[i], &sigmaContributionI)
+	}
+
+	c.Delta, delta = updateValue(&c.Parameters.G1.Delta, c.Challenge, 1)
+
+	deltaInv.Inverse(&delta)
+	delta.BigInt(&deltaBI)
+	deltaInv.BigInt(&deltaInvBI)
+
+	// Update [δ]₂ (updateValue has already updated [δ]₁ in place)
+	c.Parameters.G2.Delta.ScalarMultiplication(&c.Parameters.G2.Delta, &deltaBI)
+
+	// Update Z using δ⁻¹
+	for i := 0; i < len(c.Parameters.G1.Z); i++ {
+		c.Parameters.G1.Z[i].ScalarMultiplication(&c.Parameters.G1.Z[i], &deltaInvBI)
+	}
+
+	// Update PKK using δ⁻¹
+	for i := 0; i < len(c.Parameters.G1.PKK); i++ {
+		c.Parameters.G1.PKK[i].ScalarMultiplication(&c.Parameters.G1.PKK[i], &deltaInvBI)
+	}
+}
 
 // Init
is to be run by the coordinator
@@ -161,24 +255,29 @@ func InitPhase2(r1cs *cs.R1CS, srs1 *Phase1) (Phase2, Phase2Evaluations) {
 	_, _, g1, g2 := curve.Generators()
 	c2.Parameters.G1.Delta = g1
 	c2.Parameters.G2.Delta = g2
-	c2.Parameters.G2.Sigma = g2
 
 	// Build Z in PK as τⁱ(τⁿ - 1) = τ⁽ⁱ⁺ⁿ⁾ - τⁱ for i ∈ [0, n-2]
 	// τⁱ(τⁿ - 1) = τ⁽ⁱ⁺ⁿ⁾ - τⁱ for i ∈ [0, n-2]
 	n := len(srs.G1.AlphaTau)
 	c2.Parameters.G1.Z = make([]curve.G1Affine, n)
-	for i := 0; i < n-1; i++ {
+	for i := 0; i < n-1; i++ { // TODO @Tabaie why is the last element always 0?
 		c2.Parameters.G1.Z[i].Sub(&srs.G1.Tau[i+n], &srs.G1.Tau[i])
 	}
 	bitReverse(c2.Parameters.G1.Z)
 	c2.Parameters.G1.Z = c2.Parameters.G1.Z[:n-1]
 
 	commitments := r1cs.CommitmentInfo.(constraint.Groth16Commitments)
+
+	evals.G1.CKK = make([][]curve.G1Affine, len(commitments))
 	c2.Sigmas = make([]valueUpdate, len(commitments))
-	c2.Parameters.CommitmentKeys = make([]pedersen.ProvingKey, len(commitments))
+	c2.Parameters.G1.SigmaCKK = make([][]curve.G1Affine, len(commitments))
+	c2.Parameters.G2.Sigma = make([]curve.G2Affine, len(commitments))
+
 	for j := range commitments {
-		c2.Parameters.CommitmentKeys[i].Basis = make([]curve.G1Affine, 0, len(commitments[j].PrivateCommitted))
+		evals.G1.CKK[j] = make([]curve.G1Affine, 0, len(commitments[j].PrivateCommitted))
+		c2.Parameters.G2.Sigma[j] = g2
 	}
+
 	nbCommitted := internal.NbElements(commitments.GetPrivateCommitted())
 
 	// Evaluate PKK
@@ -187,16 +286,16 @@ func InitPhase2(r1cs *cs.R1CS, srs1 *Phase1) (Phase2, Phase2Evaluations) {
 	evals.G1.VKK = make([]curve.G1Affine, 0, nbPublic+len(commitments))
 	committedIterator := internal.NewMergeIterator(commitments.GetPrivateCommitted())
 	nbCommitmentsSeen := 0
-	for i := 0; i < nWires; i++ {
+	for j := 0; j < nWires; j++ {
 		// since as yet δ, γ = 1, the VKK and PKK are computed identically, as βA + αB + C
 		var tmp curve.G1Affine
-		tmp.Add(&bA[i], &aB[i])
-		tmp.Add(&tmp, &C[i])
-		commitmentIndex := committedIterator.IndexIfNext(i)
-		isCommitment := nbCommitmentsSeen < len(commitments) && commitments[nbCommitmentsSeen].CommitmentIndex == i
+		tmp.Add(&bA[j], &aB[j])
+		tmp.Add(&tmp, &C[j])
+		commitmentIndex := committedIterator.IndexIfNext(j)
+		isCommitment := nbCommitmentsSeen < len(commitments) && commitments[nbCommitmentsSeen].CommitmentIndex == j
 		if commitmentIndex != -1 {
-			c2.Parameters.CommitmentKeys[commitmentIndex].Basis = append(c2.Parameters.CommitmentKeys[commitmentIndex].Basis, tmp)
-		} else if i < nbPublic || isCommitment {
+			evals.G1.CKK[commitmentIndex] = append(evals.G1.CKK[commitmentIndex], tmp)
+		} else if j < nbPublic || isCommitment {
 			evals.G1.VKK = append(evals.G1.VKK, tmp)
 		} else {
 			c2.Parameters.G1.PKK = append(c2.Parameters.G1.PKK, tmp)
@@ -206,50 +305,15 @@ func InitPhase2(r1cs *cs.R1CS, srs1 *Phase1) (Phase2, Phase2Evaluations) {
 		}
 	}
 
-	for i := range commitments {
-		c2.Parameters.CommitmentKeys[i].BasisExpSigma = slices.Clone(c2.Parameters.CommitmentKeys[i].Basis)
+	for j := range commitments {
+		c2.Parameters.G1.SigmaCKK[j] = slices.Clone(evals.G1.CKK[j])
 	}
 
 	// Hash initial contribution
-	c2.Hash = c2.hash() // TODO remove
+	c2.Challenge = c2.hash() // TODO remove
 	return c2, evals
 }
 
-func (c *Phase2) Contribute() {
-	// Sample toxic δ
-	var delta, deltaInv fr.Element
-	var sigma fr.Element
-	var deltaBI, deltaInvBI big.Int
-
-	updateValue()
-
-	delta.SetRandom()
-	deltaInv.Inverse(&delta)
-
-	delta.BigInt(&deltaBI)
-	deltaInv.BigInt(&deltaInvBI)
-
-	// Set δ public key
-	c.PublicKey = newPublicKey(delta, c.Hash, 1)
-
-	// Update δ
-
c.Parameters.G1.Delta.ScalarMultiplication(&c.Parameters.G1.Delta, &deltaBI) - c.Parameters.G2.Delta.ScalarMultiplication(&c.Parameters.G2.Delta, &deltaBI) - - // Update Z using δ⁻¹ - for i := 0; i < len(c.Parameters.G1.Z); i++ { - c.Parameters.G1.Z[i].ScalarMultiplication(&c.Parameters.G1.Z[i], &deltaInvBI) - } - - // Update PKK using δ⁻¹ - for i := 0; i < len(c.Parameters.G1.PKK); i++ { - c.Parameters.G1.PKK[i].ScalarMultiplication(&c.Parameters.G1.PKK[i], &deltaInvBI) - } - - // 4. Hash contribution - c.Hash = c.hash() -} - func VerifyPhase2(c0, c1 *Phase2, c ...*Phase2) error { contribs := append([]*Phase2{c0, c1}, c...) for i := 0; i < len(contribs)-1; i++ { @@ -262,7 +326,7 @@ func VerifyPhase2(c0, c1 *Phase2, c ...*Phase2) error { func verifyPhase2(current, contribution *Phase2) error { // Compute R for δ - deltaR := genR(contribution.PublicKey.SG, contribution.PublicKey.SXG, current.Hash[:], 1) + deltaR := genR(contribution.PublicKey.SG, contribution.PublicKey.SXG, current.Challenge[:], 1) // Check for knowledge of δ if !sameRatio(contribution.PublicKey.SG, contribution.PublicKey.SXG, contribution.PublicKey.XR, deltaR) { @@ -278,23 +342,15 @@ func verifyPhase2(current, contribution *Phase2) error { } // Check for valid updates of PKK and Z using - L, prevL := merge(contribution.Parameters.G1.PKK, current.Parameters.G1.PKK) + L, prevL := linearCombination(contribution.Parameters.G1.PKK, current.Parameters.G1.PKK) if !sameRatio(L, prevL, contribution.Parameters.G2.Delta, current.Parameters.G2.Delta) { return errors.New("couldn't verify valid updates of PKK using δ⁻¹") } - Z, prevZ := merge(contribution.Parameters.G1.Z, current.Parameters.G1.Z) + Z, prevZ := linearCombination(contribution.Parameters.G1.Z, current.Parameters.G1.Z) if !sameRatio(Z, prevZ, contribution.Parameters.G2.Delta, current.Parameters.G2.Delta) { return errors.New("couldn't verify valid updates of PKK using δ⁻¹") } - // Check hash of the contribution - h := contribution.hash() - for i := 0; i < len(h); i++ { - if h[i] != contribution.Hash[i] { - return errors.New("couldn't verify hash of contribution") - } - } - return nil } diff --git a/backend/groth16/bn254/mpcsetup/setup_test.go b/backend/groth16/bn254/mpcsetup/setup_test.go index b4b013c46b..1aeebc91eb 100644 --- a/backend/groth16/bn254/mpcsetup/setup_test.go +++ b/backend/groth16/bn254/mpcsetup/setup_test.go @@ -194,7 +194,7 @@ func (phase2 *Phase2) clone() Phase2 { r.Parameters.G1.Z = append(r.Parameters.G1.Z, phase2.Parameters.G1.Z...) r.Parameters.G2.Delta = phase2.Parameters.G2.Delta r.PublicKey = phase2.PublicKey - r.Hash = append(r.Hash, phase2.Hash...) + r.Challenge = append(r.Challenge, phase2.Challenge...) 
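+	// appending into the zero-valued r.Challenge deep-copies the hash bytes,
+	// so later mutations of the original phase2 cannot corrupt the clone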
return r } diff --git a/backend/groth16/bn254/mpcsetup/utils.go b/backend/groth16/bn254/mpcsetup/utils.go index a868b43f68..d18cf14945 100644 --- a/backend/groth16/bn254/mpcsetup/utils.go +++ b/backend/groth16/bn254/mpcsetup/utils.go @@ -156,14 +156,14 @@ func sameRatioUnsafe(n1, d1 curve.G1Affine, n2, d2 curve.G2Affine) bool { } // returns a = ∑ rᵢAᵢ, b = ∑ rᵢBᵢ -func merge(A, B []curve.G1Affine) (a, b curve.G1Affine) { +func linearCombination(A, B []curve.G1Affine, r []fr.Element) (a, b curve.G1Affine) { nc := runtime.NumCPU() - r := make([]fr.Element, len(A)) - for i := 0; i < len(A); i++ { - r[i].SetRandom() + if _, err := a.MultiExp(A, r[:len(A)], ecc.MultiExpConfig{NbTasks: nc}); err != nil { + panic(err) + } + if _, err := b.MultiExp(B, r[:len(B)], ecc.MultiExpConfig{NbTasks: nc}); err != nil { + panic(err) } - a.MultiExp(A, r, ecc.MultiExpConfig{NbTasks: nc / 2}) - b.MultiExp(B, r, ecc.MultiExpConfig{NbTasks: nc / 2}) return } @@ -172,8 +172,9 @@ func merge(A, B []curve.G1Affine) (a, b curve.G1Affine) { func linearCombinationsG1(A []curve.G1Affine, rPowers []fr.Element) (truncated, shifted curve.G1Affine) { // the common section, 1 to N-2 var common curve.G1Affine - common.MultiExp(A[1:len(A)-1], rPowers[:len(A)-2], ecc.MultiExpConfig{NbTasks: runtime.NumCPU()}) // A[1] + r.A[2] + ... + rᴺ⁻³.A[N-2] - + if _, err := common.MultiExp(A[1:len(A)-1], rPowers[:len(A)-2], ecc.MultiExpConfig{NbTasks: runtime.NumCPU()}); err != nil { // A[1] + r.A[2] + ... + rᴺ⁻³.A[N-2] + panic(err) + } var c big.Int rPowers[1].BigInt(&c) truncated.ScalarMultiplication(&common, &c).Add(&truncated, &A[0]) // A[0] + r.A[1] + r².A[2] + ... + rᴺ⁻².A[N-2] @@ -189,8 +190,9 @@ func linearCombinationsG1(A []curve.G1Affine, rPowers []fr.Element) (truncated, func linearCombinationsG2(A []curve.G2Affine, rPowers []fr.Element) (truncated, shifted curve.G2Affine) { // the common section, 1 to N-2 var common curve.G2Affine - common.MultiExp(A[1:len(A)-1], rPowers[:len(A)-2], ecc.MultiExpConfig{NbTasks: runtime.NumCPU()}) // A[1] + r.A[2] + ... + rᴺ⁻³.A[N-2] - + if _, err := common.MultiExp(A[1:len(A)-1], rPowers[:len(A)-2], ecc.MultiExpConfig{NbTasks: runtime.NumCPU()}); err != nil { // A[1] + r.A[2] + ... + rᴺ⁻³.A[N-2] + panic(err) + } var c big.Int rPowers[1].BigInt(&c) truncated.ScalarMultiplication(&common, &c).Add(&truncated, &A[0]) // A[0] + r.A[1] + r².A[2] + ... + rᴺ⁻².A[N-2] @@ -242,7 +244,7 @@ type valueUpdate struct { // updateValue produces values associated with contribution to an existing value. // if prevCommitment contains only a 𝔾₁ value, then so will updatedCommitment // the second output is toxic waste. It is the caller's responsibility to safely "dispose" of it. -func updateValue(prev curve.G1Affine, challenge []byte, dst byte) (proof valueUpdate, updated curve.G1Affine, contributionValue fr.Element) { +func updateValue(value *curve.G1Affine, challenge []byte, dst byte) (proof valueUpdate, contributionValue fr.Element) { if _, err := contributionValue.SetRandom(); err != nil { panic(err) } @@ -251,10 +253,10 @@ func updateValue(prev curve.G1Affine, challenge []byte, dst byte) (proof valueUp _, _, g1, _ := curve.Generators() proof.contributionCommitment.ScalarMultiplication(&g1, &contributionValueI) - updated.ScalarMultiplication(&prev, &contributionValueI) + value.ScalarMultiplication(value, &contributionValueI) // proof of knowledge to commitment. 
Algorithm 3 from section 3.7 - pokBase := genR(proof.contributionCommitment, updated, challenge, dst) // r + pokBase := genR(proof.contributionCommitment, *value, challenge, dst) // r proof.contributionPok.ScalarMultiplication(&pokBase, &contributionValueI) return @@ -264,47 +266,38 @@ func updateValue(prev curve.G1Affine, challenge []byte, dst byte) (proof valueUp // it checks the proof of knowledge of the contribution, and the fact that the product of the contribution // and previous commitment makes the new commitment. // prevCommitment is assumed to be valid. No subgroup check and the like. -func (x *valueUpdate) verify(prev, updated pair, challenge []byte, dst byte) error { - noG2 := prev.g2 == nil - if noG2 != (updated.g2 == nil) { +// challengePoint is normally equal to [denom] +func (x *valueUpdate) verify(denom, num pair, challengePoint curve.G1Affine, challenge []byte, dst byte) error { + noG2 := denom.g2 == nil + if noG2 != (num.g2 == nil) { return errors.New("erasing or creating g2 values") } - if !x.contributionPok.IsInSubGroup() || !x.contributionCommitment.IsInSubGroup() || !updated.validUpdate() { + if !x.contributionPok.IsInSubGroup() || !x.contributionCommitment.IsInSubGroup() || !num.validUpdate() { return errors.New("contribution values subgroup check failed") } // verify commitment proof of knowledge. CheckPOK, algorithm 4 from section 3.7 - r := genR(x.contributionCommitment, updated.g1, challenge, dst) // verification challenge in the form of a g2 base + r := genR(x.contributionCommitment, challengePoint, challenge, dst) // verification challenge in the form of a g2 base _, _, g1, _ := curve.Generators() if !sameRatioUnsafe(x.contributionCommitment, g1, x.contributionPok, r) { // π =? x.r i.e. x/g1 =? π/r return errors.New("contribution proof of knowledge verification failed") } - // check that the updated/previous ratio is consistent between the 𝔾₁ and 𝔾₂ representations. Based on CONSISTENT, algorithm 2 in Section 3.6. - if !noG2 && !sameRatioUnsafe(updated.g1, prev.g1, *updated.g2, *prev.g2) { + // check that the num/denom ratio is consistent between the 𝔾₁ and 𝔾₂ representations. Based on CONSISTENT, algorithm 2 in Section 3.6. 
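+	// i.e. when both 𝔾₂ values are present, require num.g1/denom.g1 = num.g2/denom.g2;
+	// sameRatioUnsafe expresses this as the single pairing check e(num.g1, denom.g2) = e(denom.g1, num.g2)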
+ if !noG2 && !sameRatioUnsafe(num.g1, denom.g1, *num.g2, *denom.g2) { return errors.New("g2 update inconsistent") } - // now verify that updated₁/previous₁ = x ( = x/g1 = π/r ) + // now verify that num₁/denom₁ = x ( = x/g1 = π/r ) // have to use the latter value for the RHS because we sameRatio needs both 𝔾₁ and 𝔾₂ values - if !sameRatioUnsafe(updated.g1, prev.g1, x.contributionPok, r) { + if !sameRatioUnsafe(num.g1, denom.g1, x.contributionPok, r) { return errors.New("g1 update inconsistent") } return nil } -/* -// setEmpty does not provide proofs, only sets the value to [1] -func (x *valueUpdate) setEmpty(g1Only bool) { - _, _, g1, g2 := curve.Generators() - x.updatedCommitment.g1.Set(&g1) - if !g1Only { - x.updatedCommitment.g2 = &g2 - } -}*/ - func toRefs[T any](s []T) []*T { res := make([]*T, len(s)) for i := range s { From 282484a2c43c5120d592e2579ed82a0c5c5d3a29 Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Wed, 11 Dec 2024 16:26:08 -0600 Subject: [PATCH 15/25] feat phase2 verification --- backend/groth16/bn254/mpcsetup/phase1.go | 24 +++----- backend/groth16/bn254/mpcsetup/phase2.go | 78 +++++++++++++++--------- backend/groth16/bn254/mpcsetup/utils.go | 29 ++++----- 3 files changed, 71 insertions(+), 60 deletions(-) diff --git a/backend/groth16/bn254/mpcsetup/phase1.go b/backend/groth16/bn254/mpcsetup/phase1.go index 90700abff3..717d60069e 100644 --- a/backend/groth16/bn254/mpcsetup/phase1.go +++ b/backend/groth16/bn254/mpcsetup/phase1.go @@ -47,14 +47,15 @@ func (p *Phase1) Contribute() { // SrsCommons are the circuit-independent components of the Groth16 SRS, // computed by the first phase. +// in all that follows, N is the domain size type SrsCommons struct { G1 struct { - Tau []curve.G1Affine // {[τ⁰]₁, [τ¹]₁, [τ²]₁, …, [τ²ⁿ⁻²]₁} - AlphaTau []curve.G1Affine // {α[τ⁰]₁, α[τ¹]₁, α[τ²]₁, …, α[τⁿ⁻¹]₁} - BetaTau []curve.G1Affine // {β[τ⁰]₁, β[τ¹]₁, β[τ²]₁, …, β[τⁿ⁻¹]₁} + Tau []curve.G1Affine // {[τ⁰]₁, [τ¹]₁, [τ²]₁, …, [τ²ᴺ⁻²]₁} + AlphaTau []curve.G1Affine // {α[τ⁰]₁, α[τ¹]₁, α[τ²]₁, …, α[τᴺ⁻¹]₁} + BetaTau []curve.G1Affine // {β[τ⁰]₁, β[τ¹]₁, β[τ²]₁, …, β[τᴺ⁻¹]₁} } G2 struct { - Tau []curve.G2Affine // {[τ⁰]₂, [τ¹]₂, [τ²]₂, …, [τⁿ⁻¹]₂} + Tau []curve.G2Affine // {[τ⁰]₂, [τ¹]₂, [τ²]₂, …, [τᴺ⁻¹]₂} Beta curve.G2Affine // [β]₂ } } @@ -181,22 +182,13 @@ func (p *Phase1) Verify(next *Phase1) error { return err } - if err := next.proofs.Tau.verify( - pair{p.parameters.G1.Tau[1], &p.parameters.G2.Tau[1]}, - pair{next.parameters.G1.Tau[1], &next.parameters.G2.Tau[1]}, - next.Challenge, 1); err != nil { + if err := next.proofs.Tau.verify(pair{p.parameters.G1.Tau[1], &p.parameters.G2.Tau[1]}, pair{next.parameters.G1.Tau[1], &next.parameters.G2.Tau[1]}, next.Challenge, 1); err != nil { return fmt.Errorf("failed to verify contribution to τ: %w", err) } - if err := next.proofs.Alpha.verify( // TODO Get ACTUAL updated tau - pair{taus, nil}, - pair{alphaTaus, nil}, - next.Challenge, 2); err != nil { + if err := next.proofs.Alpha.verify(pair{taus, nil}, pair{alphaTaus, nil}, next.Challenge, 2); err != nil { return fmt.Errorf("failed to verify contribution to α: %w", err) } - if err := next.proofs.Beta.verify( - pair{p.parameters.G1.BetaTau[0], &p.parameters.G2.Beta}, // TODO @Tabaie combine the verification of all βτⁱ - pair{next.parameters.G1.BetaTau[0], &next.parameters.G2.Beta}, - next.Challenge, 3); err != nil { + if err := next.proofs.Beta.verify(pair{p.parameters.G1.BetaTau[0], &p.parameters.G2.Beta}, pair{next.parameters.G1.BetaTau[0], 
&next.parameters.G2.Beta}, next.Challenge, 3); err != nil {
 		return fmt.Errorf("failed to verify contribution to β: %w", err)
 	}
 
diff --git a/backend/groth16/bn254/mpcsetup/phase2.go b/backend/groth16/bn254/mpcsetup/phase2.go
index 2f5d271dee..6f1758b5f9 100644
--- a/backend/groth16/bn254/mpcsetup/phase2.go
+++ b/backend/groth16/bn254/mpcsetup/phase2.go
@@ -20,7 +20,7 @@ import (
 	cs "github.com/consensys/gnark/constraint/bn254"
 )
 
-type Phase2Evaluations struct {
+type Phase2Evaluations struct { // TODO @Tabaie rename
 	G1 struct {
 		A   []curve.G1Affine // A are the left coefficient polynomials for each witness element, evaluated at τ
 		B   []curve.G1Affine // B are the right coefficient polynomials for each witness element, evaluated at τ
@@ -36,13 +36,13 @@ type Phase2 struct {
 	Parameters struct {
 		G1 struct {
 			Delta    curve.G1Affine
-			Z        []curve.G1Affine   // Z are multiples of the domain vanishing polynomial
-			PKK      []curve.G1Affine   // PKK are the coefficients of the private witness
-			SigmaCKK [][]curve.G1Affine // Commitment bases
+			Z        []curve.G1Affine   // Z[i] = xⁱt(x)/δ where t is the domain vanishing polynomial, 0 ≤ i ≤ N-2
+			PKK      []curve.G1Affine   // PKK are the coefficients of the private witness, needed for the proving key. They have a denominator of δ
+			SigmaCKK [][]curve.G1Affine // Commitment proof bases: SigmaCKK[i][j] = Cᵢⱼσᵢ where Cᵢⱼ is the commitment basis for the jᵗʰ committed element from the iᵗʰ commitment
 		}
 		G2 struct {
 			Delta curve.G2Affine
-			Sigma []curve.G2Affine
+			Sigma []curve.G2Affine // the secret σ value for each commitment
 		}
 	}
 
@@ -55,43 +55,53 @@ type Phase2 struct {
 }
 
 func (c *Phase2) Verify(next *Phase2) error {
-	if challenge := c.hash(); len(next.Challenge) != 0 && !bytes.Equal(next.Challenge, challenge) {
+	challenge := c.hash()
+	if len(next.Challenge) != 0 && !bytes.Equal(next.Challenge, challenge) {
 		return errors.New("the challenge does not match the previous phase's hash")
 	}
+	next.Challenge = challenge
 
+	if len(next.Parameters.G1.Z) != len(c.Parameters.G1.Z) ||
+		len(next.Parameters.G1.PKK) != len(c.Parameters.G1.PKK) ||
+		len(next.Parameters.G1.SigmaCKK) != len(c.Parameters.G1.SigmaCKK) ||
+		len(next.Parameters.G2.Sigma) != len(c.Parameters.G2.Sigma) {
+		return errors.New("contribution size mismatch")
+	}
 
+	r := linearCombCoeffs(len(next.Parameters.G1.Z) + len(next.Parameters.G1.PKK) + 1) // TODO @Tabaie If all contributions are being verified in one go, we could reuse r
+
+	verifyContribution := func(update *valueUpdate, g1Denominator, g1Numerator []curve.G1Affine, g2Denominator, g2Numerator *curve.G2Affine, dst byte) error {
+		g1Num := linearCombination(g1Numerator, r)
+		g1Denom := linearCombination(g1Denominator, r)
+
+		return update.verify(pair{g1Denom, g2Denominator}, pair{g1Num, g2Numerator}, challenge, dst)
+	}
+
+	// verify proof of knowledge of contributions to the σᵢ
+	// and the correctness of updates to Parameters.G2.Sigma[i] and the
Parameters.G1.SigmaCKK[i] + for i := range c.Sigmas { // match the first commitment basis elem against the contribution commitment + if !areInSubGroupG1(next.Parameters.G1.SigmaCKK[i]) { return errors.New("commitment proving key subgroup check failed") } + + if err := verifyContribution(&c.Sigmas[i], c.Parameters.G1.SigmaCKK[i], next.Parameters.G1.SigmaCKK[i], &c.Parameters.G2.Sigma[i], &next.Parameters.G2.Sigma[i], 2+byte(i)); err != nil { + return fmt.Errorf("failed to verify contribution to σ[%d]: %w", i, err) + } } + // verify proof of knowledge of contribution to δ + // and the correctness of updates to Parameters.Gi.Delta, PKK[i], and Z[i] if !areInSubGroupG1(next.Parameters.G1.Z) || !areInSubGroupG1(next.Parameters.G1.PKK) { return errors.New("derived values 𝔾₁ subgroup check failed") } - r := linearCombCoeffs(len(next.Parameters.G1.Z)) - - for i := range c.Sigmas { - prevComb, nextComb := linearCombination(c.Parameters.G1.SigmaCKK[i], next.Parameters.G1.SigmaCKK[i], r) - if !sameRatioUnsafe(nextComb, prevComb, next.Parameters.G2.Sigma[i], c.Parameters.G2.Sigma[i]) { - return fmt.Errorf("failed to verify contribution to σ[%d]", i) - } + denom := cloneAppend([]curve.G1Affine{c.Parameters.G1.Delta}, next.Parameters.G1.Z, next.Parameters.G1.PKK) + num := cloneAppend([]curve.G1Affine{next.Parameters.G1.Delta}, c.Parameters.G1.Z, c.Parameters.G1.PKK) + if err := verifyContribution(&c.Delta, denom, num, &c.Parameters.G2.Delta, &next.Parameters.G2.Delta, 1); err != nil { + return fmt.Errorf("failed to verify contribution to δ: %w", err) } - linearCombination() + return nil } func (c *Phase2) Contribute() { @@ -315,7 +325,7 @@ func VerifyPhase2(c0, c1 *Phase2, c ...*Phase2) error { func verifyPhase2(current, contribution *Phase2) error { // Compute R for δ - deltaR := genR(contribution.PublicKey.SG, contribution.PublicKey.SXG, current.Challenge[:], 1) + deltaR := genR(contribution.PublicKey.SG, current.Challenge[:], 1) // Check for knowledge of δ if !sameRatio(contribution.PublicKey.SG, contribution.PublicKey.SXG, contribution.PublicKey.XR, deltaR) { @@ -348,3 +358,15 @@ func (c *Phase2) hash() []byte { c.writeTo(sha) return sha.Sum(nil) } + +func cloneAppend(s ...[]curve.G1Affine) []curve.G1Affine { + l := 0 + for _, s := range s { + l += len(s) + } + res := make([]curve.G1Affine, 0, l) + for _, s := range s { + res = append(res, s...) 
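+		// each iteration appends one whole input slice (the loop variable shadows the variadic parameter s),
+		// e.g. cloneAppend([]curve.G1Affine{d}, z, pkk) lays out [d, z..., pkk...], matching the denom/num construction above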
+ } + return res +} diff --git a/backend/groth16/bn254/mpcsetup/utils.go b/backend/groth16/bn254/mpcsetup/utils.go index 49057f637a..038482a366 100644 --- a/backend/groth16/bn254/mpcsetup/utils.go +++ b/backend/groth16/bn254/mpcsetup/utils.go @@ -41,7 +41,7 @@ func newPublicKey(x fr.Element, challenge []byte, dst byte) PublicKey { pk.SXG.ScalarMultiplication(&pk.SG, &xBi) // generate R based on sG1, sxG1, challenge, and domain separation tag (tau, alpha or beta) - R := genR(pk.SG, pk.SXG, challenge, dst) + R := genR(pk.SG, challenge, dst) // compute x*spG2 pk.XR.ScalarMultiplication(&R, &xBi) @@ -74,7 +74,7 @@ func powersI(a *big.Int, n int) []fr.Element { return powers(&aMont, n) } -// Returns [1, a, a², ..., aⁿ⁻¹ ] +// Returns [1, a, a², ..., aᴺ⁻¹ ] func powers(a *fr.Element, n int) []fr.Element { result := make([]fr.Element, n) @@ -144,20 +144,19 @@ func sameRatioUnsafe(n1, d1 curve.G1Affine, n2, d2 curve.G2Affine) bool { return res } -// returns a = ∑ rᵢAᵢ, b = ∑ rᵢBᵢ -func linearCombination(A, B []curve.G1Affine, r []fr.Element) (a, b curve.G1Affine) { +// returns ∑ rᵢAᵢ +func linearCombination(A []curve.G1Affine, r []fr.Element) curve.G1Affine { nc := runtime.NumCPU() - if _, err := a.MultiExp(A, r[:len(A)], ecc.MultiExpConfig{NbTasks: nc}); err != nil { + var res curve.G1Affine + if _, err := res.MultiExp(A, r[:len(A)], ecc.MultiExpConfig{NbTasks: nc}); err != nil { panic(err) } - if _, err := b.MultiExp(B, r[:len(B)], ecc.MultiExpConfig{NbTasks: nc}); err != nil { - panic(err) - } - return + return res } // linearCombinationsG1 assumes, and does not check, that rPowers[i+1] = rPowers[1].rPowers[i] for all applicable i // Also assumed that 3 ≤ N ≔ len(A) ≤ len(rPowers) +// the results are truncated = ∑_{i=0}^{N-2} rⁱAᵢ, shifted = ∑_{i=1}^{N-1} rⁱAᵢ func linearCombinationsG1(A []curve.G1Affine, rPowers []fr.Element) (truncated, shifted curve.G1Affine) { // the common section, 1 to N-2 var common curve.G1Affine @@ -176,6 +175,7 @@ func linearCombinationsG1(A []curve.G1Affine, rPowers []fr.Element) (truncated, // linearCombinationsG2 assumes, and does not check, that rPowers[i+1] = rPowers[1].rPowers[i] for all applicable i // Also assumed that 3 ≤ N ≔ len(A) ≤ len(rPowers) +// the results are truncated = ∑_{i=0}^{N-2} rⁱAᵢ, shifted = ∑_{i=1}^{N-1} rⁱAᵢ func linearCombinationsG2(A []curve.G2Affine, rPowers []fr.Element) (truncated, shifted curve.G2Affine) { // the common section, 1 to N-2 var common curve.G2Affine @@ -195,11 +195,10 @@ func linearCombinationsG2(A []curve.G2Affine, rPowers []fr.Element) (truncated, // Generate R∈𝔾₂ as Hash(gˢ, gˢˣ, challenge, dst) // it is to be used as a challenge for generating a proof of knowledge to x // π ≔ x.r; e([1]₁, π) =﹖ e([x]₁, r) -func genR(sG1, sxG1 curve.G1Affine, challenge []byte, dst byte) curve.G2Affine { +func genR(sG1 curve.G1Affine, challenge []byte, dst byte) curve.G2Affine { var buf bytes.Buffer buf.Grow(len(challenge) + curve.SizeOfG1AffineUncompressed*2) buf.Write(sG1.Marshal()) - buf.Write(sxG1.Marshal()) buf.Write(challenge) spG2, err := curve.HashToG2(buf.Bytes(), []byte{dst}) if err != nil { @@ -227,7 +226,6 @@ func (p *pair) validUpdate() bool { type valueUpdate struct { contributionCommitment curve.G1Affine // x or [Xⱼ]₁ contributionPok curve.G2Affine // π ≔ x.r ∈ 𝔾₂ - //updatedCommitment pair // [X₁..Xⱼ] } // updateValue produces values associated with contribution to an existing value. 
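A usage sketch may help at this point: one contribution round for a single 𝔾₁ value, driven end to end. This is not part of the patch; it is a hypothetical same-package driver, written against the simplified updateValue/verify signatures that the next hunk settles on.

// sketch: a single-value contribution round, assuming same-package access
// to updateValue, valueUpdate and pair, and a prior challenge hash
func exampleContributeOnce(value curve.G1Affine, challenge []byte) error {
	prev := pair{g1: value} // g2 left nil: this value has no 𝔾₂ counterpart
	next := pair{g1: value}

	// contributor side: replace the value with x·value for a fresh secret x,
	// producing a proof of knowledge of x
	proof, x := updateValue(&next.g1, challenge, 1)
	x.SetZero() // x is toxic waste; discard it as soon as possible

	// verifier side: check the proof of knowledge and that next.g1 = x·prev.g1
	return proof.verify(prev, next, challenge, 1)
}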
@@ -245,7 +243,7 @@ func updateValue(value *curve.G1Affine, challenge []byte, dst byte) (proof value value.ScalarMultiplication(value, &contributionValueI) // proof of knowledge to commitment. Algorithm 3 from section 3.7 - pokBase := genR(proof.contributionCommitment, *value, challenge, dst) // r + pokBase := genR(proof.contributionCommitment, challenge, dst) // r proof.contributionPok.ScalarMultiplication(&pokBase, &contributionValueI) return @@ -255,8 +253,7 @@ func updateValue(value *curve.G1Affine, challenge []byte, dst byte) (proof value // it checks the proof of knowledge of the contribution, and the fact that the product of the contribution // and previous commitment makes the new commitment. // prevCommitment is assumed to be valid. No subgroup check and the like. -// challengePoint is normally equal to [denom] -func (x *valueUpdate) verify(denom, num pair, challengePoint curve.G1Affine, challenge []byte, dst byte) error { +func (x *valueUpdate) verify(denom, num pair, challenge []byte, dst byte) error { noG2 := denom.g2 == nil if noG2 != (num.g2 == nil) { return errors.New("erasing or creating g2 values") @@ -267,7 +264,7 @@ func (x *valueUpdate) verify(denom, num pair, challengePoint curve.G1Affine, cha } // verify commitment proof of knowledge. CheckPOK, algorithm 4 from section 3.7 - r := genR(x.contributionCommitment, challengePoint, challenge, dst) // verification challenge in the form of a g2 base + r := genR(x.contributionCommitment, challenge, dst) // verification challenge in the form of a g2 base _, _, g1, _ := curve.Generators() if !sameRatioUnsafe(x.contributionCommitment, g1, x.contributionPok, r) { // π =? x.r i.e. x/g1 =? π/r return errors.New("contribution proof of knowledge verification failed") From b37e663fbd70aa5ff96aadb77805f7fbb941e6cc Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Thu, 12 Dec 2024 17:18:18 -0600 Subject: [PATCH 16/25] docs lemma --- backend/groth16/bn254/mpcsetup/marshal.go | 77 ++++++----- backend/groth16/bn254/mpcsetup/phase1.go | 154 ++++++++++++++++------ 2 files changed, 154 insertions(+), 77 deletions(-) diff --git a/backend/groth16/bn254/mpcsetup/marshal.go b/backend/groth16/bn254/mpcsetup/marshal.go index 964b45a371..3daf9fa35c 100644 --- a/backend/groth16/bn254/mpcsetup/marshal.go +++ b/backend/groth16/bn254/mpcsetup/marshal.go @@ -17,51 +17,42 @@ func appendRefs[T any](s []interface{}, v []T) []interface{} { return s } -// proofRefsSlice produces a slice consisting of references to all proof sub-elements -// prepended by the size parameter, to be used in WriteTo and ReadFrom functions -func (p *Phase1) proofRefsSlice() []interface{} { - return []interface{}{ - &p.proofs.Tau.contributionCommitment, - &p.proofs.Tau.contributionPok, - &p.proofs.Alpha.contributionCommitment, - &p.proofs.Alpha.contributionPok, - &p.proofs.Beta.contributionCommitment, - &p.proofs.Beta.contributionPok, - } -} - // WriteTo implements io.WriterTo // It does not write the Challenge from the previous contribution func (p *Phase1) WriteTo(writer io.Writer) (n int64, err error) { - - if n, err = p.parameters.WriteTo(writer); err != nil { - return - } - - enc := curve.NewEncoder(writer) - for _, v := range p.proofRefsSlice() { - if err = enc.Encode(v); err != nil { - return n + enc.BytesWritten(), err + var dn int64 + for _, v := range []io.WriterTo{ + &p.proofs.Tau, + &p.proofs.Alpha, + &p.proofs.Beta, + &p.parameters, + } { + dn, err = v.WriteTo(writer) + n += dn + if err != nil { + return } } - return n + 
enc.BytesWritten(), nil + return } // ReadFrom implements io.ReaderFrom // It does not read the Challenge from the previous contribution func (p *Phase1) ReadFrom(reader io.Reader) (n int64, err error) { - - if n, err = p.parameters.ReadFrom(reader); err != nil { - return - } - - dec := curve.NewDecoder(reader) - for _, v := range p.proofRefsSlice() { // we've already decoded N - if err = dec.Decode(v); err != nil { - return n + dec.BytesRead(), err + var dn int64 + for _, v := range []io.ReaderFrom{ + &p.proofs.Tau, + &p.proofs.Alpha, + &p.proofs.Beta, + &p.parameters, + } { + dn, err = v.ReadFrom(reader) + n += dn + if err != nil { + return } } - return n + dec.BytesRead(), nil + return } // WriteTo implements io.WriterTo @@ -162,7 +153,7 @@ func (c *SrsCommons) refsSlice() []interface{} { N := len(c.G2.Tau) estimatedNbElems := 5*N - 1 // size N 1 - // 𝔾₂ representation for β 1 + // [β]₂ 1 // [τⁱ]₁ for 1 ≤ i ≤ 2N-2 2N-2 // [τⁱ]₂ for 1 ≤ i ≤ N-1 N-1 // [ατⁱ]₁ for 0 ≤ i ≤ N-1 N @@ -209,3 +200,21 @@ func (c *SrsCommons) ReadFrom(reader io.Reader) (n int64, err error) { } return dec.BytesRead(), nil } + +func (x *valueUpdate) WriteTo(writer io.Writer) (n int64, err error) { + enc := curve.NewEncoder(writer) + if err = enc.Encode(&x.contributionCommitment); err != nil { + return enc.BytesWritten(), err + } + err = enc.Encode(&x.contributionPok) + return enc.BytesWritten(), err +} + +func (x *valueUpdate) ReadFrom(reader io.Reader) (n int64, err error) { + dec := curve.NewDecoder(reader) + if err = dec.Decode(&x.contributionCommitment); err != nil { + return dec.BytesRead(), err + } + err = dec.Decode(&x.contributionPok) + return dec.BytesRead(), err +} diff --git a/backend/groth16/bn254/mpcsetup/phase1.go b/backend/groth16/bn254/mpcsetup/phase1.go index 717d60069e..1e1c527e4c 100644 --- a/backend/groth16/bn254/mpcsetup/phase1.go +++ b/backend/groth16/bn254/mpcsetup/phase1.go @@ -10,14 +10,27 @@ import ( "crypto/sha256" "errors" "fmt" - "github.com/consensys/gnark-crypto/ecc" curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fr" "math/big" - "runtime" ) -// Phase1 represents the Phase1 of the MPC described in +// SrsCommons are the circuit-independent components of the Groth16 SRS, +// computed by the first phase. +// in all that follows, N is the domain size +type SrsCommons struct { + G1 struct { + Tau []curve.G1Affine // {[τ⁰]₁, [τ¹]₁, [τ²]₁, …, [τ²ᴺ⁻²]₁} + AlphaTau []curve.G1Affine // {α[τ⁰]₁, α[τ¹]₁, α[τ²]₁, …, α[τᴺ⁻¹]₁} + BetaTau []curve.G1Affine // {β[τ⁰]₁, β[τ¹]₁, β[τ²]₁, …, β[τᴺ⁻¹]₁} + } + G2 struct { + Tau []curve.G2Affine // {[τ⁰]₂, [τ¹]₂, [τ²]₂, …, [τᴺ⁻¹]₂} + Beta curve.G2Affine // [β]₂ + } +} + +// Phase1 in line with Phase1 of the MPC described in // https://eprint.iacr.org/2017/1050.pdf // // Also known as "Powers of Tau" @@ -45,21 +58,6 @@ func (p *Phase1) Contribute() { p.parameters.update(&tauContrib, &alphaContrib, &betaContrib, true) } -// SrsCommons are the circuit-independent components of the Groth16 SRS, -// computed by the first phase. 
-// in all that follows, N is the domain size -type SrsCommons struct { - G1 struct { - Tau []curve.G1Affine // {[τ⁰]₁, [τ¹]₁, [τ²]₁, …, [τ²ᴺ⁻²]₁} - AlphaTau []curve.G1Affine // {α[τ⁰]₁, α[τ¹]₁, α[τ²]₁, …, α[τᴺ⁻¹]₁} - BetaTau []curve.G1Affine // {β[τ⁰]₁, β[τ¹]₁, β[τ²]₁, …, β[τᴺ⁻¹]₁} - } - G2 struct { - Tau []curve.G2Affine // {[τ⁰]₂, [τ¹]₂, [τ²]₂, …, [τᴺ⁻¹]₂} - Beta curve.G2Affine // [β]₂ - } -} - // setZero instantiates the parameters, and sets all contributions to zero func (c *SrsCommons) setZero(N uint64) { c.G1.Tau = make([]curve.G1Affine, 2*N-2) @@ -122,7 +120,7 @@ func (c *SrsCommons) update(tauUpdate, alphaUpdate, betaUpdate *fr.Element, prin // that it produces the same values. // The inner workings of the random beacon are out of scope. // WARNING: Seal modifies p, just as Contribute does. -// The result will be an INVALID Phase1 object. +// The result will be an INVALID Phase1 object, since no proof of correctness is produced. func (p *Phase1) Seal(beaconChallenge []byte) SrsCommons { var ( bb bytes.Buffer @@ -158,37 +156,31 @@ func VerifyPhase1(c0, c1 *Phase1, c ...*Phase1) error { // Verify assumes previous is correct func (p *Phase1) Verify(next *Phase1) error { - if prevHash := p.hash(); !bytes.Equal(next.Challenge, p.hash()) { // if chain-verifying contributions, challenge fields are optional as they can be computed as we go - if len(next.Challenge) != 0 { - return errors.New("the challenge does not match the previous phase's hash") - } - next.Challenge = prevHash + challenge := p.hash() + if len(next.Challenge) != 0 && !bytes.Equal(next.Challenge, challenge) { + return errors.New("the challenge does not match the previous phase's hash") } + next.Challenge = challenge - // TODO compare sizes - - r := linearCombCoeffs(len(next.parameters.G1.Tau) - 1) // the longest of all lengths - // will be reusing the coefficients TODO @Tabaie make sure that's okay - + // the internal consistency of the vector sizes in next is assumed + // so is its well-formedness i.e. 
Tau[0] = 1 + // it remains to check it is consistent with p N := len(next.parameters.G2.Tau) - var taus, alphaTaus, betaTaus curve.G1Affine - if _, err := taus.MultiExp(next.parameters.G1.Tau[1:N], r, ecc.MultiExpConfig{NbTasks: runtime.NumCPU()}); err != nil { // τ¹ + r.τ² + … + rᴺ⁻².τⁿ⁻¹ - return err - } - if _, err := alphaTaus.MultiExp(next.parameters.G1.AlphaTau[1:], r, ecc.MultiExpConfig{NbTasks: runtime.NumCPU()}); err != nil { // ατ¹ + r.ατ² + … + rᴺ⁻².ατⁿ⁻¹ - return err - } - if _, err := betaTaus.MultiExp(next.parameters.G1.BetaTau[1:], r, ecc.MultiExpConfig{NbTasks: runtime.NumCPU()}); err != nil { // βτ¹ + r.βτ² + … + rᴺ⁻².βτⁿ⁻¹ - return err + if N != len(p.parameters.G2.Tau) { + return errors.New("domain size mismatch") } - if err := next.proofs.Tau.verify(pair{p.parameters.G1.Tau[1], &p.parameters.G2.Tau[1]}, pair{next.parameters.G1.Tau[1], &next.parameters.G2.Tau[1]}, next.Challenge, 1); err != nil { + r := linearCombCoeffs(len(next.parameters.G1.Tau) + len(next.parameters.G1.AlphaTau) + len(next.parameters.G1.BetaTau) - 1) // the longest of all lengths + // will be reusing the coefficients + + // verify updates to τ, α, β + if err := next.proofs.Tau.verify(pair{p.parameters.G1.Tau[1], nil}, pair{next.parameters.G1.Tau[1], nil}, challenge, 1); err != nil { return fmt.Errorf("failed to verify contribution to τ: %w", err) } - if err := next.proofs.Alpha.verify(pair{taus, nil}, pair{alphaTaus, nil}, next.Challenge, 2); err != nil { + if err := next.proofs.Alpha.verify(pair{p.parameters.G1.AlphaTau[0], nil}, pair{p.parameters.G1.AlphaTau[0], nil}, challenge, 2); err != nil { return fmt.Errorf("failed to verify contribution to α: %w", err) } - if err := next.proofs.Beta.verify(pair{p.parameters.G1.BetaTau[0], &p.parameters.G2.Beta}, pair{next.parameters.G1.BetaTau[0], &next.parameters.G2.Beta}, next.Challenge, 3); err != nil { + if err := next.proofs.Beta.verify(pair{p.parameters.G1.BetaTau[0], &p.parameters.G2.Beta}, pair{next.parameters.G1.BetaTau[0], &next.parameters.G2.Beta}, challenge, 3); err != nil { return fmt.Errorf("failed to verify contribution to β: %w", err) } @@ -201,6 +193,77 @@ func (p *Phase1) Verify(next *Phase1) error { _, _, g1, g2 := curve.Generators() + // lemma: let R be an integral domain and + // F = ∑ fᵢⱼ XⁱYʲ F' = ∑ f'ᵢⱼ XⁱYʲ + // G = ∑ gᵢⱼ ZⁱTʲ G' = ∑ g'ᵢⱼ ZⁱTʲ + // polynomials in R[X,Y,Z,T]. + // if F/F' = G/G' + // then F/F' = G/G' ∈ FracR + // + // view our polynomials in FracR[X,Y,Z,T] + // By multiplying out the polynomials we get + // FG' = F'G ⇒ ∑ fᵢⱼg'ₖₗ XᶦYʲZᵏTˡ = ∑ f'ᵢⱼgₖₗ XᶦYʲZᵏTˡ + // pick i0 ,j0 , k0, l0 where f'ᵢ₀ⱼ₀, g'ₖ₀ₗ₀ ≠ 0 + // let x ≔ fᵢ₀ⱼ₀/f'ᵢ₀ⱼ₀ = gₖ₀ₗ₀/g'ₖ₀ₗ₀ + // now for any i,j: fᵢⱼg'ₖ₀ₗ₀ = f'ᵢⱼgₖ₀ₗ₀ ⇒ + // fᵢⱼ = x f'ᵢⱼ + // likewise for any i,j: fᵢ₀ⱼ₀g'ᵢⱼ = f'ᵢ₀ⱼ₀gᵢⱼ ⇒ + // gᵢⱼ = x g'ᵢⱼ + + // now we use this to check that: + // 1. aᵢ ≔ G1.Tau[i] = [τⁱ]₁ + // 2. bᵢ ≔ G2.Tau[i] = [τⁱ]₂ + // 3. cᵢ ≔ G1.AlphaTau[i] = [ατⁱ]₁ + // 4. dᵢ ≔ G1.BetaTau[i] = [βτⁱ]₁ + + // + // we already know that a₀ = 1, a₁ = τ, + // c₀ = α, d₀ = β, b₀ = 1, + // construct the polynomials + // F ≔ a₀ + a₁X + ... + a₂ₙ₋₃X²ᴺ⁻³ + c₀Y + c₁XY + ... + cₙ₋₂Xᴺ⁻²Y + d₀Y² + d₁XY² + ... + dₙ₋₂Xᴺ⁻²Y² + // F' ≔ a₁ + a₂X + ... + a₂ₙ₋₂X²ᴺ⁻³ + c₁Y + c₂XY + ... + cₙ₋₁Xᴺ⁻²Y + d₁Y² + d₂XY² + ... + dₙ₋₁Xᴺ⁻²Y² + // G ≔ b + + // we want to establish G1.AlphaTau[i] = [ατⁱ]₁, + // already known for i = 0 from the contribution checks + // let [cᵢ]₁ = G1.AlphaTau[i] + // let C1 ≔ c₀ + rc₁ + ... + rᴺ⁻²cₙ₋₂ + // C2 ≔ c₁ + rc₂ + ... 
+ rᴺ⁻²cₙ₋₁ + // then if indeed cᵢ = ατⁱ, we get + // C1/C2 = 1/τ + // conversely, from C1/C2 = 1/τ we get + // c₁ + rc₂ + ... + rᴺ⁻²cₙ₋₁ = τc₀ + rτc₁ + ... + rᴺ⁻²τcₙ₋₂ + // which by the Schwartz-Zippel lemma and a simple induction + // implies the desired result with overwhelming probability. + + // The same argument works for G1.BetaTau[i] + + // we also want to establish Gⱼ.Tau[i] = [τⁱ]ⱼ + // let [aᵢ]₁ = G1.Tau[i] and [bᵢ]₂ = G2.Tau[i] + // let A1 ≔ a₀ + ra₁ + ... + r²ᴺ⁻³a₂ₙ₋₃ + // A2 ≔ a₁ + ra₂ + ... + r²ᴺ⁻³a₂ₙ₋₂ + // B1 ≔ b₀ + sb₁ + ... + sᴺ⁻²bₙ₋₂ + // B2 ≔ b₁ + sb₂ + ... + sᴺ⁻²bₙ₋₁ + // for random r,s + // if the values are correct clearly we get A1/A2 = B1/B2 + // + // if A1/A2 = B1/B2, by the bivariate Schwartz-Zippel we get + // (a₀ + a₁X + ... + a₂ₙ₋₃X²ᴺ⁻³)(b₁ + b₂Y + ... + bₙ₋₁Yᴺ⁻²) = + // (a₁ + a₂X + ... + a₂ₙ₋₂X²ᴺ⁻³)(b₀ + b₁Y + ... + bₙ₋₂Yᴺ⁻²) + // furthermore by previous checks we already know that + // a₀=1, a₁= τ + // Assume by induction that for all i < m ≤ N-1: bᵢ = τⁱ + // Then modulo (X, Yᵐ) we get + // τ + τ²Y + ... + τᵐ⁻¹Yᵐ⁻² + bₘYᵐ⁻¹ = + // τ (1 + τ²Y + ... + τᵐ⁻¹Yᵐ⁻¹) + // which gives bₘ = τᵐ + // We then get A1/A2 = 1/τ which by the previous lemma gives + // aᵢ = τⁱ + + // now to combine all the above + + // verify monomials + // for 1 ≤ i ≤ 2N-3 we want to check τⁱ⁺¹/τⁱ = τ // i.e. e(τⁱ⁺¹,[1]₂) = e(τⁱ,[τ]₂). Due to bi-linearity we can instead check // e(∑rⁱ⁻¹τⁱ⁺¹,[1]₂) = e(∑rⁱ⁻¹τⁱ,[τ]₂), which is tantamount to the check @@ -208,8 +271,6 @@ func (p *Phase1) Verify(next *Phase1) error { tauT1, tauS1 := linearCombinationsG1(next.parameters.G1.Tau[1:], r) tauT2, tauS2 := linearCombinationsG2(next.parameters.G2.Tau[1:], r) - alphaTT, alphaTS := linearCombinationsG1(next.parameters.G1.AlphaTau, r) - betaTT, betaTS := linearCombinationsG1(next.parameters.G1.BetaTau, r) if !sameRatioUnsafe(tauS1, tauT1, next.parameters.G2.Tau[1], g2) { return errors.New("couldn't verify 𝔾₁ representations of the τⁱ") @@ -219,6 +280,9 @@ func (p *Phase1) Verify(next *Phase1) error { return errors.New("couldn't verify 𝔾₂ representations of the τⁱ") } + alphaTT, alphaTS := linearCombinationsG1(next.parameters.G1.AlphaTau, r) + betaTT, betaTS := linearCombinationsG1(next.parameters.G1.BetaTau, r) + // for 0 ≤ i < N we want to check the ατⁱ // By well-formedness checked by ReadFrom, we assume that ατ⁰ = α // For 0 < i < N we check that ατⁱ/ατⁱ⁻¹ = τ, since we have a representation of τ in 𝔾₂ @@ -235,6 +299,10 @@ func (p *Phase1) Verify(next *Phase1) error { // TODO @Tabaie combine all pairing checks except the second one + taus := linearCombination(next.parameters.G1.Tau[:N], r) // 1 + r.τ¹ + r.τ² + … + rᴺ⁻¹.τᴺ⁻¹ + alphaTaus := linearCombination(next.parameters.G1.AlphaTau, r) // α + r.ατ¹ + r.ατ² + … + rᴺ⁻¹.ατᴺ⁻¹ + betaTaus := linearCombination(next.parameters.G1.BetaTau, r) // β + r.τ¹ + r.βτ² + … + rᴺ⁻¹.βτᴺ⁻¹ + return nil } From 50cd7e910caf2dbb6c0fe826c81b345fb8f69506 Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Thu, 12 Dec 2024 17:47:57 -0600 Subject: [PATCH 17/25] docs new ratio check method --- backend/groth16/bn254/mpcsetup/phase1.go | 79 +++++-------------- backend/groth16/bn254/mpcsetup/utils.go | 98 +++++++++++++----------- 2 files changed, 73 insertions(+), 104 deletions(-) diff --git a/backend/groth16/bn254/mpcsetup/phase1.go b/backend/groth16/bn254/mpcsetup/phase1.go index 1e1c527e4c..faf6b21dd0 100644 --- a/backend/groth16/bn254/mpcsetup/phase1.go +++ b/backend/groth16/bn254/mpcsetup/phase1.go @@ -195,20 +195,20 @@ func (p *Phase1) 
Verify(next *Phase1) error {
 
 	// lemma: let R be an integral domain and
 	// F = ∑ fᵢⱼ XⁱYʲ    F' = ∑ f'ᵢⱼ XⁱYʲ
-	// G = ∑ gᵢⱼ ZⁱTʲ    G' = ∑ g'ᵢⱼ ZⁱTʲ
-	// polynomials in R[X,Y,Z,T].
+	// G = ∑ gᵢ Zⁱ    G' = ∑ g'ᵢ Zⁱ
+	// polynomials in R[X,Y,Z].
 	// if F/F' = G/G'
 	// then F/F' = G/G' ∈ FracR
 	//
-	// view our polynomials in FracR[X,Y,Z,T]
+	// view our polynomials in FracR[X,Y,Z]
 	// By multiplying out the polynomials we get
-	// FG' = F'G ⇒ ∑ fᵢⱼg'ₖₗ XᶦYʲZᵏTˡ = ∑ f'ᵢⱼgₖₗ XᶦYʲZᵏTˡ
-	// pick i0 ,j0 , k0, l0 where f'ᵢ₀ⱼ₀, g'ₖ₀ₗ₀ ≠ 0
-	// let x ≔ fᵢ₀ⱼ₀/f'ᵢ₀ⱼ₀ = gₖ₀ₗ₀/g'ₖ₀ₗ₀
-	// now for any i,j: fᵢⱼg'ₖ₀ₗ₀ = f'ᵢⱼgₖ₀ₗ₀ ⇒
+	// FG' = F'G ⇒ ∑ fᵢⱼg'ₖ XᶦYʲZᵏ = ∑ f'ᵢⱼgₖ XᶦYʲZᵏ
+	// pick i₀, j₀, k₀ where f'ᵢ₀ⱼ₀, g'ₖ₀ ≠ 0
+	// let x ≔ fᵢ₀ⱼ₀/f'ᵢ₀ⱼ₀ = gₖ₀/g'ₖ₀
+	// now for any i,j: fᵢⱼg'ₖ₀ = f'ᵢⱼgₖ₀ ⇒
 	//  fᵢⱼ = x f'ᵢⱼ
-	// likewise for any i,j: fᵢ₀ⱼ₀g'ᵢⱼ = f'ᵢ₀ⱼ₀gᵢⱼ ⇒
-	//  gᵢⱼ = x g'ᵢⱼ
+	// likewise for any k: fᵢ₀ⱼ₀g'ₖ = f'ᵢ₀ⱼ₀gₖ ⇒
+	//  gₖ = x g'ₖ
 
 	// now we use this to check that:
 	// 1. aᵢ ≔ G1.Tau[i] = [τⁱ]₁
 	// 2. bᵢ ≔ G2.Tau[i] = [τⁱ]₂
 	// 3. cᵢ ≔ G1.AlphaTau[i] = [ατⁱ]₁
 	// 4. dᵢ ≔ G1.BetaTau[i] = [βτⁱ]₁
 
-	//
-	// we already know that a₀ = 1, a₁ = τ,
-	// c₀ = α, d₀ = β, b₀ = 1,
 	// construct the polynomials
 	// F  ≔ a₀ + a₁X + ... + a₂ₙ₋₃X²ᴺ⁻³ + c₀Y + c₁XY + ... + cₙ₋₂Xᴺ⁻²Y + d₀Y² + d₁XY² + ... + dₙ₋₂Xᴺ⁻²Y²
 	// F' ≔ a₁ + a₂X + ... + a₂ₙ₋₂X²ᴺ⁻³ + c₁Y + c₂XY + ... + cₙ₋₁Xᴺ⁻²Y + d₁Y² + d₂XY² + ... + dₙ₋₁Xᴺ⁻²Y²
-	// G ≔ b
+	// G  ≔ b₀ + b₁Z + ... + bₙ₋₂Zᴺ⁻²
+	// G' ≔ b₁ + b₂Z + ... + bₙ₋₁Zᴺ⁻²
 
+	// if F/F' = G/G', the lemma gives F/F' = G/G' = a₀/a₁ = 1/τ, which yields:
+	// for 0 ≤ i ≤ N-2: bᵢ = bᵢ₊₁/τ, cᵢ = cᵢ₊₁/τ, dᵢ = dᵢ₊₁/τ
+	// for 0 ≤ i ≤ 2N-3: aᵢ = aᵢ₊₁/τ
 
+	// from previous checks we already know:
+	// 1. a₀ = 1
+	// 2. b₀ = 1
+	// 3. c₀ = α
+	// 4.
d₀ = β tauT1, tauS1 := linearCombinationsG1(next.parameters.G1.Tau[1:], r) tauT2, tauS2 := linearCombinationsG2(next.parameters.G2.Tau[1:], r) diff --git a/backend/groth16/bn254/mpcsetup/utils.go b/backend/groth16/bn254/mpcsetup/utils.go index 038482a366..ac7a20f884 100644 --- a/backend/groth16/bn254/mpcsetup/utils.go +++ b/backend/groth16/bn254/mpcsetup/utils.go @@ -25,29 +25,6 @@ type PublicKey struct { XR curve.G2Affine // XR = X.R ∈ 𝔾₂ proof of knowledge } -func newPublicKey(x fr.Element, challenge []byte, dst byte) PublicKey { - var pk PublicKey - _, _, g1, _ := curve.Generators() - - var s fr.Element - var sBi big.Int - s.SetRandom() - s.BigInt(&sBi) - pk.SG.ScalarMultiplication(&g1, &sBi) - - // compute x*sG1 - var xBi big.Int - x.BigInt(&xBi) - pk.SXG.ScalarMultiplication(&pk.SG, &xBi) - - // generate R based on sG1, sxG1, challenge, and domain separation tag (tau, alpha or beta) - R := genR(pk.SG, challenge, dst) - - // compute x*spG2 - pk.XR.ScalarMultiplication(&R, &xBi) - return pk -} - func bitReverse[T any](a []T) { n := uint64(len(a)) nn := uint64(64 - bits.TrailingZeros64(n)) @@ -61,17 +38,7 @@ func bitReverse[T any](a []T) { } func linearCombCoeffs(n int) []fr.Element { - var a fr.Element - if _, err := a.SetRandom(); err != nil { - panic(err) - } - return powers(&a, n) -} - -func powersI(a *big.Int, n int) []fr.Element { - var aMont fr.Element - aMont.SetBigInt(a) - return powers(&aMont, n) + return bivariateRandomMonomials(n) } // Returns [1, a, a², ..., aᴺ⁻¹ ] @@ -79,7 +46,7 @@ func powers(a *fr.Element, n int) []fr.Element { result := make([]fr.Element, n) if n >= 1 { - result[0] = fr.NewElement(1) + result[0].SetOne() } if n >= 2 { result[1].Set(a) @@ -122,15 +89,6 @@ func scaleG2InPlace(A []curve.G2Affine, a []fr.Element) { }) } -/* -// Check e(a₁, a₂) = e(b₁, b₂) -func sameRatio(a1, b1 curve.G1Affine, a2, b2 curve.G2Affine) bool { - if !a1.IsInSubGroup() || !b1.IsInSubGroup() || !a2.IsInSubGroup() || !b2.IsInSubGroup() { - panic("invalid point not in subgroup") - } - return sameRatioUnsafe(a1, b1, a2, b2) -}*/ - // Check n₁/d₁ = n₂/d₂ i.e. e(n₁, d₂) = e(d₁, n₂). No subgroup checks. func sameRatioUnsafe(n1, d1 curve.G1Affine, n2, d2 curve.G2Affine) bool { var nd1 curve.G1Affine @@ -309,6 +267,54 @@ func areInSubGroupG2(s []curve.G2Affine) bool { return areInSubGroup(toRefs(s)) } -func truncate[T any](s []T) []T { - return s[:len(s)-1] +// bivariateRandomMonomials returns 1, x, ..., xˣᴰ⁰; y, xy, ..., xˣᴰ¹y; ... 
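+// (for example, with block sizes xD = (3, 2) and random x, y, the output is 1, x, x², y, xy)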
+// all concatenated in the same slice +func bivariateRandomMonomials(xD ...int) []fr.Element { + if len(xD) == 0 { + return nil + } + totalSize := xD[0] + for i := 1; i < len(xD); i++ { + totalSize += xD[i] + if xD[i] > xD[0] { + panic("implementation detail: first max degree must be the largest") + } + } + + res := make([]fr.Element, totalSize) + if _, err := res[1].SetRandom(); err != nil { + panic(err) + } + setPowers(res[:xD[0]]) + + if len(xD) == 1 { + return res + } + + y := make([]fr.Element, len(xD)) + if _, err := y[1].SetRandom(); err != nil { + panic(err) + } + setPowers(y) + + totalSize = xD[0] + for d := 1; d < len(xD); d++ { + for i := range res[:xD[d]] { + res[totalSize+i].Mul(&res[i], &y[d]) + } + totalSize += xD[d] + } + + return res +} + +// sets x[i] = x[1]ⁱ +func setPowers(x []fr.Element) { + if len(x) == 0 { + return + } + x[0].SetOne() + for i := 2; i < len(x); i++ { + x[i].Mul(&x[i-1], &x[1]) + } } From 527dd22504efb83745b6e545c4c98ab7e3039ec3 Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Thu, 12 Dec 2024 18:54:05 -0600 Subject: [PATCH 18/25] feat newLinearCombinationsG1 --- backend/groth16/bn254/mpcsetup/phase1.go | 13 ++- backend/groth16/bn254/mpcsetup/utils.go | 103 ++++++++++++++++------- 2 files changed, 84 insertions(+), 32 deletions(-) diff --git a/backend/groth16/bn254/mpcsetup/phase1.go b/backend/groth16/bn254/mpcsetup/phase1.go index faf6b21dd0..590fefccaa 100644 --- a/backend/groth16/bn254/mpcsetup/phase1.go +++ b/backend/groth16/bn254/mpcsetup/phase1.go @@ -232,7 +232,18 @@ func (p *Phase1) Verify(next *Phase1) error { // 3. c₀ = α // 4. d₀ = β - tauT1, tauS1 := linearCombinationsG1(next.parameters.G1.Tau[1:], r) + ends := partialSums(len(next.parameters.G1.Tau), len(next.parameters.G1.AlphaTau), len(next.parameters.G1.BetaTau)) + + coeffs := bivariateRandomMonomials(ends...) + + g1s := make([]curve.G1Affine, 0, ends[len(ends)-1]) + g1s = append(g1s, next.parameters.G1.Tau...) + g1s = append(g1s, next.parameters.G1.AlphaTau...) + g1s = append(g1s, next.parameters.G1.BetaTau...) + + g1Num, g1Denom := linearCombinationsG1(g1s, coeffs, ends) + + tauT1, tauS1 := linearCombinationsG1(next.parameters.G1.Tau[1:], r, ends) tauT2, tauS2 := linearCombinationsG2(next.parameters.G2.Tau[1:], r) if !sameRatioUnsafe(tauS1, tauT1, next.parameters.G2.Tau[1], g2) { diff --git a/backend/groth16/bn254/mpcsetup/utils.go b/backend/groth16/bn254/mpcsetup/utils.go index ac7a20f884..86428dc260 100644 --- a/backend/groth16/bn254/mpcsetup/utils.go +++ b/backend/groth16/bn254/mpcsetup/utils.go @@ -112,21 +112,54 @@ func linearCombination(A []curve.G1Affine, r []fr.Element) curve.G1Affine { return res } -// linearCombinationsG1 assumes, and does not check, that rPowers[i+1] = rPowers[1].rPowers[i] for all applicable i -// Also assumed that 3 ≤ N ≔ len(A) ≤ len(rPowers) -// the results are truncated = ∑_{i=0}^{N-2} rⁱAᵢ, shifted = ∑_{i=1}^{N-1} rⁱAᵢ -func linearCombinationsG1(A []curve.G1Affine, rPowers []fr.Element) (truncated, shifted curve.G1Affine) { - // the common section, 1 to N-2 - var common curve.G1Affine - if _, err := common.MultiExp(A[1:len(A)-1], rPowers[:len(A)-2], ecc.MultiExpConfig{NbTasks: runtime.NumCPU()}); err != nil { // A[1] + r.A[2] + ... + rᴺ⁻³.A[N-2] +// linearCombinationsG1 returns +// +// powers[0].A[0] + powers[1].A[1] + ... + powers[ends[0]-2].A[ends[0]-2] +// + powers[ends[0]].A[ends[0]] + ... + powers[ends[1]-2].A[ends[1]-2] +// .... (truncated) +// +// powers[0].A[1] + powers[1].A[2] + ... 
+ powers[ends[0]-2].A[ends[0]-1]
+//	+ powers[ends[0]].A[ends[0]+1] + ... + powers[ends[1]-2].A[ends[1]-1]
+//	.... (shifted)
+//
+// It assumes without checking that powers[i+1] = powers[i]*powers[1] unless i or i+1 is a partial sum of sizes
+// the slices powers and A will be modified
+func linearCombinationsG1(A []curve.G1Affine, powers []fr.Element, ends []int) (truncated, shifted curve.G1Affine) {
+	if ends[len(ends)-1] != len(A) || len(A) != len(powers) {
+		panic("lengths mismatch")
+	}
+
+	largeCoeffs := make([]fr.Element, len(ends))
+	for i := range ends {
+		largeCoeffs[i].Neg(&powers[ends[i]-1])
+		powers[ends[i]-1].SetZero()
+	}
+
+	msmCfg := ecc.MultiExpConfig{NbTasks: runtime.NumCPU()}
+
+	if _, err := shifted.MultiExp(A, powers, msmCfg); err != nil {
+		panic(err)
+	}
+
+	prevEnd := 0
+	for i := range ends {
+		if ends[i] <= prevEnd {
+			panic("non-increasing ends")
+		}
+
+		powers[2*i] = powers[prevEnd]
+		powers[2*i+1] = largeCoeffs[i]
+
+		A[2*i] = A[prevEnd]
+		A[2*i+1] = A[ends[i]-1]
+
+		prevEnd = ends[i]
+	}
+	// TODO @Tabaie O(1) MSM worth it?
+	if _, err := truncated.MultiExp(A[:2*len(ends)], powers[:2*len(ends)], msmCfg); err != nil {
+		panic(err)
+	}
+	truncated.Add(&truncated, &shifted)
 
 	return
 }
 
-// bivariateRandomMonomials returns 1, x, ..., xˣᴰ⁰; y, xy, ..., xˣᴰ¹y; ...
+// bivariateRandomMonomials returns 1, x, ..., x^{ends[0]-1}; y, xy, ..., x^{ends[1]-ends[0]-1}y; ...
 // all concatenated in the same slice
-func bivariateRandomMonomials(xD ...int) []fr.Element {
-	if len(xD) == 0 {
+func bivariateRandomMonomials(ends ...int) []fr.Element {
+	if len(ends) == 0 {
 		return nil
 	}
 
-	res := make([]fr.Element, totalSize)
+	res := make([]fr.Element, ends[len(ends)-1])
 	if _, err := res[1].SetRandom(); err != nil {
 		panic(err)
 	}
-	setPowers(res[:xD[0]])
+	setPowers(res[:ends[0]])
 
-	if len(xD) == 1 {
+	if len(ends) == 1 {
 		return res
 	}
 
-	y := make([]fr.Element, len(xD))
+	y := make([]fr.Element, len(ends))
 	if _, err := y[1].SetRandom(); err != nil {
 		panic(err)
 	}
 	setPowers(y)
 
-	totalSize = xD[0]
-	for d := 1; d < len(xD); d++ {
-		for i := range res[:xD[d]] {
-			res[totalSize+i].Mul(&res[i], &y[d])
+	for d := 1; d < len(ends); d++ {
+		xdeg := ends[d] - ends[d-1]
+		if xdeg > ends[0] {
+			panic("impl detail: first maximum degree for x must be the greatest")
+		}
+
+		for i := range xdeg {
+			res[ends[d-1]+i].Mul(&res[i], &y[d])
 		}
-		totalSize += xD[d]
 	}
 
 	return res
 }
 
+func partialSums(s ...int) []int {
+	if len(s) == 0 {
+		return nil
+	}
+	sums := make([]int, len(s))
+	sums[0] = s[0]
+	for i := 1; i < len(s); i++ {
+		sums[i] = sums[i-1] + s[i]
+	}
+	return sums
+}

From 18411e59bdb4c3d497a86d21bbd912a64ebdb752 Mon Sep 17 00:00:00 2001
From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com>
Date: Thu, 12 Dec 2024 19:00:17 -0600
Subject: [PATCH 19/25] clean phase1 ver

---
 backend/groth16/bn254/mpcsetup/phase1.go | 44 +++---------
 1 file changed, 4 insertions(+), 40 deletions(-)

diff --git
a/backend/groth16/bn254/mpcsetup/phase1.go b/backend/groth16/bn254/mpcsetup/phase1.go index 590fefccaa..f16bd9755f 100644 --- a/backend/groth16/bn254/mpcsetup/phase1.go +++ b/backend/groth16/bn254/mpcsetup/phase1.go @@ -170,9 +170,6 @@ func (p *Phase1) Verify(next *Phase1) error { return errors.New("domain size mismatch") } - r := linearCombCoeffs(len(next.parameters.G1.Tau) + len(next.parameters.G1.AlphaTau) + len(next.parameters.G1.BetaTau) - 1) // the longest of all lengths - // will be reusing the coefficients - // verify updates to τ, α, β if err := next.proofs.Tau.verify(pair{p.parameters.G1.Tau[1], nil}, pair{next.parameters.G1.Tau[1], nil}, challenge, 1); err != nil { return fmt.Errorf("failed to verify contribution to τ: %w", err) @@ -191,8 +188,6 @@ func (p *Phase1) Verify(next *Phase1) error { return errors.New("derived values 𝔾₂ subgroup check failed") } - _, _, g1, g2 := curve.Generators() - // lemma: let R be an integral domain and // F = ∑ fᵢⱼ XⁱYʲ F' = ∑ f'ᵢⱼ XⁱYʲ // G = ∑ gᵢ Zⁱ G' = ∑ g'ᵢ Zⁱ @@ -234,48 +229,17 @@ func (p *Phase1) Verify(next *Phase1) error { ends := partialSums(len(next.parameters.G1.Tau), len(next.parameters.G1.AlphaTau), len(next.parameters.G1.BetaTau)) - coeffs := bivariateRandomMonomials(ends...) - g1s := make([]curve.G1Affine, 0, ends[len(ends)-1]) g1s = append(g1s, next.parameters.G1.Tau...) g1s = append(g1s, next.parameters.G1.AlphaTau...) g1s = append(g1s, next.parameters.G1.BetaTau...) - g1Num, g1Denom := linearCombinationsG1(g1s, coeffs, ends) - - tauT1, tauS1 := linearCombinationsG1(next.parameters.G1.Tau[1:], r, ends) - tauT2, tauS2 := linearCombinationsG2(next.parameters.G2.Tau[1:], r) - - if !sameRatioUnsafe(tauS1, tauT1, next.parameters.G2.Tau[1], g2) { - return errors.New("couldn't verify 𝔾₁ representations of the τⁱ") - } - - if !sameRatioUnsafe(next.parameters.G1.Tau[1], g1, tauS2, tauT2) { - return errors.New("couldn't verify 𝔾₂ representations of the τⁱ") - } - - alphaTT, alphaTS := linearCombinationsG1(next.parameters.G1.AlphaTau, r) - betaTT, betaTS := linearCombinationsG1(next.parameters.G1.BetaTau, r) - - // for 0 ≤ i < N we want to check the ατⁱ - // By well-formedness checked by ReadFrom, we assume that ατ⁰ = α - // For 0 < i < N we check that ατⁱ/ατⁱ⁻¹ = τ, since we have a representation of τ in 𝔾₂ - // with a similar bi-linearity argument as above we can do this with a single pairing check - - // TODO eliminate these by combining with update checking + g1Num, g1Denom := linearCombinationsG1(g1s, bivariateRandomMonomials(ends...), ends) + g2Num, g2Denom := linearCombinationsG2(next.parameters.G2.Tau, linearCombCoeffs(len(next.parameters.G2.Tau))) - if !sameRatioUnsafe(alphaTS, alphaTT, next.parameters.G2.Tau[1], g2) { - return errors.New("couldn't verify the ατⁱ") + if !sameRatioUnsafe(g1Num, g1Denom, g2Num, g2Denom) { + return errors.New("value update check failed") } - if !sameRatioUnsafe(betaTS, betaTT, next.parameters.G2.Tau[1], g2) { - return errors.New("couldn't verify the βτⁱ") - } - - // TODO @Tabaie combine all pairing checks except the second one - - taus := linearCombination(next.parameters.G1.Tau[:N], r) // 1 + r.τ¹ + r.τ² + … + rᴺ⁻¹.τᴺ⁻¹ - alphaTaus := linearCombination(next.parameters.G1.AlphaTau, r) // α + r.ατ¹ + r.ατ² + … + rᴺ⁻¹.ατᴺ⁻¹ - betaTaus := linearCombination(next.parameters.G1.BetaTau, r) // β + r.τ¹ + r.βτ² + … + rᴺ⁻¹.βτᴺ⁻¹ return nil } From 849f9d57a654f5a252964c4a078f498049c1cc9c Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Thu, 12 Dec 2024 19:13:50 -0600 
Subject: [PATCH 20/25] docs and fixes

---
 backend/groth16/bn254/mpcsetup/phase1.go | 11 ++++++-----
 backend/groth16/bn254/mpcsetup/utils.go  | 13 ++++++++++---
 2 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/backend/groth16/bn254/mpcsetup/phase1.go b/backend/groth16/bn254/mpcsetup/phase1.go
index f16bd9755f..c50a68e909 100644
--- a/backend/groth16/bn254/mpcsetup/phase1.go
+++ b/backend/groth16/bn254/mpcsetup/phase1.go
@@ -188,17 +188,17 @@ func (p *Phase1) Verify(next *Phase1) error {
 		return errors.New("derived values 𝔾₂ subgroup check failed")
 	}
 
-	// lemma: let R be an integral domain and
+	// lemma: let K be a field and
 	// F = ∑ fᵢⱼ XⁱYʲ	F' = ∑ f'ᵢⱼ XⁱYʲ
 	// G = ∑ gᵢ Zⁱ	G' = ∑ g'ᵢ Zⁱ
-	// polynomials in R[X,Y,Z].
+	// polynomials in K[X,Y,Z].
 	// if F/F' = G/G'
-	// then F/F' = G/G' ∈ FracR
+	// then F/F' = G/G' ∈ K
 	//
-	// view our polynomials in FracR[X,Y,Z]
+	// view our polynomials in K[X,Y,Z]
 	// By multiplying out the polynomials we get
 	// FG' = F'G ⇒ ∑ fᵢⱼg'ₖ XᶦYʲZᵏ = ∑ f'ᵢⱼgₖₗ XᶦYʲZᵏ
-	// pick i0 ,j0 , k0 where f'ᵢ₀ⱼ₀, g'ₖ₀ ≠ 0
+	// pick i₀ ,j₀ , k₀ where f'ᵢ₀ⱼ₀, g'ₖ₀ ≠ 0
 	// let x ≔ fᵢ₀ⱼ₀/f'ᵢ₀ⱼ₀ = gₖ₀/g'ₖ₀
 	// now for any i,j: fᵢⱼg'ₖ₀ = f'ᵢⱼgₖ₀ ⇒
 	// fᵢⱼ = x f'ᵢⱼ
@@ -226,6 +226,7 @@ func (p *Phase1) Verify(next *Phase1) error {
 	// 2. b₀ = 1
 	// 3. c₀ = α
 	// 4. d₀ = β
+	// and so the desired results follow
 
 	ends := partialSums(len(next.parameters.G1.Tau), len(next.parameters.G1.AlphaTau), len(next.parameters.G1.BetaTau))
 
diff --git a/backend/groth16/bn254/mpcsetup/utils.go b/backend/groth16/bn254/mpcsetup/utils.go
index 86428dc260..14cd4d5ca9 100644
--- a/backend/groth16/bn254/mpcsetup/utils.go
+++ b/backend/groth16/bn254/mpcsetup/utils.go
@@ -122,7 +122,7 @@ func linearCombination(A []curve.G1Affine, r []fr.Element) curve.G1Affine {
 //   + powers[ends[0]].A[ends[0]+1] + ... + powers[ends[1]-2].A[ends[1]-1]
 // .... (shifted)
 //
-// It assumes without checking that powers[i+1] = powers[i]*powers[1] unless i or i+1 is a partial sum of sizes
+// It is assumed without checking that powers[i+1] = powers[i]*powers[1] unless i+1 is a partial sum of sizes
 // the slices powers and A will be modified
 func linearCombinationsG1(A []curve.G1Affine, powers []fr.Element, ends []int) (truncated, shifted curve.G1Affine) {
 	if ends[len(ends)-1] != len(A) || len(A) != len(powers) {
@@ -141,6 +141,11 @@ func linearCombinationsG1(A []curve.G1Affine, powers []fr.Element, ends []int) (
 		panic(err)
 	}
 
+	// compute truncated as
+	//     r.shifted
+	//   + powers[0].A[0] + powers[ends[0]].A[ends[0]] + ...
+	//   - powers[ends[0]-1].A[ends[0]-1] - powers[ends[1]-1].A[ends[1]-1] - ...
+	r := powers[1]
 	prevEnd := 0
 	for i := range ends {
 		if ends[i] <= prevEnd {
@@ -155,11 +160,13 @@ func linearCombinationsG1(A []curve.G1Affine, powers []fr.Element, ends []int) (
 		prevEnd = ends[i]
 	}
 
+	powers[len(ends)*2] = r
+	A[len(ends)*2] = shifted
+
 	// TODO @Tabaie O(1) MSM worth it?
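The identity behind this small follow-up MSM is easy to check on plain field elements. A standalone scalar sketch (illustrative only, not part of any patch in this series; it assumes a single segment of length n and uses fr elements in place of the 𝔾₁ points): with powers[i] = rⁱ, truncated = Σ_{i≤n-2} powers[i]·A[i] equals r·shifted + powers[0]·A[0] - powers[n-1]·A[n-1], where shifted = Σ_{i≤n-2} powers[i]·A[i+1].

package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bn254/fr"
)

func main() {
	const n = 8

	var r fr.Element
	if _, err := r.SetRandom(); err != nil {
		panic(err)
	}

	// powers[i] = rⁱ; a[i] are scalar stand-ins for the points A[i]
	powers := make([]fr.Element, n)
	a := make([]fr.Element, n)
	powers[0].SetOne()
	for i := 1; i < n; i++ {
		powers[i].Mul(&powers[i-1], &r)
	}
	for i := range a {
		if _, err := a[i].SetRandom(); err != nil {
			panic(err)
		}
	}

	// truncated = Σ_{i=0}^{n-2} powers[i]·a[i], shifted = Σ_{i=0}^{n-2} powers[i]·a[i+1]
	var truncated, shifted, t fr.Element
	for i := 0; i < n-1; i++ {
		truncated.Add(&truncated, t.Mul(&powers[i], &a[i]))
		shifted.Add(&shifted, t.Mul(&powers[i], &a[i+1]))
	}

	// the comment's identity: truncated = r·shifted + powers[0]·a[0] - powers[n-1]·a[n-1]
	var rhs fr.Element
	rhs.Mul(&r, &shifted)
	rhs.Add(&rhs, &a[0]) // powers[0] = 1
	rhs.Sub(&rhs, t.Mul(&powers[n-1], &a[n-1]))

	fmt.Println("identity holds:", truncated.Equal(&rhs))
}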
- if _, err := truncated.MultiExp(A[:2*len(ends)], powers[:2*len(ends)], msmCfg); err != nil { + if _, err := truncated.MultiExp(A[:2*len(ends)+1], powers[:2*len(ends)+1], msmCfg); err != nil { panic(err) } - truncated.Add(&truncated, &shifted) return } From 34b3967bae50f6e9d37b1ac4032eb6182234071b Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Thu, 12 Dec 2024 19:36:51 -0600 Subject: [PATCH 21/25] feat key extraction - commitments --- backend/groth16/bn254/mpcsetup/phase2.go | 90 ++++++++---------------- backend/groth16/bn254/mpcsetup/setup.go | 28 ++++++-- 2 files changed, 49 insertions(+), 69 deletions(-) diff --git a/backend/groth16/bn254/mpcsetup/phase2.go b/backend/groth16/bn254/mpcsetup/phase2.go index 6f1758b5f9..2927acbdb8 100644 --- a/backend/groth16/bn254/mpcsetup/phase2.go +++ b/backend/groth16/bn254/mpcsetup/phase2.go @@ -11,13 +11,13 @@ import ( "errors" "fmt" "github.com/consensys/gnark/backend/groth16/internal" + cs "github.com/consensys/gnark/constraint/bn254" "math/big" "slices" curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fr" "github.com/consensys/gnark/constraint" - cs "github.com/consensys/gnark/constraint/bn254" ) type Phase2Evaluations struct { // TODO @Tabaie rename @@ -155,20 +155,13 @@ func (c *Phase2) Contribute() { // Init is to be run by the coordinator // It involves no coin tosses. A verifier should // simply rerun all the steps -func (p *Phase2) Init(commons SrsCommons) { - -} - -func InitPhase2(r1cs *cs.R1CS, srs1 *Phase1) (Phase2, Phase2Evaluations) { +func (p *Phase2) Init(r1cs *cs.R1CS, commons SrsCommons) Phase2Evaluations { - srs := srs1.parameters - size := len(srs.G1.AlphaTau) + size := len(commons.G1.AlphaTau) if size < r1cs.GetNbConstraints() { panic("Number of constraints is larger than expected") } - var c2 Phase2 - accumulateG1 := func(res *curve.G1Affine, t constraint.Term, value *curve.G1Affine) { cID := t.CoeffID() switch cID { @@ -210,10 +203,10 @@ func InitPhase2(r1cs *cs.R1CS, srs1 *Phase1) (Phase2, Phase2Evaluations) { } // Prepare Lagrange coefficients of [τ...]₁, [τ...]₂, [ατ...]₁, [βτ...]₁ - coeffTau1 := lagrangeCoeffsG1(srs.G1.Tau, size) // [L_{ω⁰}(τ)]₁, [L_{ω¹}(τ)]₁, ... where ω is a primitive sizeᵗʰ root of unity - coeffTau2 := lagrangeCoeffsG2(srs.G2.Tau, size) // [L_{ω⁰}(τ)]₂, [L_{ω¹}(τ)]₂, ... - coeffAlphaTau1 := lagrangeCoeffsG1(srs.G1.AlphaTau, size) // [L_{ω⁰}(ατ)]₁, [L_{ω¹}(ατ)]₁, ... - coeffBetaTau1 := lagrangeCoeffsG1(srs.G1.BetaTau, size) // [L_{ω⁰}(βτ)]₁, [L_{ω¹}(βτ)]₁, ... + coeffTau1 := lagrangeCoeffsG1(commons.G1.Tau, size) // [L_{ω⁰}(τ)]₁, [L_{ω¹}(τ)]₁, ... where ω is a primitive sizeᵗʰ root of unity + coeffTau2 := lagrangeCoeffsG2(commons.G2.Tau, size) // [L_{ω⁰}(τ)]₂, [L_{ω¹}(τ)]₂, ... + coeffAlphaTau1 := lagrangeCoeffsG1(commons.G1.AlphaTau, size) // [L_{ω⁰}(ατ)]₁, [L_{ω¹}(ατ)]₁, ... + coeffBetaTau1 := lagrangeCoeffsG1(commons.G1.BetaTau, size) // [L_{ω⁰}(βτ)]₁, [L_{ω¹}(βτ)]₁, ... 
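For context on the lagrangeCoeffs* helpers: since Lᵢ(X) = (1/n)·Σⱼ ω⁻ⁱʲXʲ, the vector (L_{ωⁱ}(τ))ᵢ is the inverse DFT of the powers (τʲ)ⱼ, which is why the Lagrange-basis SRS can be derived from the monomial-basis SRS by running inverse-FFT butterflies over the group elements. A standalone scalar sketch of that fact (illustrative; the toy domain size is an assumption, and fr elements stand in for 𝔾₁ points):

package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bn254/fr"
	"github.com/consensys/gnark-crypto/ecc/bn254/fr/fft"
)

func main() {
	const n = 8
	domain := fft.NewDomain(n)

	var tau fr.Element
	if _, err := tau.SetRandom(); err != nil {
		panic(err)
	}

	// start from the "monomial" SRS view: lag[j] = τʲ
	lag := make([]fr.Element, n)
	lag[0].SetOne()
	for j := 1; j < n; j++ {
		lag[j].Mul(&lag[j-1], &tau)
	}

	// an inverse FFT turns it into the Lagrange view: lag[i] = L_{ωⁱ}(τ)
	domain.FFTInverse(lag, fft.DIF)
	fft.BitReverse(lag)

	// a random polynomial given by its evaluations f(ωⁱ)
	evals := make([]fr.Element, n)
	for i := range evals {
		if _, err := evals[i].SetRandom(); err != nil {
			panic(err)
		}
	}

	// Σ f(ωⁱ)·L_{ωⁱ}(τ) ...
	var viaLagrange, t fr.Element
	for i := range evals {
		viaLagrange.Add(&viaLagrange, t.Mul(&evals[i], &lag[i]))
	}

	// ... equals f(τ) computed from the coefficients cⱼ = IFFT(evals)ⱼ
	coeffs := append([]fr.Element{}, evals...)
	domain.FFTInverse(coeffs, fft.DIF)
	fft.BitReverse(coeffs)
	var viaCoeffs, tauPow fr.Element
	tauPow.SetOne()
	for j := range coeffs {
		viaCoeffs.Add(&viaCoeffs, t.Mul(&coeffs[j], &tauPow))
		tauPow.Mul(&tauPow, &tau)
	}

	fmt.Println("f(τ) matches:", viaLagrange.Equal(&viaCoeffs))
}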
 	nbInternal, nbSecret, nbPublic := r1cs.GetNbVariables()
 	nWires := nbInternal + nbSecret + nbPublic
@@ -252,36 +245,36 @@ func InitPhase2(r1cs *cs.R1CS, srs1 *Phase1) (Phase2, Phase2Evaluations) {
 
 	// Prepare default contribution
 	_, _, g1, g2 := curve.Generators()
-	c2.Parameters.G1.Delta = g1
-	c2.Parameters.G2.Delta = g2
+	p.Parameters.G1.Delta = g1
+	p.Parameters.G2.Delta = g2
 
 	// Build Z in PK as τⁱ(τⁿ - 1) = τ⁽ⁱ⁺ⁿ⁾ - τⁱ for i ∈ [0, n-2]
 	// τⁱ(τⁿ - 1) = τ⁽ⁱ⁺ⁿ⁾ - τⁱ for i ∈ [0, n-2]
-	n := len(srs.G1.AlphaTau)
-	c2.Parameters.G1.Z = make([]curve.G1Affine, n)
+	n := len(commons.G1.AlphaTau)
+	p.Parameters.G1.Z = make([]curve.G1Affine, n)
 	for i := 0; i < n-1; i++ { // TODO @Tabaie why is the last element always 0?
-		c2.Parameters.G1.Z[i].Sub(&srs.G1.Tau[i+n], &srs.G1.Tau[i])
+		p.Parameters.G1.Z[i].Sub(&commons.G1.Tau[i+n], &commons.G1.Tau[i])
 	}
-	bitReverse(c2.Parameters.G1.Z)
-	c2.Parameters.G1.Z = c2.Parameters.G1.Z[:n-1]
+	bitReverse(p.Parameters.G1.Z)
+	p.Parameters.G1.Z = p.Parameters.G1.Z[:n-1]
 
 	commitments := r1cs.CommitmentInfo.(constraint.Groth16Commitments)
 	evals.G1.CKK = make([][]curve.G1Affine, len(commitments))
-	c2.Sigmas = make([]valueUpdate, len(commitments))
-	c2.Parameters.G1.SigmaCKK = make([][]curve.G1Affine, len(commitments))
-	c2.Parameters.G2.Sigma = make([]curve.G2Affine, len(commitments))
+	p.Sigmas = make([]valueUpdate, len(commitments))
+	p.Parameters.G1.SigmaCKK = make([][]curve.G1Affine, len(commitments))
+	p.Parameters.G2.Sigma = make([]curve.G2Affine, len(commitments))
 
 	for j := range commitments {
 		evals.G1.CKK[j] = make([]curve.G1Affine, 0, len(commitments[j].PrivateCommitted))
-		c2.Parameters.G2.Sigma[j] = g2
+		p.Parameters.G2.Sigma[j] = g2
 	}
 
 	nbCommitted := internal.NbElements(commitments.GetPrivateCommitted())
 
 	// Evaluate PKK
-	c2.Parameters.G1.PKK = make([]curve.G1Affine, 0, nbInternal+nbSecret-nbCommitted-len(commitments))
+	p.Parameters.G1.PKK = make([]curve.G1Affine, 0, nbInternal+nbSecret-nbCommitted-len(commitments))
 	evals.G1.VKK = make([]curve.G1Affine, 0, nbPublic+len(commitments))
 	committedIterator := internal.NewMergeIterator(commitments.GetPrivateCommitted())
 	nbCommitmentsSeen := 0
@@ -297,7 +290,7 @@ func InitPhase2(r1cs *cs.R1CS, srs1 *Phase1) (Phase2, Phase2Evaluations) {
 		} else if j < nbPublic || isCommitment {
 			evals.G1.VKK = append(evals.G1.VKK, tmp)
 		} else {
-			c2.Parameters.G1.PKK = append(c2.Parameters.G1.PKK, tmp)
+			p.Parameters.G1.PKK = append(p.Parameters.G1.PKK, tmp)
 		}
 		if isCommitment {
 			nbCommitmentsSeen++
 		}
 	}
 
 	for j := range commitments {
-		c2.Parameters.G1.SigmaCKK[j] = slices.Clone(evals.G1.CKK[j])
+		p.Parameters.G1.SigmaCKK[j] = slices.Clone(evals.G1.CKK[j])
 	}
 
-	// Hash initial contribution
-	c2.Challenge = c2.hash() // TODO remove
-	return c2, evals
+	p.Challenge = nil
+
+	return evals
 }
 
+// VerifyPhase2
+// c0 must be initialized with the Init method
 func VerifyPhase2(c0, c1 *Phase2, c ...*Phase2) error {
+	// CRITICAL TODO: Should run the "beacon" step afterwards
 	contribs := append([]*Phase2{c0, c1}, c...)
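The Z construction above rests on a one-line identity for the vanishing polynomial of the size-n evaluation domain H, Z_H(X) = Xⁿ - 1: multiplying by τⁱ gives τⁱ·Z_H(τ) = τ⁽ⁱ⁺ⁿ⁾ - τⁱ, so each [τⁱ·Z_H(τ)]₁ is a single subtraction of two phase-1 SRS elements. A standalone scalar sketch (illustrative, toy size):

package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bn254/fr"
)

func main() {
	const n = 8

	var tau fr.Element
	if _, err := tau.SetRandom(); err != nil {
		panic(err)
	}

	// pow[j] = τʲ for j < 2n, covering the range provided by the phase-1 SRS
	pow := make([]fr.Element, 2*n)
	pow[0].SetOne()
	for j := 1; j < 2*n; j++ {
		pow[j].Mul(&pow[j-1], &tau)
	}

	// Z_H(τ) = τⁿ - 1 for the size-n domain H
	var zTau, one fr.Element
	one.SetOne()
	zTau.Sub(&pow[n], &one)

	ok := true
	for i := 0; i < n-1; i++ {
		var direct, viaSRS fr.Element
		direct.Mul(&pow[i], &zTau)     // τⁱ·(τⁿ - 1)
		viaSRS.Sub(&pow[i+n], &pow[i]) // τ⁽ⁱ⁺ⁿ⁾ - τⁱ
		ok = ok && direct.Equal(&viaSRS)
	}
	fmt.Println("Z identity holds:", ok)
}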
for i := 0; i < len(contribs)-1; i++ { - if err := verifyPhase2(contribs[i], contribs[i+1]); err != nil { + if err := contribs[i].Verify(contribs[i+1]); err != nil { return err } } return nil } -func verifyPhase2(current, contribution *Phase2) error { - // Compute R for δ - deltaR := genR(contribution.PublicKey.SG, current.Challenge[:], 1) - - // Check for knowledge of δ - if !sameRatio(contribution.PublicKey.SG, contribution.PublicKey.SXG, contribution.PublicKey.XR, deltaR) { - return errors.New("couldn't verify knowledge of δ") - } - - // Check for valid updates using previous parameters - if !sameRatio(contribution.Parameters.G1.Delta, current.Parameters.G1.Delta, deltaR, contribution.PublicKey.XR) { - return errors.New("couldn't verify that [δ]₁ is based on previous contribution") - } - if !sameRatio(contribution.PublicKey.SG, contribution.PublicKey.SXG, contribution.Parameters.G2.Delta, current.Parameters.G2.Delta) { - return errors.New("couldn't verify that [δ]₂ is based on previous contribution") - } - - // Check for valid updates of PKK and Z using - L, prevL := linearCombination(contribution.Parameters.G1.PKK, current.Parameters.G1.PKK) - if !sameRatio(L, prevL, contribution.Parameters.G2.Delta, current.Parameters.G2.Delta) { - return errors.New("couldn't verify valid updates of PKK using δ⁻¹") - } - Z, prevZ := linearCombination(contribution.Parameters.G1.Z, current.Parameters.G1.Z) - if !sameRatio(Z, prevZ, contribution.Parameters.G2.Delta, current.Parameters.G2.Delta) { - return errors.New("couldn't verify valid updates of PKK using δ⁻¹") - } - - return nil -} - func (c *Phase2) hash() []byte { sha := sha256.New() c.writeTo(sha) diff --git a/backend/groth16/bn254/mpcsetup/setup.go b/backend/groth16/bn254/mpcsetup/setup.go index a1e79dfedb..470df05263 100644 --- a/backend/groth16/bn254/mpcsetup/setup.go +++ b/backend/groth16/bn254/mpcsetup/setup.go @@ -8,22 +8,26 @@ package mpcsetup import ( curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fr/fft" + "github.com/consensys/gnark-crypto/ecc/bn254/fr/pedersen" groth16 "github.com/consensys/gnark/backend/groth16/bn254" ) -func ExtractKeys(srs1 *Phase1, srs2 *Phase2, evals *Phase2Evaluations, nConstraints int) (pk groth16.ProvingKey, vk groth16.VerifyingKey) { +func (srs2 *Phase2) Seal(commons *SrsCommons, evals *Phase2Evaluations, nConstraints int) (pk groth16.ProvingKey, vk groth16.VerifyingKey) { + + // TODO @Tabaie beacon contribution + _, _, _, g2 := curve.Generators() // Initialize PK pk.Domain = *fft.NewDomain(uint64(nConstraints)) - pk.G1.Alpha.Set(&srs1.Parameters.G1.AlphaTau[0]) - pk.G1.Beta.Set(&srs1.Parameters.G1.BetaTau[0]) + pk.G1.Alpha.Set(&commons.G1.AlphaTau[0]) + pk.G1.Beta.Set(&commons.G1.BetaTau[0]) pk.G1.Delta.Set(&srs2.Parameters.G1.Delta) pk.G1.Z = srs2.Parameters.G1.Z bitReverse(pk.G1.Z) pk.G1.K = srs2.Parameters.G1.PKK - pk.G2.Beta.Set(&srs1.Parameters.G2.Beta) + pk.G2.Beta.Set(&commons.G2.Beta) pk.G2.Delta.Set(&srs2.Parameters.G2.Delta) // Filter out infinity points @@ -69,14 +73,24 @@ func ExtractKeys(srs1 *Phase1, srs2 *Phase2, evals *Phase2Evaluations, nConstrai pk.G2.B = B2[:j] // Initialize VK - vk.G1.Alpha.Set(&srs1.Parameters.G1.AlphaTau[0]) - vk.G1.Beta.Set(&srs1.Parameters.G1.BetaTau[0]) + vk.G1.Alpha.Set(&commons.G1.AlphaTau[0]) + vk.G1.Beta.Set(&commons.G1.BetaTau[0]) vk.G1.Delta.Set(&srs2.Parameters.G1.Delta) - vk.G2.Beta.Set(&srs1.Parameters.G2.Beta) + vk.G2.Beta.Set(&commons.G2.Beta) vk.G2.Delta.Set(&srs2.Parameters.G2.Delta) vk.G2.Gamma.Set(&g2) 
vk.G1.K = evals.G1.VKK + vk.CommitmentKeys = make([]pedersen.VerifyingKey, len(evals.G1.CKK)) + pk.CommitmentKeys = make([]pedersen.ProvingKey, len(evals.G1.CKK)) + for i := range vk.CommitmentKeys { + vk.CommitmentKeys[i].G = g2 + vk.CommitmentKeys[i].GSigmaNeg.Neg(&srs2.Parameters.G2.Sigma[i]) + + pk.CommitmentKeys[i].Basis = evals.G1.CKK[i] + pk.CommitmentKeys[i].BasisExpSigma = srs2.Parameters.G1.SigmaCKK[i] + } + // sets e, -[δ]2, -[γ]2 if err := vk.Precompute(); err != nil { panic(err) From 81fdfb681261bc67d966d07223fa95cfc890216c Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Fri, 13 Dec 2024 16:57:20 -0600 Subject: [PATCH 22/25] reface p *Phase2 --- backend/groth16/bn254/mpcsetup/marshal.go | 42 +++++++-------- backend/groth16/bn254/mpcsetup/phase2.go | 56 ++++++++++---------- backend/groth16/bn254/mpcsetup/setup.go | 25 +++++---- backend/groth16/bn254/mpcsetup/setup_test.go | 14 ++--- backend/groth16/bn254/mpcsetup/utils.go | 6 --- 5 files changed, 72 insertions(+), 71 deletions(-) diff --git a/backend/groth16/bn254/mpcsetup/marshal.go b/backend/groth16/bn254/mpcsetup/marshal.go index 3daf9fa35c..bb975bbcbf 100644 --- a/backend/groth16/bn254/mpcsetup/marshal.go +++ b/backend/groth16/bn254/mpcsetup/marshal.go @@ -56,25 +56,25 @@ func (p *Phase1) ReadFrom(reader io.Reader) (n int64, err error) { } // WriteTo implements io.WriterTo -func (phase2 *Phase2) WriteTo(writer io.Writer) (int64, error) { - n, err := phase2.writeTo(writer) +func (p *Phase2) WriteTo(writer io.Writer) (int64, error) { + n, err := p.writeTo(writer) if err != nil { return n, err } - nBytes, err := writer.Write(phase2.Challenge) + nBytes, err := writer.Write(p.Challenge) return int64(nBytes) + n, err } -func (c *Phase2) writeTo(writer io.Writer) (int64, error) { +func (p *Phase2) writeTo(writer io.Writer) (int64, error) { enc := curve.NewEncoder(writer) toEncode := []interface{}{ - &c.PublicKey.SG, - &c.PublicKey.SXG, - &c.PublicKey.XR, - &c.Parameters.G1.Delta, - c.Parameters.G1.PKK, - c.Parameters.G1.Z, - &c.Parameters.G2.Delta, + &p.PublicKey.SG, + &p.PublicKey.SXG, + &p.PublicKey.XR, + &p.Parameters.G1.Delta, + p.Parameters.G1.PKK, + p.Parameters.G1.Z, + &p.Parameters.G2.Delta, } for _, v := range toEncode { @@ -87,16 +87,16 @@ func (c *Phase2) writeTo(writer io.Writer) (int64, error) { } // ReadFrom implements io.ReaderFrom -func (c *Phase2) ReadFrom(reader io.Reader) (int64, error) { +func (p *Phase2) ReadFrom(reader io.Reader) (int64, error) { dec := curve.NewDecoder(reader) toEncode := []interface{}{ - &c.PublicKey.SG, - &c.PublicKey.SXG, - &c.PublicKey.XR, - &c.Parameters.G1.Delta, - &c.Parameters.G1.PKK, - &c.Parameters.G1.Z, - &c.Parameters.G2.Delta, + &p.PublicKey.SG, + &p.PublicKey.SXG, + &p.PublicKey.XR, + &p.Parameters.G1.Delta, + &p.Parameters.G1.PKK, + &p.Parameters.G1.Z, + &p.Parameters.G2.Delta, } for _, v := range toEncode { @@ -105,8 +105,8 @@ func (c *Phase2) ReadFrom(reader io.Reader) (int64, error) { } } - c.Challenge = make([]byte, 32) - n, err := reader.Read(c.Challenge) + p.Challenge = make([]byte, 32) + n, err := reader.Read(p.Challenge) return int64(n) + dec.BytesRead(), err } diff --git a/backend/groth16/bn254/mpcsetup/phase2.go b/backend/groth16/bn254/mpcsetup/phase2.go index 2927acbdb8..769c52c8e3 100644 --- a/backend/groth16/bn254/mpcsetup/phase2.go +++ b/backend/groth16/bn254/mpcsetup/phase2.go @@ -54,17 +54,17 @@ type Phase2 struct { Challenge []byte } -func (c *Phase2) Verify(next *Phase2) error { - challenge := c.hash() +func (p 
*Phase2) Verify(next *Phase2) error { + challenge := p.hash() if len(next.Challenge) != 0 && !bytes.Equal(next.Challenge, challenge) { return errors.New("the challenge does not match the previous phase's hash") } next.Challenge = challenge - if len(next.Parameters.G1.Z) != len(c.Parameters.G1.Z) || - len(next.Parameters.G1.PKK) != len(c.Parameters.G1.PKK) || - len(next.Parameters.G1.SigmaCKK) != len(c.Parameters.G1.SigmaCKK) || - len(next.Parameters.G2.Sigma) != len(c.Parameters.G2.Sigma) { + if len(next.Parameters.G1.Z) != len(p.Parameters.G1.Z) || + len(next.Parameters.G1.PKK) != len(p.Parameters.G1.PKK) || + len(next.Parameters.G1.SigmaCKK) != len(p.Parameters.G1.SigmaCKK) || + len(next.Parameters.G2.Sigma) != len(p.Parameters.G2.Sigma) { return errors.New("contribution size mismatch") } @@ -79,12 +79,12 @@ func (c *Phase2) Verify(next *Phase2) error { // verify proof of knowledge of contributions to the σᵢ // and the correctness of updates to Parameters.G2.Sigma[i] and the Parameters.G1.SigmaCKK[i] - for i := range c.Sigmas { // match the first commitment basis elem against the contribution commitment + for i := range p.Sigmas { // match the first commitment basis elem against the contribution commitment if !areInSubGroupG1(next.Parameters.G1.SigmaCKK[i]) { return errors.New("commitment proving key subgroup check failed") } - if err := verifyContribution(&c.Sigmas[i], c.Parameters.G1.SigmaCKK[i], next.Parameters.G1.SigmaCKK[i], &c.Parameters.G2.Sigma[i], &next.Parameters.G2.Sigma[i], 2+byte(i)); err != nil { + if err := verifyContribution(&p.Sigmas[i], p.Parameters.G1.SigmaCKK[i], next.Parameters.G1.SigmaCKK[i], &p.Parameters.G2.Sigma[i], &next.Parameters.G2.Sigma[i], 2+byte(i)); err != nil { return fmt.Errorf("failed to verify contribution to σ[%d]: %w", i, err) } } @@ -95,60 +95,60 @@ func (c *Phase2) Verify(next *Phase2) error { return errors.New("derived values 𝔾₁ subgroup check failed") } - denom := cloneAppend([]curve.G1Affine{c.Parameters.G1.Delta}, next.Parameters.G1.Z, next.Parameters.G1.PKK) - num := cloneAppend([]curve.G1Affine{next.Parameters.G1.Delta}, c.Parameters.G1.Z, c.Parameters.G1.PKK) - if err := verifyContribution(&c.Delta, denom, num, &c.Parameters.G2.Delta, &next.Parameters.G2.Delta, 1); err != nil { + denom := cloneAppend([]curve.G1Affine{p.Parameters.G1.Delta}, next.Parameters.G1.Z, next.Parameters.G1.PKK) + num := cloneAppend([]curve.G1Affine{next.Parameters.G1.Delta}, p.Parameters.G1.Z, p.Parameters.G1.PKK) + if err := verifyContribution(&p.Delta, denom, num, &p.Parameters.G2.Delta, &next.Parameters.G2.Delta, 1); err != nil { return fmt.Errorf("failed to verify contribution to δ: %w", err) } return nil } -func (c *Phase2) Contribute() { +func (p *Phase2) Contribute() { // Sample toxic δ var delta, deltaInv fr.Element var deltaBI, deltaInvBI big.Int - c.Challenge = c.hash() + p.Challenge = p.hash() - if len(c.Parameters.G1.SigmaCKK) > 255 { + if len(p.Parameters.G1.SigmaCKK) > 255 { panic("too many commitments") // DST collision } - for i := range c.Parameters.G1.SigmaCKK { + for i := range p.Parameters.G1.SigmaCKK { var ( sigmaContribution fr.Element sigmaContributionI big.Int ) - pk := c.Parameters.G1.SigmaCKK[i] - c.Sigmas[i], sigmaContribution = updateValue(&pk[0], c.Challenge, byte(2+i)) + pk := p.Parameters.G1.SigmaCKK[i] + p.Sigmas[i], sigmaContribution = updateValue(&pk[0], p.Challenge, byte(2+i)) sigmaContribution.BigInt(&sigmaContributionI) for j := 1; j < len(pk); j++ { pk[j].ScalarMultiplication(&pk[j], &sigmaContributionI) } - 
c.Parameters.G2.Sigma[i].ScalarMultiplication(&c.Parameters.G2.Sigma[i], &sigmaContributionI) + p.Parameters.G2.Sigma[i].ScalarMultiplication(&p.Parameters.G2.Sigma[i], &sigmaContributionI) } - c.Delta, delta = updateValue(&c.Parameters.G1.Delta, c.Challenge, 1) + p.Delta, delta = updateValue(&p.Parameters.G1.Delta, p.Challenge, 1) deltaInv.Inverse(&delta) delta.BigInt(&deltaBI) deltaInv.BigInt(&deltaInvBI) // Update [δ]₂ - c.Parameters.G2.Delta.ScalarMultiplication(&c.Parameters.G2.Delta, &deltaBI) + p.Parameters.G2.Delta.ScalarMultiplication(&p.Parameters.G2.Delta, &deltaBI) - c.Parameters.G1.Delta.ScalarMultiplication(&c.Parameters.G1.Delta, &deltaBI) - c.Parameters.G2.Delta.ScalarMultiplication(&c.Parameters.G2.Delta, &deltaBI) + p.Parameters.G1.Delta.ScalarMultiplication(&p.Parameters.G1.Delta, &deltaBI) + p.Parameters.G2.Delta.ScalarMultiplication(&p.Parameters.G2.Delta, &deltaBI) // Update Z using δ⁻¹ - for i := 0; i < len(c.Parameters.G1.Z); i++ { - c.Parameters.G1.Z[i].ScalarMultiplication(&c.Parameters.G1.Z[i], &deltaInvBI) + for i := 0; i < len(p.Parameters.G1.Z); i++ { + p.Parameters.G1.Z[i].ScalarMultiplication(&p.Parameters.G1.Z[i], &deltaInvBI) } // Update PKK using δ⁻¹ - for i := 0; i < len(c.Parameters.G1.PKK); i++ { - c.Parameters.G1.PKK[i].ScalarMultiplication(&c.Parameters.G1.PKK[i], &deltaInvBI) + for i := 0; i < len(p.Parameters.G1.PKK); i++ { + p.Parameters.G1.PKK[i].ScalarMultiplication(&p.Parameters.G1.PKK[i], &deltaInvBI) } } @@ -319,9 +319,9 @@ func VerifyPhase2(c0, c1 *Phase2, c ...*Phase2) error { return nil } -func (c *Phase2) hash() []byte { +func (p *Phase2) hash() []byte { sha := sha256.New() - c.writeTo(sha) + p.writeTo(sha) return sha.Sum(nil) } diff --git a/backend/groth16/bn254/mpcsetup/setup.go b/backend/groth16/bn254/mpcsetup/setup.go index 470df05263..24273e01f4 100644 --- a/backend/groth16/bn254/mpcsetup/setup.go +++ b/backend/groth16/bn254/mpcsetup/setup.go @@ -12,7 +12,14 @@ import ( groth16 "github.com/consensys/gnark/backend/groth16/bn254" ) -func (srs2 *Phase2) Seal(commons *SrsCommons, evals *Phase2Evaluations, nConstraints int) (pk groth16.ProvingKey, vk groth16.VerifyingKey) { +// Seal performs the final contribution and outputs the proving and verifying keys. +// No randomization is performed at this step. +// A verifier should simply re-run this and check +// that it produces the same values. +// The inner workings of the random beacon are out of scope. +// WARNING: Seal modifies p, just as Contribute does. +// The result will be an INVALID Phase1 object, since no proof of correctness is produced. 
+func (p *Phase2) Seal(commons *SrsCommons, evals *Phase2Evaluations, nConstraints int) (pk groth16.ProvingKey, vk groth16.VerifyingKey) { // TODO @Tabaie beacon contribution @@ -22,13 +29,13 @@ func (srs2 *Phase2) Seal(commons *SrsCommons, evals *Phase2Evaluations, nConstra pk.Domain = *fft.NewDomain(uint64(nConstraints)) pk.G1.Alpha.Set(&commons.G1.AlphaTau[0]) pk.G1.Beta.Set(&commons.G1.BetaTau[0]) - pk.G1.Delta.Set(&srs2.Parameters.G1.Delta) - pk.G1.Z = srs2.Parameters.G1.Z + pk.G1.Delta.Set(&p.Parameters.G1.Delta) + pk.G1.Z = p.Parameters.G1.Z bitReverse(pk.G1.Z) - pk.G1.K = srs2.Parameters.G1.PKK + pk.G1.K = p.Parameters.G1.PKK pk.G2.Beta.Set(&commons.G2.Beta) - pk.G2.Delta.Set(&srs2.Parameters.G2.Delta) + pk.G2.Delta.Set(&p.Parameters.G2.Delta) // Filter out infinity points nWires := len(evals.G1.A) @@ -75,9 +82,9 @@ func (srs2 *Phase2) Seal(commons *SrsCommons, evals *Phase2Evaluations, nConstra // Initialize VK vk.G1.Alpha.Set(&commons.G1.AlphaTau[0]) vk.G1.Beta.Set(&commons.G1.BetaTau[0]) - vk.G1.Delta.Set(&srs2.Parameters.G1.Delta) + vk.G1.Delta.Set(&p.Parameters.G1.Delta) vk.G2.Beta.Set(&commons.G2.Beta) - vk.G2.Delta.Set(&srs2.Parameters.G2.Delta) + vk.G2.Delta.Set(&p.Parameters.G2.Delta) vk.G2.Gamma.Set(&g2) vk.G1.K = evals.G1.VKK @@ -85,10 +92,10 @@ func (srs2 *Phase2) Seal(commons *SrsCommons, evals *Phase2Evaluations, nConstra pk.CommitmentKeys = make([]pedersen.ProvingKey, len(evals.G1.CKK)) for i := range vk.CommitmentKeys { vk.CommitmentKeys[i].G = g2 - vk.CommitmentKeys[i].GSigmaNeg.Neg(&srs2.Parameters.G2.Sigma[i]) + vk.CommitmentKeys[i].GSigmaNeg.Neg(&p.Parameters.G2.Sigma[i]) pk.CommitmentKeys[i].Basis = evals.G1.CKK[i] - pk.CommitmentKeys[i].BasisExpSigma = srs2.Parameters.G1.SigmaCKK[i] + pk.CommitmentKeys[i].BasisExpSigma = p.Parameters.G1.SigmaCKK[i] } // sets e, -[δ]2, -[γ]2 diff --git a/backend/groth16/bn254/mpcsetup/setup_test.go b/backend/groth16/bn254/mpcsetup/setup_test.go index 742b2539b7..a1b5ad1e60 100644 --- a/backend/groth16/bn254/mpcsetup/setup_test.go +++ b/backend/groth16/bn254/mpcsetup/setup_test.go @@ -176,14 +176,14 @@ func (p *Phase1) clone() Phase1 { return r } -func (phase2 *Phase2) clone() Phase2 { +func (p *Phase2) clone() Phase2 { r := Phase2{} - r.Parameters.G1.Delta = phase2.Parameters.G1.Delta - r.Parameters.G1.PKK = append(r.Parameters.G1.PKK, phase2.Parameters.G1.PKK...) - r.Parameters.G1.Z = append(r.Parameters.G1.Z, phase2.Parameters.G1.Z...) - r.Parameters.G2.Delta = phase2.Parameters.G2.Delta - r.PublicKey = phase2.PublicKey - r.Challenge = append(r.Challenge, phase2.Challenge...) + r.Parameters.G1.Delta = p.Parameters.G1.Delta + r.Parameters.G1.PKK = append(r.Parameters.G1.PKK, p.Parameters.G1.PKK...) + r.Parameters.G1.Z = append(r.Parameters.G1.Z, p.Parameters.G1.Z...) + r.Parameters.G2.Delta = p.Parameters.G2.Delta + r.PublicKey = p.PublicKey + r.Challenge = append(r.Challenge, p.Challenge...) 
return r } diff --git a/backend/groth16/bn254/mpcsetup/utils.go b/backend/groth16/bn254/mpcsetup/utils.go index 14cd4d5ca9..a429eed3bc 100644 --- a/backend/groth16/bn254/mpcsetup/utils.go +++ b/backend/groth16/bn254/mpcsetup/utils.go @@ -19,12 +19,6 @@ import ( "github.com/consensys/gnark/internal/utils" ) -type PublicKey struct { - SG curve.G1Affine - SXG curve.G1Affine - XR curve.G2Affine // XR = X.R ∈ 𝔾₂ proof of knowledge -} - func bitReverse[T any](a []T) { n := uint64(len(a)) nn := uint64(64 - bits.TrailingZeros64(n)) From ebaff8743d06adaad36e575df15d5f610aad5dce Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Fri, 13 Dec 2024 18:11:47 -0600 Subject: [PATCH 23/25] feat phase2 final contribution --- backend/groth16/bn254/mpcsetup/phase1.go | 44 ++++--------- backend/groth16/bn254/mpcsetup/phase2.go | 80 +++++++++++++----------- backend/groth16/bn254/mpcsetup/setup.go | 8 ++- backend/groth16/bn254/mpcsetup/utils.go | 47 ++++++++++---- 4 files changed, 97 insertions(+), 82 deletions(-) diff --git a/backend/groth16/bn254/mpcsetup/phase1.go b/backend/groth16/bn254/mpcsetup/phase1.go index c50a68e909..91585aedb6 100644 --- a/backend/groth16/bn254/mpcsetup/phase1.go +++ b/backend/groth16/bn254/mpcsetup/phase1.go @@ -51,11 +51,11 @@ func (p *Phase1) Contribute() { var ( tauContrib, alphaContrib, betaContrib fr.Element ) - p.proofs.Tau, tauContrib = updateValue(&p.parameters.G1.Tau[1], p.Challenge, 1) - p.proofs.Alpha, alphaContrib = updateValue(&p.parameters.G1.AlphaTau[0], p.Challenge, 2) - p.proofs.Beta, betaContrib = updateValue(&p.parameters.G1.BetaTau[0], p.Challenge, 3) + p.proofs.Tau, tauContrib = updateValue(p.parameters.G1.Tau[1], p.Challenge, 1) + p.proofs.Alpha, alphaContrib = updateValue(p.parameters.G1.AlphaTau[0], p.Challenge, 2) + p.proofs.Beta, betaContrib = updateValue(p.parameters.G1.BetaTau[0], p.Challenge, 3) - p.parameters.update(&tauContrib, &alphaContrib, &betaContrib, true) + p.parameters.update(&tauContrib, &alphaContrib, &betaContrib) } // setZero instantiates the parameters, and sets all contributions to zero @@ -82,32 +82,28 @@ func (c *SrsCommons) setOne(N uint64) { } // from the fourth argument on this just gives an opportunity to avoid recomputing some scalar multiplications -func (c *SrsCommons) update(tauUpdate, alphaUpdate, betaUpdate *fr.Element, principalG1sPrecomputed bool) { - i0 := 0 - if principalG1sPrecomputed { - i0 = 1 - } +func (c *SrsCommons) update(tauUpdate, alphaUpdate, betaUpdate *fr.Element) { // TODO @gbotrel working with jacobian points here will help with perf. tauUpdates := powers(tauUpdate, len(c.G1.Tau)) // saving 3 exactly scalar muls among millions. Not a significant gain but might as well. - scaleG1InPlace(c.G1.Tau[i0+1:], tauUpdates[i0+1:]) // first element remains 1. second element may have been precomputed. 
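The update step below is purely multiplicative: if the current transcript encodes powers of τ₀ and a contributor samples a secret t, scaling the i-th element by tⁱ yields the transcript for τ₁ = τ₀·t, with nothing recomputed from scratch. A standalone scalar sketch of this bookkeeping (illustrative; fr elements stand in for the actual 𝔾₁/𝔾₂ points):

package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bn254/fr"
)

func main() {
	const n = 8

	var tau0, t fr.Element
	if _, err := tau0.SetRandom(); err != nil { // previously accumulated τ
		panic(err)
	}
	if _, err := t.SetRandom(); err != nil { // this contributor's secret
		panic(err)
	}

	// srs[i] stands in for [τ₀ⁱ]₁ from the previous transcript
	srs := make([]fr.Element, n)
	srs[0].SetOne()
	for i := 1; i < n; i++ {
		srs[i].Mul(&srs[i-1], &tau0)
	}

	// scale element i by tⁱ, as scaleG1InPlace does with tauUpdates
	var ti fr.Element
	ti.SetOne()
	for i := 1; i < n; i++ {
		ti.Mul(&ti, &t)
		srs[i].Mul(&srs[i], &ti)
	}

	// the result encodes powers of τ₁ = τ₀·t
	var tau1, want fr.Element
	tau1.Mul(&tau0, &t)
	want.SetOne()
	ok := true
	for i := 0; i < n; i++ {
		ok = ok && srs[i].Equal(&want)
		want.Mul(&want, &tau1)
	}
	fmt.Println("updated transcript encodes powers of τ₀·t:", ok)
}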
+ scaleG1InPlace(c.G1.Tau[1:], tauUpdates[1:]) // first element remains 1 scaleG2InPlace(c.G2.Tau[1:], tauUpdates[1:]) alphaUpdates := make([]fr.Element, len(c.G1.AlphaTau)) alphaUpdates[0].Set(alphaUpdate) - for i := i0; i < len(alphaUpdates); i++ { + for i := range alphaUpdates { alphaUpdates[i].Mul(&tauUpdates[i], &alphaUpdates[1]) } - scaleG1InPlace(c.G1.AlphaTau[i0:], alphaUpdates[i0:]) // first element may have been precomputed + scaleG1InPlace(c.G1.AlphaTau, alphaUpdates) betaUpdates := make([]fr.Element, len(c.G1.BetaTau)) betaUpdates[0].Set(betaUpdate) - for i := i0; i < len(betaUpdates); i++ { + for i := range betaUpdates { alphaUpdates[i].Mul(&tauUpdates[i], &betaUpdates[1]) } - scaleG1InPlace(c.G1.BetaTau[i0:], betaUpdates[i0:]) + scaleG1InPlace(c.G1.BetaTau, betaUpdates) var betaUpdateI big.Int betaUpdate.SetBigInt(&betaUpdateI) @@ -122,24 +118,8 @@ func (c *SrsCommons) update(tauUpdate, alphaUpdate, betaUpdate *fr.Element, prin // WARNING: Seal modifies p, just as Contribute does. // The result will be an INVALID Phase1 object, since no proof of correctness is produced. func (p *Phase1) Seal(beaconChallenge []byte) SrsCommons { - var ( - bb bytes.Buffer - err error - ) - bb.Write(p.hash()) - bb.Write(beaconChallenge) - - newContribs := make([]fr.Element, 3) - // cryptographically unlikely for this to be run more than once - for newContribs[0].IsZero() || newContribs[1].IsZero() || newContribs[2].IsZero() { - if newContribs, err = fr.Hash(bb.Bytes(), []byte("Groth16 SRS generation ceremony - Phase 1 Final Step"), 3); err != nil { - panic(err) - } - bb.WriteByte('=') // padding just so that the hash is different next time - } - - p.parameters.update(&newContribs[0], &newContribs[1], &newContribs[2], false) - + newContribs := beaconContributions(p.hash(), beaconChallenge, 3) + p.parameters.update(&newContribs[0], &newContribs[1], &newContribs[2]) return p.parameters } diff --git a/backend/groth16/bn254/mpcsetup/phase2.go b/backend/groth16/bn254/mpcsetup/phase2.go index 769c52c8e3..ac54387241 100644 --- a/backend/groth16/bn254/mpcsetup/phase2.go +++ b/backend/groth16/bn254/mpcsetup/phase2.go @@ -104,52 +104,60 @@ func (p *Phase2) Verify(next *Phase2) error { return nil } -func (p *Phase2) Contribute() { - // Sample toxic δ - var delta, deltaInv fr.Element - var deltaBI, deltaInvBI big.Int - - p.Challenge = p.hash() - - if len(p.Parameters.G1.SigmaCKK) > 255 { - panic("too many commitments") // DST collision +// update modifies delta +func (p *Phase2) update(delta *fr.Element, sigma []fr.Element) { + var I big.Int + + scale := func(point any) { + switch p := point.(type) { + case *curve.G1Affine: + p.ScalarMultiplication(p, &I) + case *curve.G2Affine: + p.ScalarMultiplication(p, &I) + default: + panic("unknown type") + } } - for i := range p.Parameters.G1.SigmaCKK { - var ( - sigmaContribution fr.Element - sigmaContributionI big.Int - ) - - pk := p.Parameters.G1.SigmaCKK[i] - p.Sigmas[i], sigmaContribution = updateValue(&pk[0], p.Challenge, byte(2+i)) - sigmaContribution.BigInt(&sigmaContributionI) - for j := 1; j < len(pk); j++ { - pk[j].ScalarMultiplication(&pk[j], &sigmaContributionI) + + for i := range sigma { + sigma[i].BigInt(&I) + for j := range sigma { + scale(&p.Parameters.G1.SigmaCKK[i][j]) } - p.Parameters.G2.Sigma[i].ScalarMultiplication(&p.Parameters.G2.Sigma[i], &sigmaContributionI) + point := &p.Parameters.G2.Sigma[i] + point.ScalarMultiplicationBase(&I) } - p.Delta, delta = updateValue(&p.Parameters.G1.Delta, p.Challenge, 1) + delta.BigInt(&I) + 
scale(&p.Parameters.G2.Delta) + scale(&p.Parameters.G1.Delta) - deltaInv.Inverse(&delta) - delta.BigInt(&deltaBI) - deltaInv.BigInt(&deltaInvBI) + delta.Inverse(delta) + delta.BigInt(&I) + for i := range p.Parameters.G1.Z { + scale(&p.Parameters.G1.Z[i]) + } + for i := range p.Parameters.G1.PKK { + scale(&p.Parameters.G1.PKK[i]) + } +} - // Update [δ]₂ - p.Parameters.G2.Delta.ScalarMultiplication(&p.Parameters.G2.Delta, &deltaBI) +func (p *Phase2) Contribute() { + p.Challenge = p.hash() - p.Parameters.G1.Delta.ScalarMultiplication(&p.Parameters.G1.Delta, &deltaBI) - p.Parameters.G2.Delta.ScalarMultiplication(&p.Parameters.G2.Delta, &deltaBI) + // sample value contributions and provide correctness proofs + var delta fr.Element + p.Delta, delta = updateValue(p.Parameters.G1.Delta, p.Challenge, 1) - // Update Z using δ⁻¹ - for i := 0; i < len(p.Parameters.G1.Z); i++ { - p.Parameters.G1.Z[i].ScalarMultiplication(&p.Parameters.G1.Z[i], &deltaInvBI) + sigma := make([]fr.Element, len(p.Parameters.G1.SigmaCKK)) + if len(sigma) > 255 { + panic("too many commitments") // DST collision } - - // Update PKK using δ⁻¹ - for i := 0; i < len(p.Parameters.G1.PKK); i++ { - p.Parameters.G1.PKK[i].ScalarMultiplication(&p.Parameters.G1.PKK[i], &deltaInvBI) + for i := range sigma { + p.Sigmas[i], sigma[i] = updateValue(p.Parameters.G1.SigmaCKK[i][0], p.Challenge, byte(2+i)) } + + p.update(&delta, sigma) } // Init is to be run by the coordinator diff --git a/backend/groth16/bn254/mpcsetup/setup.go b/backend/groth16/bn254/mpcsetup/setup.go index 24273e01f4..f13be1c20c 100644 --- a/backend/groth16/bn254/mpcsetup/setup.go +++ b/backend/groth16/bn254/mpcsetup/setup.go @@ -19,14 +19,16 @@ import ( // The inner workings of the random beacon are out of scope. // WARNING: Seal modifies p, just as Contribute does. // The result will be an INVALID Phase1 object, since no proof of correctness is produced. -func (p *Phase2) Seal(commons *SrsCommons, evals *Phase2Evaluations, nConstraints int) (pk groth16.ProvingKey, vk groth16.VerifyingKey) { +func (p *Phase2) Seal(commons *SrsCommons, evals *Phase2Evaluations, nbConstraints int, beaconChallenge []byte) (pk groth16.ProvingKey, vk groth16.VerifyingKey) { - // TODO @Tabaie beacon contribution + // final contributions + contributions := beaconContributions(p.hash(), beaconChallenge, 1+len(p.Sigmas)) + p.update(&contributions[0], contributions[1:]) _, _, _, g2 := curve.Generators() // Initialize PK - pk.Domain = *fft.NewDomain(uint64(nConstraints)) + pk.Domain = *fft.NewDomain(uint64(nbConstraints)) pk.G1.Alpha.Set(&commons.G1.AlphaTau[0]) pk.G1.Beta.Set(&commons.G1.BetaTau[0]) pk.G1.Delta.Set(&p.Parameters.G1.Delta) diff --git a/backend/groth16/bn254/mpcsetup/utils.go b/backend/groth16/bn254/mpcsetup/utils.go index a429eed3bc..f24743a60c 100644 --- a/backend/groth16/bn254/mpcsetup/utils.go +++ b/backend/groth16/bn254/mpcsetup/utils.go @@ -8,15 +8,13 @@ package mpcsetup import ( "bytes" "errors" - "math/big" - "math/bits" - "runtime" - "time" - "github.com/consensys/gnark-crypto/ecc" curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fr" "github.com/consensys/gnark/internal/utils" + "math/big" + "math/bits" + "runtime" ) func bitReverse[T any](a []T) { @@ -199,10 +197,6 @@ func genR(sG1 curve.G1Affine, challenge []byte, dst byte) curve.G2Affine { return spG2 } -type RandomBeacon func(time.Time) []byte - -// func (rb RandomBeacon) GenerateChallenge(...) 
[]byte {} - type pair struct { g1 curve.G1Affine g2 *curve.G2Affine // optional; some values expect to have a 𝔾₂ representation, some don't. @@ -223,7 +217,7 @@ type valueUpdate struct { // updateValue produces values associated with contribution to an existing value. // if prevCommitment contains only a 𝔾₁ value, then so will updatedCommitment // the second output is toxic waste. It is the caller's responsibility to safely "dispose" of it. -func updateValue(value *curve.G1Affine, challenge []byte, dst byte) (proof valueUpdate, contributionValue fr.Element) { +func updateValue(value curve.G1Affine, challenge []byte, dst byte) (proof valueUpdate, contributionValue fr.Element) { if _, err := contributionValue.SetRandom(); err != nil { panic(err) } @@ -232,7 +226,7 @@ func updateValue(value *curve.G1Affine, challenge []byte, dst byte) (proof value _, _, g1, _ := curve.Generators() proof.contributionCommitment.ScalarMultiplication(&g1, &contributionValueI) - value.ScalarMultiplication(value, &contributionValueI) + value.ScalarMultiplication(&value, &contributionValueI) // proof of knowledge to commitment. Algorithm 3 from section 3.7 pokBase := genR(proof.contributionCommitment, challenge, dst) // r @@ -360,3 +354,34 @@ func partialSums(s ...int) []int { } return sums } + +func beaconContributions(hash, beaconChallenge []byte, n int) []fr.Element { + var ( + bb bytes.Buffer + err error + ) + bb.Grow(len(hash) + len(beaconChallenge)) + bb.Write(hash) + bb.Write(beaconChallenge) + + res := make([]fr.Element, 1) + + allNonZero := func() bool { + for i := range res { + if res[i].IsZero() { + return false + } + } + return true + } + + // cryptographically unlikely for this to be run more than once + for !allNonZero() { + if res, err = fr.Hash(bb.Bytes(), []byte("Groth16 SRS generation ceremony - Phase 1 Final Step"), n); err != nil { + panic(err) + } + bb.WriteByte('=') // padding just so that the hash is different next time + } + + return res +} From c2f3b8dabb64cfe27f439f91020028ac3287dcb2 Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Sun, 15 Dec 2024 19:47:37 -0600 Subject: [PATCH 24/25] feat marshal --- backend/groth16/bn254/mpcsetup/marshal.go | 162 ++++++++++++++-------- backend/groth16/bn254/mpcsetup/utils.go | 2 +- 2 files changed, 108 insertions(+), 56 deletions(-) diff --git a/backend/groth16/bn254/mpcsetup/marshal.go b/backend/groth16/bn254/mpcsetup/marshal.go index bb975bbcbf..b2f878fda9 100644 --- a/backend/groth16/bn254/mpcsetup/marshal.go +++ b/backend/groth16/bn254/mpcsetup/marshal.go @@ -6,11 +6,12 @@ package mpcsetup import ( + "encoding/binary" curve "github.com/consensys/gnark-crypto/ecc/bn254" "io" ) -func appendRefs[T any](s []interface{}, v []T) []interface{} { +func appendRefs[T any](s []any, v []T) []any { for i := range v { s = append(s, &v[i]) } @@ -55,110 +56,161 @@ func (p *Phase1) ReadFrom(reader io.Reader) (n int64, err error) { return } -// WriteTo implements io.WriterTo -func (p *Phase2) WriteTo(writer io.Writer) (int64, error) { - n, err := p.writeTo(writer) - if err != nil { - return n, err +// slice of references for the parameters of p +func (p *Phase2) refsSlice() []any { + nbCommitments := len(p.Parameters.G2.Sigma) + if nbCommitments > 65535 { + panic("nbCommitments not fitting in 16 bits") } - nBytes, err := writer.Write(p.Challenge) - return int64(nBytes) + n, err -} -func (p *Phase2) writeTo(writer io.Writer) (int64, error) { - enc := curve.NewEncoder(writer) - toEncode := []interface{}{ - &p.PublicKey.SG, - 
&p.PublicKey.SXG, - &p.PublicKey.XR, - &p.Parameters.G1.Delta, - p.Parameters.G1.PKK, - p.Parameters.G1.Z, - &p.Parameters.G2.Delta, + expectedLen := 2*nbCommitments + 5 + refs := make([]any, 5, expectedLen) + refs[0] = uint16(nbCommitments) + refs[1] = &p.Parameters.G1.Delta + refs[2] = &p.Parameters.G1.PKK // unique size: private input size, excluding those committed to + refs[3] = &p.Parameters.G1.Z // unique size: N-1 + refs[4] = &p.Parameters.G2.Delta + + refs = appendRefs(refs, p.Parameters.G1.SigmaCKK) + refs = appendRefs(refs, p.Parameters.G2.Sigma) + + if len(refs) != expectedLen { + panic("incorrect length estimate") } - for _, v := range toEncode { + return refs +} + +// WriteTo implements io.WriterTo +func (p *Phase2) WriteTo(writer io.Writer) (int64, error) { + + // write the parameters + enc := curve.NewEncoder(writer) + for _, v := range p.refsSlice() { if err := enc.Encode(v); err != nil { return enc.BytesWritten(), err } } - return enc.BytesWritten(), nil + //write the proofs + dn, err := p.Delta.WriteTo(writer) + n := enc.BytesWritten() + dn + if err != nil { + return n, err + } + + for i := range p.Sigmas { + dn, err = p.Sigmas[i].WriteTo(writer) + n += dn + if err != nil { + return n, err + } + } + + return n, nil } // ReadFrom implements io.ReaderFrom func (p *Phase2) ReadFrom(reader io.Reader) (int64, error) { - dec := curve.NewDecoder(reader) - toEncode := []interface{}{ - &p.PublicKey.SG, - &p.PublicKey.SXG, - &p.PublicKey.XR, - &p.Parameters.G1.Delta, - &p.Parameters.G1.PKK, - &p.Parameters.G1.Z, - &p.Parameters.G2.Delta, + var nbCommitments uint16 + + if err := binary.Read(reader, binary.BigEndian, &nbCommitments); err != nil { + return -1, err // binary.Read doesn't return the number of bytes read } + n := int64(2) // we've definitely successfully read 2 bytes - for _, v := range toEncode { + p.Sigmas = make([]valueUpdate, nbCommitments) + p.Parameters.G1.SigmaCKK = make([][]curve.G1Affine, nbCommitments) + p.Parameters.G2.Sigma = make([]curve.G2Affine, nbCommitments) + + dec := curve.NewDecoder(reader) + for _, v := range p.refsSlice()[1:] { // nbCommitments already read if err := dec.Decode(v); err != nil { - return dec.BytesRead(), err + return n + dec.BytesRead(), err } } + n += dec.BytesRead() + + dn, err := p.Delta.ReadFrom(reader) + n += dn + if err != nil { + return n, err + } - p.Challenge = make([]byte, 32) - n, err := reader.Read(p.Challenge) - return int64(n) + dec.BytesRead(), err + for i := range p.Sigmas { + dn, err = p.Sigmas[i].ReadFrom(reader) + n += dn + if err != nil { + return n, err + } + } + return n, nil +} + +func (c *Phase2Evaluations) refsSlice() []any { + N := uint64(len(c.G1.A)) + expectedLen := 3*N + 2 + refs := make([]any, 2, expectedLen) + refs[0] = &c.G1.CKK + refs[1] = &c.G1.VKK + refs = appendRefs(refs, c.G1.A) + refs = appendRefs(refs, c.G1.B) + refs = appendRefs(refs, c.G2.B) + + if uint64(len(refs)) != expectedLen { + panic("incorrect length estimate") + } + + return refs } // WriteTo implements io.WriterTo func (c *Phase2Evaluations) WriteTo(writer io.Writer) (int64, error) { enc := curve.NewEncoder(writer) - toEncode := []interface{}{ - c.G1.A, - c.G1.B, - c.G2.B, - } - for _, v := range toEncode { + for _, v := range c.refsSlice() { if err := enc.Encode(v); err != nil { return enc.BytesWritten(), err } } - return enc.BytesWritten(), nil } // ReadFrom implements io.ReaderFrom func (c *Phase2Evaluations) ReadFrom(reader io.Reader) (int64, error) { - dec := curve.NewDecoder(reader) - toEncode := []interface{}{ - &c.G1.A, - 
&c.G1.B, - &c.G2.B, + var N uint64 + if err := binary.Read(reader, binary.BigEndian, &N); err != nil { + return -1, err // binary.Read doesn't return the number of bytes read } + n := int64(8) + + c.G1.A = make([]curve.G1Affine, N) + c.G1.B = make([]curve.G1Affine, N) + c.G2.B = make([]curve.G2Affine, N) - for _, v := range toEncode { + dec := curve.NewDecoder(reader) + for _, v := range c.refsSlice()[1:] { if err := dec.Decode(v); err != nil { - return dec.BytesRead(), err + return n + dec.BytesRead(), err } } - return dec.BytesRead(), nil + return n + dec.BytesRead(), nil } // refsSlice produces a slice consisting of references to all sub-elements // prepended by the size parameter, to be used in WriteTo and ReadFrom functions -func (c *SrsCommons) refsSlice() []interface{} { - N := len(c.G2.Tau) - estimatedNbElems := 5*N - 1 +func (c *SrsCommons) refsSlice() []any { + N := uint64(len(c.G2.Tau)) + expectedLen := 5*N - 1 // size N 1 // [β]₂ 1 // [τⁱ]₁ for 1 ≤ i ≤ 2N-2 2N-2 // [τⁱ]₂ for 1 ≤ i ≤ N-1 N-1 // [ατⁱ]₁ for 0 ≤ i ≤ N-1 N // [βτⁱ]₁ for 0 ≤ i ≤ N-1 N - refs := make([]interface{}, 1, estimatedNbElems) + refs := make([]any, 1, expectedLen) refs[0] = N refs = appendRefs(refs, c.G1.Tau[1:]) @@ -166,7 +218,7 @@ func (c *SrsCommons) refsSlice() []interface{} { refs = appendRefs(refs, c.G1.BetaTau) refs = appendRefs(refs, c.G1.AlphaTau) - if len(refs) != estimatedNbElems { + if uint64(len(refs)) != expectedLen { panic("incorrect length estimate") } diff --git a/backend/groth16/bn254/mpcsetup/utils.go b/backend/groth16/bn254/mpcsetup/utils.go index f24743a60c..d99abed52a 100644 --- a/backend/groth16/bn254/mpcsetup/utils.go +++ b/backend/groth16/bn254/mpcsetup/utils.go @@ -302,7 +302,7 @@ func bivariateRandomMonomials(ends ...int) []fr.Element { return nil } - res := make([]fr.Element, ends[]) + res := make([]fr.Element, ends[len(ends)-1]) if _, err := res[1].SetRandom(); err != nil { panic(err) } From 5708047d0f0cd0b9f29033ed1e57b647bf7c142d Mon Sep 17 00:00:00 2001 From: Arya Tabaie <15056835+Tabaie@users.noreply.github.com> Date: Sun, 15 Dec 2024 21:22:56 -0600 Subject: [PATCH 25/25] test integration full - fails --- .../groth16/bn254/mpcsetup/marshal_test.go | 13 +- backend/groth16/bn254/mpcsetup/phase1.go | 37 ++++- backend/groth16/bn254/mpcsetup/phase2.go | 33 +++-- backend/groth16/bn254/mpcsetup/setup.go | 4 +- backend/groth16/bn254/mpcsetup/setup_test.go | 130 ++++++++++-------- 5 files changed, 128 insertions(+), 89 deletions(-) diff --git a/backend/groth16/bn254/mpcsetup/marshal_test.go b/backend/groth16/bn254/mpcsetup/marshal_test.go index 51651f8aca..adbfc3fe0e 100644 --- a/backend/groth16/bn254/mpcsetup/marshal_test.go +++ b/backend/groth16/bn254/mpcsetup/marshal_test.go @@ -5,17 +5,7 @@ package mpcsetup -import ( - "testing" - - curve "github.com/consensys/gnark-crypto/ecc/bn254" - cs "github.com/consensys/gnark/constraint/bn254" - "github.com/consensys/gnark/frontend" - "github.com/consensys/gnark/frontend/cs/r1cs" - gnarkio "github.com/consensys/gnark/io" - "github.com/stretchr/testify/require" -) - +/* TODO bring this back func TestContributionSerialization(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") @@ -41,3 +31,4 @@ func TestContributionSerialization(t *testing.T) { assert.NoError(gnarkio.RoundTripCheck(&srs2, func() interface{} { return new(Phase2) })) } +*/ diff --git a/backend/groth16/bn254/mpcsetup/phase1.go b/backend/groth16/bn254/mpcsetup/phase1.go index 91585aedb6..56e2457a9b 100644 --- a/backend/groth16/bn254/mpcsetup/phase1.go +++ 
b/backend/groth16/bn254/mpcsetup/phase1.go
@@ -10,6 +10,7 @@ import (
 	"crypto/sha256"
 	"errors"
 	"fmt"
+	"github.com/consensys/gnark-crypto/ecc"
 	curve "github.com/consensys/gnark-crypto/ecc/bn254"
 	"github.com/consensys/gnark-crypto/ecc/bn254/fr"
 	"math/big"
@@ -123,14 +124,19 @@ func (p *Phase1) Seal(beaconChallenge []byte) SrsCommons {
 	return p.parameters
 }
 
-func VerifyPhase1(c0, c1 *Phase1, c ...*Phase1) error {
-	contribs := append([]*Phase1{c0, c1}, c...)
-	for i := 0; i < len(contribs)-1; i++ {
-		if err := contribs[i].Verify(contribs[i+1]); err != nil {
-			return err
+// VerifyPhase1 and return the SRS parameters usable for any circuit of domain size N
+// beaconChallenge is the output of the random beacon
+// and c are the output from the contributors
+// WARNING: the last contribution object will be modified
+func VerifyPhase1(N uint64, beaconChallenge []byte, c ...*Phase1) (SrsCommons, error) {
+	prev := NewPhase1(N)
+	for i := range c {
+		if err := prev.Verify(c[i]); err != nil {
+			return SrsCommons{}, err
 		}
+		prev = c[i]
 	}
-	return nil
+	return prev.Seal(beaconChallenge), nil
 }
 
 // Verify assumes previous is correct
@@ -234,3 +240,22 @@ func (p *Phase1) hash() []byte {
 	sha.Write(p.Challenge)
 	return sha.Sum(nil)
 }
+
+// Initialize an empty Phase1 contribution object
+// to be used by the first contributor or the verifier
+// N is the FFT domain size
+func (p *Phase1) Initialize(N uint64) {
+	if ecc.NextPowerOfTwo(N) != N {
+		panic("N must be a power of 2")
+	}
+	p.parameters.setOne(N)
+}
+
+// NewPhase1 creates an empty Phase1 contribution object
+// to be used by the first contributor or the verifier
+// N is the FFT domain size
+func NewPhase1(N uint64) *Phase1 {
+	res := new(Phase1)
+	res.Initialize(N)
+	return res
+}
diff --git a/backend/groth16/bn254/mpcsetup/phase2.go b/backend/groth16/bn254/mpcsetup/phase2.go
index ac54387241..c13e6f9d56 100644
--- a/backend/groth16/bn254/mpcsetup/phase2.go
+++ b/backend/groth16/bn254/mpcsetup/phase2.go
@@ -10,6 +10,7 @@ import (
 	"crypto/sha256"
 	"errors"
 	"fmt"
+	"github.com/consensys/gnark/backend/groth16"
 	"github.com/consensys/gnark/backend/groth16/internal"
 	cs "github.com/consensys/gnark/constraint/bn254"
 	"math/big"
@@ -20,6 +21,8 @@ import (
 	"github.com/consensys/gnark/constraint"
 )
 
+// Phase2Evaluations components of the circuit keys
+// not depending on Phase2 randomisations
 type Phase2Evaluations struct { // TODO @Tabaie rename
 	G1 struct {
 		A []curve.G1Affine // A are the left coefficient polynomials for each witness element, evaluated at τ
@@ -160,10 +163,10 @@ func (p *Phase2) Contribute() {
 	p.update(&delta, sigma)
 }
 
-// Init is to be run by the coordinator
-// It involves no coin tosses. A verifier should
-// simply rerun all the steps
-func (p *Phase2) Init(r1cs *cs.R1CS, commons SrsCommons) Phase2Evaluations {
+// Initialize is to be run by the coordinator
+// It involves no coin tosses. A verifier should
+// simply rerun all the steps
+func (p *Phase2) Initialize(r1cs *cs.R1CS, commons *SrsCommons) Phase2Evaluations {
 
 	size := len(commons.G1.AlphaTau)
 	if size < r1cs.GetNbConstraints() {
@@ -314,22 +317,28 @@ func (p *Phase2) Init(r1cs *cs.R1CS, commons SrsCommons) Phase2Evaluations {
 	return evals
 }
 
-// VerifyPhase2
-// c0 must be initialized with the Init method
-func VerifyPhase2(c0, c1 *Phase2, c ...*Phase2) error {
-	// CRITICAL TODO: Should run the "beacon" step afterwards
-	contribs := append([]*Phase2{c0, c1}, c...)
- for i := 0; i < len(contribs)-1; i++ { - if err := contribs[i].Verify(contribs[i+1]); err != nil { - return err +// VerifyPhase2 for circuit described by r1cs +// using parameters from commons +// beaconChallenge is the output of the random beacon +// and c are the output from the contributors +// WARNING: the last contribution object will be modified +func VerifyPhase2(r1cs *cs.R1CS, commons *SrsCommons, beaconChallenge []byte, c ...*Phase2) (groth16.ProvingKey, groth16.VerifyingKey, error) { + prev := new(Phase2) + evals := prev.Initialize(r1cs, commons) + for i := range c { + if err := prev.Verify(c[i]); err != nil { + return nil, nil, err } + prev = c[i] } - return nil + + pk, vk := prev.Seal(commons, &evals, beaconChallenge) + return &pk, &vk, nil } func (p *Phase2) hash() []byte { sha := sha256.New() - p.writeTo(sha) + p.WriteTo(sha) return sha.Sum(nil) } diff --git a/backend/groth16/bn254/mpcsetup/setup.go b/backend/groth16/bn254/mpcsetup/setup.go index f13be1c20c..ec0002f578 100644 --- a/backend/groth16/bn254/mpcsetup/setup.go +++ b/backend/groth16/bn254/mpcsetup/setup.go @@ -19,7 +19,7 @@ import ( // The inner workings of the random beacon are out of scope. // WARNING: Seal modifies p, just as Contribute does. // The result will be an INVALID Phase1 object, since no proof of correctness is produced. -func (p *Phase2) Seal(commons *SrsCommons, evals *Phase2Evaluations, nbConstraints int, beaconChallenge []byte) (pk groth16.ProvingKey, vk groth16.VerifyingKey) { +func (p *Phase2) Seal(commons *SrsCommons, evals *Phase2Evaluations, beaconChallenge []byte) (pk groth16.ProvingKey, vk groth16.VerifyingKey) { // final contributions contributions := beaconContributions(p.hash(), beaconChallenge, 1+len(p.Sigmas)) @@ -28,7 +28,7 @@ func (p *Phase2) Seal(commons *SrsCommons, evals *Phase2Evaluations, nbConstrain _, _, _, g2 := curve.Generators() // Initialize PK - pk.Domain = *fft.NewDomain(uint64(nbConstraints)) + pk.Domain = *fft.NewDomain(uint64(len(evals.G1.A))) pk.G1.Alpha.Set(&commons.G1.AlphaTau[0]) pk.G1.Beta.Set(&commons.G1.BetaTau[0]) pk.G1.Delta.Set(&p.Parameters.G1.Delta) diff --git a/backend/groth16/bn254/mpcsetup/setup_test.go b/backend/groth16/bn254/mpcsetup/setup_test.go index a1b5ad1e60..7396acea71 100644 --- a/backend/groth16/bn254/mpcsetup/setup_test.go +++ b/backend/groth16/bn254/mpcsetup/setup_test.go @@ -6,9 +6,12 @@ package mpcsetup import ( + "bytes" + "github.com/consensys/gnark-crypto/ecc" curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fr" cs "github.com/consensys/gnark/constraint/bn254" + "io" "testing" "github.com/consensys/gnark/backend/groth16" @@ -20,51 +23,85 @@ import ( native_mimc "github.com/consensys/gnark-crypto/ecc/bn254/fr/mimc" ) +// TestSetupCircuit a full integration test of the MPC setup func TestSetupCircuit(t *testing.T) { const ( - nContributionsPhase1 = 3 - nContributionsPhase2 = 3 - power = 9 + nbContributionsPhase1 = 3 + nbContributionsPhase2 = 3 ) assert := require.New(t) - var srs1 Phase1 - srs1.Initialize(1 << power) + // Compile the circuit + var circuit Circuit + ccs, err := frontend.Compile(curve.ID.ScalarField(), r1cs.NewBuilder, &circuit) + assert.NoError(err) - // Make and verify contributions for phase1 - for i := 1; i < nContributionsPhase1; i++ { - // we clone test purposes; but in practice, participant will receive a []byte, deserialize it, - // add its contribution and send back to coordinator. 
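All of the Verify calls in this series bottom out in pairing checks of the kind documented for genR: a contributor publishes [x]₁ together with π = x·r ∈ 𝔾₂ for a challenge point r, and knowledge of x is accepted iff e([x]₁, r) = e([1]₁, π). A standalone sketch of that single check (illustrative only; in the real protocol the challenge point comes from genR and the checks are batched via sameRatio):

package main

import (
	"fmt"
	"math/big"

	curve "github.com/consensys/gnark-crypto/ecc/bn254"
	"github.com/consensys/gnark-crypto/ecc/bn254/fr"
)

func main() {
	_, _, g1, _ := curve.Generators()

	// the contributor's secret x and its public commitment [x]₁
	var x fr.Element
	if _, err := x.SetRandom(); err != nil {
		panic(err)
	}
	var xI big.Int
	x.BigInt(&xI)
	var xG1 curve.G1Affine
	xG1.ScalarMultiplication(&g1, &xI)

	// r plays the role of genR(...); any 𝔾₂ point works for this sketch
	var r curve.G2Affine
	r.ScalarMultiplicationBase(big.NewInt(42))

	// proof of knowledge: π = x·r
	var pi curve.G2Affine
	pi.ScalarMultiplication(&r, &xI)

	// verifier: e([x]₁, r) =? e([1]₁, π)
	lhs, err := curve.Pair([]curve.G1Affine{xG1}, []curve.G2Affine{r})
	if err != nil {
		panic(err)
	}
	rhs, err := curve.Pair([]curve.G1Affine{g1}, []curve.G2Affine{pi})
	if err != nil {
		panic(err)
	}
	fmt.Println("proof of knowledge verifies:", lhs.Equal(&rhs))
}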
- prev := srs1.clone() + domainSize := ecc.NextPowerOfTwo(uint64(ccs.GetNbConstraints())) - srs1.Contribute() - assert.NoError(VerifyPhase1(&prev, &srs1)) + var ( + bb bytes.Buffer // simulating network communications + serialized [max(nbContributionsPhase1, nbContributionsPhase2)][]byte + phase1 [nbContributionsPhase1]*Phase1 + p1 Phase1 + phase2 [nbContributionsPhase2]*Phase2 + p2 Phase2 + ) + + serialize := func(v io.WriterTo) []byte { + bb.Reset() + _, err = v.WriteTo(&bb) + assert.NoError(err) + return bb.Bytes() + } + deserialize := func(v io.ReaderFrom, b []byte) { + n, err := v.ReadFrom(bytes.NewReader(b)) + assert.NoError(err) + assert.Equal(len(b), int(n)) } - // Compile the circuit - var myCircuit Circuit - ccs, err := frontend.Compile(curve.ID.ScalarField(), r1cs.NewBuilder, &myCircuit) - assert.NoError(err) + // Make contributions for serialized + for i := range phase1 { + if i == 0 { // no "predecessor" to the first contribution + p1.Initialize(domainSize) + } + + p1.Contribute() + serialized[i] = serialize(&p1) + } + + // read all Phase1 objects + for i := range phase1 { + phase1[i] = new(Phase1) + deserialize(phase1[i], serialized[i]) + } + + // Verify contributions for phase 1 and generate non-circuit-specific parameters + srsCommons, err := VerifyPhase1(domainSize, []byte("testing phase1"), phase1[:]...) + { + var commonsRead SrsCommons + deserialize(&commonsRead, serialize(&srsCommons)) + srsCommons = commonsRead + } - var evals Phase2Evaluations r1cs := ccs.(*cs.R1CS) // Prepare for phase-2 - srs2, evals := InitPhase2(r1cs, &srs1) - - // Make and verify contributions for phase1 - for i := 1; i < nContributionsPhase2; i++ { - // we clone for test purposes; but in practice, participant will receive a []byte, deserialize it, - // add its contribution and send back to coordinator. - prev := srs2.clone() + for i := range phase2 { + if i == 0 { + p2.Initialize(r1cs, &srsCommons) + } + p2.Contribute() + serialized[i] = serialize(&p2) + } - srs2.Contribute() - assert.NoError(VerifyPhase2(&prev, &srs2)) + for i := range phase2 { + phase2[i] = new(Phase2) + deserialize(phase2[i], serialized[i]) } - // Extract the proving and verifying keys - pk, vk := ExtractKeys(&srs1, &srs2, &evals, ccs.GetNbConstraints()) + pk, vk, err := VerifyPhase2(r1cs, &srsCommons, []byte("testing phase2"), phase2[:]...) + assert.NoError(err) // Build the witness var preImage, hash fr.Element @@ -81,13 +118,14 @@ func TestSetupCircuit(t *testing.T) { assert.NoError(err) // groth16: ensure proof is verified - proof, err := groth16.Prove(ccs, &pk, witness) + proof, err := groth16.Prove(ccs, pk, witness) assert.NoError(err) - err = groth16.Verify(proof, &vk, pubWitness) + err = groth16.Verify(proof, vk, pubWitness) assert.NoError(err) } +/* func BenchmarkPhase1(b *testing.B) { const power = 14 @@ -140,7 +178,7 @@ func BenchmarkPhase2(b *testing.B) { }) } - +*/ // Circuit defines a pre-image knowledge proof // mimc(secret preImage) = public hash type Circuit struct { @@ -158,32 +196,8 @@ func (circuit *Circuit) Define(api frontend.API) error { mimc.Write(circuit.PreImage) api.AssertIsEqual(circuit.Hash, mimc.Sum()) - return nil -} - -func (p *Phase1) clone() Phase1 { - r := Phase1{} - r.Parameters.G1.Tau = append(r.Parameters.G1.Tau, p.Parameters.G1.Tau...) - r.Parameters.G1.AlphaTau = append(r.Parameters.G1.AlphaTau, p.Parameters.G1.AlphaTau...) - r.Parameters.G1.BetaTau = append(r.Parameters.G1.BetaTau, p.Parameters.G1.BetaTau...) - - r.Parameters.G2.Tau = append(r.Parameters.G2.Tau, p.Parameters.G2.Tau...) 
- r.Parameters.G2.Beta = p.Parameters.G2.Beta - - r.PublicKeys = p.PublicKeys - r.Hash = append(r.Hash, p.Hash...) - - return r -} - -func (p *Phase2) clone() Phase2 { - r := Phase2{} - r.Parameters.G1.Delta = p.Parameters.G1.Delta - r.Parameters.G1.PKK = append(r.Parameters.G1.PKK, p.Parameters.G1.PKK...) - r.Parameters.G1.Z = append(r.Parameters.G1.Z, p.Parameters.G1.Z...) - r.Parameters.G2.Delta = p.Parameters.G2.Delta - r.PublicKey = p.PublicKey - r.Challenge = append(r.Challenge, p.Challenge...) + c, err := api.(frontend.Committer).Commit(circuit.PreImage, circuit.Hash) + api.AssertIsDifferent(c, 0) - return r + return err }
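Condensed, the ceremony exercised by this test reduces to the following coordinator-side flow. This is a sketch assuming the APIs introduced in this series and the Circuit type defined above; serialization between participants and any error handling beyond panics are elided, and a real ceremony would round-trip each object through bytes as the test does:

package main

import (
	"github.com/consensys/gnark-crypto/ecc"
	curve "github.com/consensys/gnark-crypto/ecc/bn254"
	"github.com/consensys/gnark/backend/groth16/bn254/mpcsetup"
	cs "github.com/consensys/gnark/constraint/bn254"
	"github.com/consensys/gnark/frontend"
	"github.com/consensys/gnark/frontend/cs/r1cs"
)

func main() {
	// compile the circuit; Circuit is the test circuit above (an assumption of this sketch)
	ccs, err := frontend.Compile(curve.ID.ScalarField(), r1cs.NewBuilder, &Circuit{})
	if err != nil {
		panic(err)
	}
	r1cs := ccs.(*cs.R1CS)
	domainSize := ecc.NextPowerOfTwo(uint64(ccs.GetNbConstraints()))

	// phase 1: circuit-independent; each contributor deserializes the previous
	// object, calls Contribute, and passes it on
	var p1 mpcsetup.Phase1
	p1.Initialize(domainSize)
	p1.Contribute()
	commons, err := mpcsetup.VerifyPhase1(domainSize, []byte("phase1 beacon"), &p1)
	if err != nil {
		panic(err)
	}

	// phase 2: circuit-specific
	var p2 mpcsetup.Phase2
	p2.Initialize(r1cs, &commons)
	p2.Contribute()
	pk, vk, err := mpcsetup.VerifyPhase2(r1cs, &commons, []byte("phase2 beacon"), &p2)
	if err != nil {
		panic(err)
	}
	_, _ = pk, vk // feed into groth16.Prove / groth16.Verify as in the test
}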