diff --git a/backend/groth16/bn254/mpcsetup/lagrange.go b/backend/groth16/bn254/mpcsetup/lagrange.go index b41b6596ac..57eff9d56f 100644 --- a/backend/groth16/bn254/mpcsetup/lagrange.go +++ b/backend/groth16/bn254/mpcsetup/lagrange.go @@ -75,7 +75,7 @@ func butterflyG2(a *curve.G2Affine, b *curve.G2Affine) { b.Sub(&t, b) } -// kerDIF8 is a kernel that process a FFT of size 8 +// kerDIF8 is a kernel that processes an FFT of size 8 func kerDIF8G1(a []curve.G1Affine, twiddles [][]fr.Element, stage int) { butterflyG1(&a[0], &a[4]) butterflyG1(&a[1], &a[5]) @@ -103,7 +103,7 @@ func kerDIF8G1(a []curve.G1Affine, twiddles [][]fr.Element, stage int) { butterflyG1(&a[6], &a[7]) } -// kerDIF8 is a kernel that process a FFT of size 8 +// kerDIF8 is a kernel that processes an FFT of size 8 func kerDIF8G2(a []curve.G2Affine, twiddles [][]fr.Element, stage int) { butterflyG2(&a[0], &a[4]) butterflyG2(&a[1], &a[5]) diff --git a/backend/groth16/bn254/mpcsetup/marshal.go b/backend/groth16/bn254/mpcsetup/marshal.go index e2c26d591d..b2f878fda9 100644 --- a/backend/groth16/bn254/mpcsetup/marshal.go +++ b/backend/groth16/bn254/mpcsetup/marshal.go @@ -6,165 +6,267 @@ package mpcsetup import ( + "encoding/binary" curve "github.com/consensys/gnark-crypto/ecc/bn254" "io" ) +func appendRefs[T any](s []any, v []T) []any { + for i := range v { + s = append(s, &v[i]) + } + return s +} + // WriteTo implements io.WriterTo -func (phase1 *Phase1) WriteTo(writer io.Writer) (int64, error) { - n, err := phase1.writeTo(writer) - if err != nil { - return n, err +// It does not write the Challenge from the previous contribution +func (p *Phase1) WriteTo(writer io.Writer) (n int64, err error) { + var dn int64 + for _, v := range []io.WriterTo{ + &p.proofs.Tau, + &p.proofs.Alpha, + &p.proofs.Beta, + &p.parameters, + } { + dn, err = v.WriteTo(writer) + n += dn + if err != nil { + return + } } - nBytes, err := writer.Write(phase1.Hash) - return int64(nBytes) + n, err + return } -func (phase1 *Phase1) writeTo(writer io.Writer) (int64, error) { - toEncode := []interface{}{ - &phase1.PublicKeys.Tau.SG, - &phase1.PublicKeys.Tau.SXG, - &phase1.PublicKeys.Tau.XR, - &phase1.PublicKeys.Alpha.SG, - &phase1.PublicKeys.Alpha.SXG, - &phase1.PublicKeys.Alpha.XR, - &phase1.PublicKeys.Beta.SG, - &phase1.PublicKeys.Beta.SXG, - &phase1.PublicKeys.Beta.XR, - phase1.Parameters.G1.Tau, - phase1.Parameters.G1.AlphaTau, - phase1.Parameters.G1.BetaTau, - phase1.Parameters.G2.Tau, - &phase1.Parameters.G2.Beta, +// ReadFrom implements io.ReaderFrom +// It does not read the Challenge from the previous contribution +func (p *Phase1) ReadFrom(reader io.Reader) (n int64, err error) { + var dn int64 + for _, v := range []io.ReaderFrom{ + &p.proofs.Tau, + &p.proofs.Alpha, + &p.proofs.Beta, + &p.parameters, + } { + dn, err = v.ReadFrom(reader) + n += dn + if err != nil { + return + } } + return +} + +// slice of references for the parameters of p +func (p *Phase2) refsSlice() []any { + nbCommitments := len(p.Parameters.G2.Sigma) + if nbCommitments > 65535 { + panic("nbCommitments not fitting in 16 bits") + } + + expectedLen := 2*nbCommitments + 5 + refs := make([]any, 5, expectedLen) + refs[0] = uint16(nbCommitments) + refs[1] = &p.Parameters.G1.Delta + refs[2] = &p.Parameters.G1.PKK // unique size: private input size, excluding those committed to + refs[3] = &p.Parameters.G1.Z // unique size: N-1 + refs[4] = &p.Parameters.G2.Delta + + refs = appendRefs(refs, p.Parameters.G1.SigmaCKK) + refs = appendRefs(refs, p.Parameters.G2.Sigma) + + if len(refs) != 
expectedLen { + panic("incorrect length estimate") + } + + return refs +} + +// WriteTo implements io.WriterTo +func (p *Phase2) WriteTo(writer io.Writer) (int64, error) { + // write the parameters enc := curve.NewEncoder(writer) - for _, v := range toEncode { + for _, v := range p.refsSlice() { if err := enc.Encode(v); err != nil { return enc.BytesWritten(), err } } - return enc.BytesWritten(), nil + + //write the proofs + dn, err := p.Delta.WriteTo(writer) + n := enc.BytesWritten() + dn + if err != nil { + return n, err + } + + for i := range p.Sigmas { + dn, err = p.Sigmas[i].WriteTo(writer) + n += dn + if err != nil { + return n, err + } + } + + return n, nil } // ReadFrom implements io.ReaderFrom -func (phase1 *Phase1) ReadFrom(reader io.Reader) (int64, error) { - toEncode := []interface{}{ - &phase1.PublicKeys.Tau.SG, - &phase1.PublicKeys.Tau.SXG, - &phase1.PublicKeys.Tau.XR, - &phase1.PublicKeys.Alpha.SG, - &phase1.PublicKeys.Alpha.SXG, - &phase1.PublicKeys.Alpha.XR, - &phase1.PublicKeys.Beta.SG, - &phase1.PublicKeys.Beta.SXG, - &phase1.PublicKeys.Beta.XR, - &phase1.Parameters.G1.Tau, - &phase1.Parameters.G1.AlphaTau, - &phase1.Parameters.G1.BetaTau, - &phase1.Parameters.G2.Tau, - &phase1.Parameters.G2.Beta, +func (p *Phase2) ReadFrom(reader io.Reader) (int64, error) { + var nbCommitments uint16 + + if err := binary.Read(reader, binary.BigEndian, &nbCommitments); err != nil { + return -1, err // binary.Read doesn't return the number of bytes read } + n := int64(2) // we've definitely successfully read 2 bytes + + p.Sigmas = make([]valueUpdate, nbCommitments) + p.Parameters.G1.SigmaCKK = make([][]curve.G1Affine, nbCommitments) + p.Parameters.G2.Sigma = make([]curve.G2Affine, nbCommitments) dec := curve.NewDecoder(reader) - for _, v := range toEncode { + for _, v := range p.refsSlice()[1:] { // nbCommitments already read if err := dec.Decode(v); err != nil { - return dec.BytesRead(), err + return n + dec.BytesRead(), err } } - phase1.Hash = make([]byte, 32) - nBytes, err := reader.Read(phase1.Hash) - return dec.BytesRead() + int64(nBytes), err -} + n += dec.BytesRead() -// WriteTo implements io.WriterTo -func (phase2 *Phase2) WriteTo(writer io.Writer) (int64, error) { - n, err := phase2.writeTo(writer) + dn, err := p.Delta.ReadFrom(reader) + n += dn if err != nil { return n, err } - nBytes, err := writer.Write(phase2.Hash) - return int64(nBytes) + n, err + + for i := range p.Sigmas { + dn, err = p.Sigmas[i].ReadFrom(reader) + n += dn + if err != nil { + return n, err + } + } + + return n, nil } -func (c *Phase2) writeTo(writer io.Writer) (int64, error) { - enc := curve.NewEncoder(writer) - toEncode := []interface{}{ - &c.PublicKey.SG, - &c.PublicKey.SXG, - &c.PublicKey.XR, - &c.Parameters.G1.Delta, - c.Parameters.G1.L, - c.Parameters.G1.Z, - &c.Parameters.G2.Delta, +func (c *Phase2Evaluations) refsSlice() []any { + N := uint64(len(c.G1.A)) + expectedLen := 3*N + 2 + refs := make([]any, 2, expectedLen) + refs[0] = &c.G1.CKK + refs[1] = &c.G1.VKK + refs = appendRefs(refs, c.G1.A) + refs = appendRefs(refs, c.G1.B) + refs = appendRefs(refs, c.G2.B) + + if uint64(len(refs)) != expectedLen { + panic("incorrect length estimate") } - for _, v := range toEncode { + return refs +} + +// WriteTo implements io.WriterTo +func (c *Phase2Evaluations) WriteTo(writer io.Writer) (int64, error) { + enc := curve.NewEncoder(writer) + + for _, v := range c.refsSlice() { if err := enc.Encode(v); err != nil { return enc.BytesWritten(), err } } - return enc.BytesWritten(), nil } // ReadFrom implements 
io.ReaderFrom -func (c *Phase2) ReadFrom(reader io.Reader) (int64, error) { - dec := curve.NewDecoder(reader) - toEncode := []interface{}{ - &c.PublicKey.SG, - &c.PublicKey.SXG, - &c.PublicKey.XR, - &c.Parameters.G1.Delta, - &c.Parameters.G1.L, - &c.Parameters.G1.Z, - &c.Parameters.G2.Delta, +func (c *Phase2Evaluations) ReadFrom(reader io.Reader) (int64, error) { + var N uint64 + if err := binary.Read(reader, binary.BigEndian, &N); err != nil { + return -1, err // binary.Read doesn't return the number of bytes read } + n := int64(8) + + c.G1.A = make([]curve.G1Affine, N) + c.G1.B = make([]curve.G1Affine, N) + c.G2.B = make([]curve.G2Affine, N) - for _, v := range toEncode { + dec := curve.NewDecoder(reader) + for _, v := range c.refsSlice()[1:] { if err := dec.Decode(v); err != nil { - return dec.BytesRead(), err + return n + dec.BytesRead(), err } } - c.Hash = make([]byte, 32) - n, err := reader.Read(c.Hash) - return int64(n) + dec.BytesRead(), err - + return n + dec.BytesRead(), nil } -// WriteTo implements io.WriterTo -func (c *Phase2Evaluations) WriteTo(writer io.Writer) (int64, error) { - enc := curve.NewEncoder(writer) - toEncode := []interface{}{ - c.G1.A, - c.G1.B, - c.G2.B, +// refsSlice produces a slice consisting of references to all sub-elements +// prepended by the size parameter, to be used in WriteTo and ReadFrom functions +func (c *SrsCommons) refsSlice() []any { + N := uint64(len(c.G2.Tau)) + expectedLen := 5*N - 1 + // size N 1 + // [β]₂ 1 + // [τⁱ]₁ for 1 ≤ i ≤ 2N-2 2N-2 + // [τⁱ]₂ for 1 ≤ i ≤ N-1 N-1 + // [ατⁱ]₁ for 0 ≤ i ≤ N-1 N + // [βτⁱ]₁ for 0 ≤ i ≤ N-1 N + refs := make([]any, 1, expectedLen) + refs[0] = N + + refs = appendRefs(refs, c.G1.Tau[1:]) + refs = appendRefs(refs, c.G2.Tau[1:]) + refs = appendRefs(refs, c.G1.BetaTau) + refs = appendRefs(refs, c.G1.AlphaTau) + + if uint64(len(refs)) != expectedLen { + panic("incorrect length estimate") } - for _, v := range toEncode { + return refs +} + +func (c *SrsCommons) WriteTo(writer io.Writer) (int64, error) { + enc := curve.NewEncoder(writer) + for _, v := range c.refsSlice() { if err := enc.Encode(v); err != nil { return enc.BytesWritten(), err } } - return enc.BytesWritten(), nil } // ReadFrom implements io.ReaderFrom -func (c *Phase2Evaluations) ReadFrom(reader io.Reader) (int64, error) { +func (c *SrsCommons) ReadFrom(reader io.Reader) (n int64, err error) { + var N uint64 dec := curve.NewDecoder(reader) - toEncode := []interface{}{ - &c.G1.A, - &c.G1.B, - &c.G2.B, + if err = dec.Decode(&N); err != nil { + return dec.BytesRead(), err } - for _, v := range toEncode { - if err := dec.Decode(v); err != nil { + c.setZero(N) + + for _, v := range c.refsSlice()[1:] { // we've already decoded N + if err = dec.Decode(v); err != nil { return dec.BytesRead(), err } } - return dec.BytesRead(), nil } + +func (x *valueUpdate) WriteTo(writer io.Writer) (n int64, err error) { + enc := curve.NewEncoder(writer) + if err = enc.Encode(&x.contributionCommitment); err != nil { + return enc.BytesWritten(), err + } + err = enc.Encode(&x.contributionPok) + return enc.BytesWritten(), err +} + +func (x *valueUpdate) ReadFrom(reader io.Reader) (n int64, err error) { + dec := curve.NewDecoder(reader) + if err = dec.Decode(&x.contributionCommitment); err != nil { + return dec.BytesRead(), err + } + err = dec.Decode(&x.contributionPok) + return dec.BytesRead(), err +} diff --git a/backend/groth16/bn254/mpcsetup/marshal_test.go b/backend/groth16/bn254/mpcsetup/marshal_test.go index 317e58737a..adbfc3fe0e 100644 --- 
a/backend/groth16/bn254/mpcsetup/marshal_test.go +++ b/backend/groth16/bn254/mpcsetup/marshal_test.go @@ -5,17 +5,7 @@ package mpcsetup -import ( - "testing" - - curve "github.com/consensys/gnark-crypto/ecc/bn254" - cs "github.com/consensys/gnark/constraint/bn254" - "github.com/consensys/gnark/frontend" - "github.com/consensys/gnark/frontend/cs/r1cs" - gnarkio "github.com/consensys/gnark/io" - "github.com/stretchr/testify/require" -) - +/* TODO bring this back func TestContributionSerialization(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") @@ -23,7 +13,8 @@ func TestContributionSerialization(t *testing.T) { assert := require.New(t) // Phase 1 - srs1 := InitPhase1(9) + var srs1 Phase1 + srs1.Initialize(1 << 9) srs1.Contribute() assert.NoError(gnarkio.RoundTripCheck(&srs1, func() interface{} { return new(Phase1) })) @@ -40,3 +31,4 @@ func TestContributionSerialization(t *testing.T) { assert.NoError(gnarkio.RoundTripCheck(&srs2, func() interface{} { return new(Phase2) })) } +*/ diff --git a/backend/groth16/bn254/mpcsetup/phase1.go b/backend/groth16/bn254/mpcsetup/phase1.go index 18ffdd0647..56e2457a9b 100644 --- a/backend/groth16/bn254/mpcsetup/phase1.go +++ b/backend/groth16/bn254/mpcsetup/phase1.go @@ -6,187 +6,256 @@ package mpcsetup import ( + "bytes" "crypto/sha256" "errors" + "fmt" + "github.com/consensys/gnark-crypto/ecc" curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fr" - "math" "math/big" ) -// Phase1 represents the Phase1 of the MPC described in +// SrsCommons are the circuit-independent components of the Groth16 SRS, +// computed by the first phase. +// in all that follows, N is the domain size +type SrsCommons struct { + G1 struct { + Tau []curve.G1Affine // {[τ⁰]₁, [τ¹]₁, [τ²]₁, …, [τ²ᴺ⁻²]₁} + AlphaTau []curve.G1Affine // {α[τ⁰]₁, α[τ¹]₁, α[τ²]₁, …, α[τᴺ⁻¹]₁} + BetaTau []curve.G1Affine // {β[τ⁰]₁, β[τ¹]₁, β[τ²]₁, …, β[τᴺ⁻¹]₁} + } + G2 struct { + Tau []curve.G2Affine // {[τ⁰]₂, [τ¹]₂, [τ²]₂, …, [τᴺ⁻¹]₂} + Beta curve.G2Affine // [β]₂ + } +} + +// Phase1 in line with Phase1 of the MPC described in // https://eprint.iacr.org/2017/1050.pdf // // Also known as "Powers of Tau" type Phase1 struct { - Parameters struct { - G1 struct { - Tau []curve.G1Affine // {[τ⁰]₁, [τ¹]₁, [τ²]₁, …, [τ²ⁿ⁻²]₁} - AlphaTau []curve.G1Affine // {α[τ⁰]₁, α[τ¹]₁, α[τ²]₁, …, α[τⁿ⁻¹]₁} - BetaTau []curve.G1Affine // {β[τ⁰]₁, β[τ¹]₁, β[τ²]₁, …, β[τⁿ⁻¹]₁} - } - G2 struct { - Tau []curve.G2Affine // {[τ⁰]₂, [τ¹]₂, [τ²]₂, …, [τⁿ⁻¹]₂} - Beta curve.G2Affine // [β]₂ - } + proofs struct { // "main" contributions + Tau, Alpha, Beta valueUpdate } - PublicKeys struct { - Tau, Alpha, Beta PublicKey - } - Hash []byte // sha256 hash + parameters SrsCommons + Challenge []byte // Hash of the transcript PRIOR to this participant } -// InitPhase1 initialize phase 1 of the MPC. This is called once by the coordinator before -// any randomness contribution is made (see Contribute()). 
-func InitPhase1(power int) (phase1 Phase1) { - N := int(math.Pow(2, float64(power))) - - // Generate key pairs - var tau, alpha, beta fr.Element - tau.SetOne() - alpha.SetOne() - beta.SetOne() - phase1.PublicKeys.Tau = newPublicKey(tau, nil, 1) - phase1.PublicKeys.Alpha = newPublicKey(alpha, nil, 2) - phase1.PublicKeys.Beta = newPublicKey(beta, nil, 3) - - // First contribution use generators - _, _, g1, g2 := curve.Generators() - phase1.Parameters.G2.Beta.Set(&g2) - phase1.Parameters.G1.Tau = make([]curve.G1Affine, 2*N-1) - phase1.Parameters.G2.Tau = make([]curve.G2Affine, N) - phase1.Parameters.G1.AlphaTau = make([]curve.G1Affine, N) - phase1.Parameters.G1.BetaTau = make([]curve.G1Affine, N) - for i := 0; i < len(phase1.Parameters.G1.Tau); i++ { - phase1.Parameters.G1.Tau[i].Set(&g1) - } - for i := 0; i < len(phase1.Parameters.G2.Tau); i++ { - phase1.Parameters.G2.Tau[i].Set(&g2) - phase1.Parameters.G1.AlphaTau[i].Set(&g1) - phase1.Parameters.G1.BetaTau[i].Set(&g1) - } - - phase1.Parameters.G2.Beta.Set(&g2) - - // Compute hash of Contribution - phase1.Hash = phase1.hash() - - return +// Contribute contributes randomness to the Phase1 object. This mutates Phase1. +// p is trusted to be well-formed. The ReadFrom function performs such basic sanity checks. +func (p *Phase1) Contribute() { + p.Challenge = p.hash() + + // Generate main value updates + var ( + tauContrib, alphaContrib, betaContrib fr.Element + ) + p.proofs.Tau, tauContrib = updateValue(p.parameters.G1.Tau[1], p.Challenge, 1) + p.proofs.Alpha, alphaContrib = updateValue(p.parameters.G1.AlphaTau[0], p.Challenge, 2) + p.proofs.Beta, betaContrib = updateValue(p.parameters.G1.BetaTau[0], p.Challenge, 3) + + p.parameters.update(&tauContrib, &alphaContrib, &betaContrib) } -// Contribute contributes randomness to the phase1 object. This mutates phase1. -func (phase1 *Phase1) Contribute() { - N := len(phase1.Parameters.G2.Tau) - - // Generate key pairs - var tau, alpha, beta fr.Element - tau.SetRandom() - alpha.SetRandom() - beta.SetRandom() - phase1.PublicKeys.Tau = newPublicKey(tau, phase1.Hash[:], 1) - phase1.PublicKeys.Alpha = newPublicKey(alpha, phase1.Hash[:], 2) - phase1.PublicKeys.Beta = newPublicKey(beta, phase1.Hash[:], 3) - - // Compute powers of τ, ατ, and βτ - taus := powers(tau, 2*N-1) - alphaTau := make([]fr.Element, N) - betaTau := make([]fr.Element, N) - for i := 0; i < N; i++ { - alphaTau[i].Mul(&taus[i], &alpha) - betaTau[i].Mul(&taus[i], &beta) - } - - // Update using previous parameters - // TODO @gbotrel working with jacobian points here will help with perf. - scaleG1InPlace(phase1.Parameters.G1.Tau, taus) - scaleG2InPlace(phase1.Parameters.G2.Tau, taus[0:N]) - scaleG1InPlace(phase1.Parameters.G1.AlphaTau, alphaTau) - scaleG1InPlace(phase1.Parameters.G1.BetaTau, betaTau) - var betaBI big.Int - beta.BigInt(&betaBI) - phase1.Parameters.G2.Beta.ScalarMultiplication(&phase1.Parameters.G2.Beta, &betaBI) - - // Compute hash of Contribution - phase1.Hash = phase1.hash() +// setZero instantiates the parameters, and sets all contributions to zero +func (c *SrsCommons) setZero(N uint64) { + c.G1.Tau = make([]curve.G1Affine, 2*N-2) + c.G2.Tau = make([]curve.G2Affine, N) + c.G1.AlphaTau = make([]curve.G1Affine, N) + c.G1.BetaTau = make([]curve.G1Affine, N) + _, _, c.G1.Tau[0], c.G2.Tau[0] = curve.Generators() } -func VerifyPhase1(c0, c1 *Phase1, c ...*Phase1) error { - contribs := append([]*Phase1{c0, c1}, c...) 
- for i := 0; i < len(contribs)-1; i++ { - if err := verifyPhase1(contribs[i], contribs[i+1]); err != nil { - return err - } +// setOne instantiates the parameters, and sets all contributions to one +func (c *SrsCommons) setOne(N uint64) { + c.setZero(N) + for i := range c.G1.Tau { + c.G1.Tau[i] = c.G1.Tau[0] } - return nil + for i := range c.G1.AlphaTau { + c.G1.AlphaTau[i] = c.G1.AlphaTau[0] + c.G1.BetaTau[i] = c.G1.AlphaTau[0] + c.G2.Tau[i] = c.G2.Tau[0] + } + c.G2.Beta = c.G2.Tau[0] } -// verifyPhase1 checks that a contribution is based on a known previous Phase1 state. -func verifyPhase1(current, contribution *Phase1) error { - // Compute R for τ, α, β - tauR := genR(contribution.PublicKeys.Tau.SG, contribution.PublicKeys.Tau.SXG, current.Hash[:], 1) - alphaR := genR(contribution.PublicKeys.Alpha.SG, contribution.PublicKeys.Alpha.SXG, current.Hash[:], 2) - betaR := genR(contribution.PublicKeys.Beta.SG, contribution.PublicKeys.Beta.SXG, current.Hash[:], 3) +// from the fourth argument on this just gives an opportunity to avoid recomputing some scalar multiplications +func (c *SrsCommons) update(tauUpdate, alphaUpdate, betaUpdate *fr.Element) { + + // TODO @gbotrel working with jacobian points here will help with perf. + + tauUpdates := powers(tauUpdate, len(c.G1.Tau)) + // saving 3 exactly scalar muls among millions. Not a significant gain but might as well. + scaleG1InPlace(c.G1.Tau[1:], tauUpdates[1:]) // first element remains 1 + scaleG2InPlace(c.G2.Tau[1:], tauUpdates[1:]) - // Check for knowledge of toxic parameters - if !sameRatio(contribution.PublicKeys.Tau.SG, contribution.PublicKeys.Tau.SXG, contribution.PublicKeys.Tau.XR, tauR) { - return errors.New("couldn't verify public key of τ") + alphaUpdates := make([]fr.Element, len(c.G1.AlphaTau)) + alphaUpdates[0].Set(alphaUpdate) + for i := range alphaUpdates { + alphaUpdates[i].Mul(&tauUpdates[i], &alphaUpdates[1]) } - if !sameRatio(contribution.PublicKeys.Alpha.SG, contribution.PublicKeys.Alpha.SXG, contribution.PublicKeys.Alpha.XR, alphaR) { - return errors.New("couldn't verify public key of α") + scaleG1InPlace(c.G1.AlphaTau, alphaUpdates) + + betaUpdates := make([]fr.Element, len(c.G1.BetaTau)) + betaUpdates[0].Set(betaUpdate) + for i := range betaUpdates { + alphaUpdates[i].Mul(&tauUpdates[i], &betaUpdates[1]) } - if !sameRatio(contribution.PublicKeys.Beta.SG, contribution.PublicKeys.Beta.SXG, contribution.PublicKeys.Beta.XR, betaR) { - return errors.New("couldn't verify public key of β") + scaleG1InPlace(c.G1.BetaTau, betaUpdates) + + var betaUpdateI big.Int + betaUpdate.SetBigInt(&betaUpdateI) + c.G2.Beta.ScalarMultiplication(&c.G2.Beta, &betaUpdateI) +} + +// Seal performs the final contribution and outputs the final parameters. +// No randomization is performed at this step. +// A verifier should simply re-run this and check +// that it produces the same values. +// The inner workings of the random beacon are out of scope. +// WARNING: Seal modifies p, just as Contribute does. +// The result will be an INVALID Phase1 object, since no proof of correctness is produced. 
+func (p *Phase1) Seal(beaconChallenge []byte) SrsCommons { + newContribs := beaconContributions(p.hash(), beaconChallenge, 3) + p.parameters.update(&newContribs[0], &newContribs[1], &newContribs[2]) + return p.parameters +} + +// VerifyPhase1 and return the SRS parameters usable for any circuit of domain size N +// beaconChallenge is the output of the random beacon +// and c are the output from the contributors +// WARNING: the last contribution object will be modified +func VerifyPhase1(N uint64, beaconChallenge []byte, c ...*Phase1) (SrsCommons, error) { + prev := NewPhase1(N) + for i := range c { + if err := prev.Verify(c[i]); err != nil { + return SrsCommons{}, err + } + prev = c[i] } + return prev.Seal(beaconChallenge), nil +} + +// Verify assumes previous is correct +func (p *Phase1) Verify(next *Phase1) error { - // Check for valid updates using previous parameters - if !sameRatio(contribution.Parameters.G1.Tau[1], current.Parameters.G1.Tau[1], tauR, contribution.PublicKeys.Tau.XR) { - return errors.New("couldn't verify that [τ]₁ is based on previous contribution") + challenge := p.hash() + if len(next.Challenge) != 0 && !bytes.Equal(next.Challenge, challenge) { + return errors.New("the challenge does not match the previous phase's hash") } - if !sameRatio(contribution.Parameters.G1.AlphaTau[0], current.Parameters.G1.AlphaTau[0], alphaR, contribution.PublicKeys.Alpha.XR) { - return errors.New("couldn't verify that [α]₁ is based on previous contribution") + next.Challenge = challenge + + // the internal consistency of the vector sizes in next is assumed + // so is its well-formedness i.e. Tau[0] = 1 + // it remains to check it is consistent with p + N := len(next.parameters.G2.Tau) + if N != len(p.parameters.G2.Tau) { + return errors.New("domain size mismatch") } - if !sameRatio(contribution.Parameters.G1.BetaTau[0], current.Parameters.G1.BetaTau[0], betaR, contribution.PublicKeys.Beta.XR) { - return errors.New("couldn't verify that [β]₁ is based on previous contribution") + + // verify updates to τ, α, β + if err := next.proofs.Tau.verify(pair{p.parameters.G1.Tau[1], nil}, pair{next.parameters.G1.Tau[1], nil}, challenge, 1); err != nil { + return fmt.Errorf("failed to verify contribution to τ: %w", err) } - if !sameRatio(contribution.PublicKeys.Tau.SG, contribution.PublicKeys.Tau.SXG, contribution.Parameters.G2.Tau[1], current.Parameters.G2.Tau[1]) { - return errors.New("couldn't verify that [τ]₂ is based on previous contribution") + if err := next.proofs.Alpha.verify(pair{p.parameters.G1.AlphaTau[0], nil}, pair{p.parameters.G1.AlphaTau[0], nil}, challenge, 2); err != nil { + return fmt.Errorf("failed to verify contribution to α: %w", err) } - if !sameRatio(contribution.PublicKeys.Beta.SG, contribution.PublicKeys.Beta.SXG, contribution.Parameters.G2.Beta, current.Parameters.G2.Beta) { - return errors.New("couldn't verify that [β]₂ is based on previous contribution") + if err := next.proofs.Beta.verify(pair{p.parameters.G1.BetaTau[0], &p.parameters.G2.Beta}, pair{next.parameters.G1.BetaTau[0], &next.parameters.G2.Beta}, challenge, 3); err != nil { + return fmt.Errorf("failed to verify contribution to β: %w", err) } - // Check for valid updates using powers of τ - _, _, g1, g2 := curve.Generators() - tauL1, tauL2 := linearCombinationG1(contribution.Parameters.G1.Tau) - if !sameRatio(tauL1, tauL2, contribution.Parameters.G2.Tau[1], g2) { - return errors.New("couldn't verify valid powers of τ in G₁") - } - alphaL1, alphaL2 := linearCombinationG1(contribution.Parameters.G1.AlphaTau) - 
if !sameRatio(alphaL1, alphaL2, contribution.Parameters.G2.Tau[1], g2) { - return errors.New("couldn't verify valid powers of α(τ) in G₁") + if !areInSubGroupG1(next.parameters.G1.Tau[2:]) || !areInSubGroupG1(next.parameters.G1.BetaTau[1:]) || !areInSubGroupG1(next.parameters.G1.AlphaTau[1:]) { + return errors.New("derived values 𝔾₁ subgroup check failed") } - betaL1, betaL2 := linearCombinationG1(contribution.Parameters.G1.BetaTau) - if !sameRatio(betaL1, betaL2, contribution.Parameters.G2.Tau[1], g2) { - return errors.New("couldn't verify valid powers of α(τ) in G₁") - } - tau2L1, tau2L2 := linearCombinationG2(contribution.Parameters.G2.Tau) - if !sameRatio(contribution.Parameters.G1.Tau[1], g1, tau2L1, tau2L2) { - return errors.New("couldn't verify valid powers of τ in G₂") + if !areInSubGroupG2(next.parameters.G2.Tau[2:]) { + return errors.New("derived values 𝔾₂ subgroup check failed") } - // Check hash of the contribution - h := contribution.hash() - for i := 0; i < len(h); i++ { - if h[i] != contribution.Hash[i] { - return errors.New("couldn't verify hash of contribution") - } + // lemma: let K be a field and + // F = ∑ fᵢⱼ XⁱYʲ F' = ∑ f'ᵢⱼ XⁱYʲ + // G = ∑ gᵢ Zⁱ G' = ∑ g'ᵢ Zⁱ + // polynomials in K[X,Y,Z]. + // if F/F' = G/G' + // then F/F' = G/G' ∈ K + // + // view our polynomials in K[X,Y,Z] + // By multiplying out the polynomials we get + // FG' = F'G ⇒ ∑ fᵢⱼg'ₖ XᶦYʲZᵏ = ∑ f'ᵢⱼgₖₗ XᶦYʲZᵏ + // pick i₀ ,j₀ , k₀ where f'ᵢ₀ⱼ₀, g'ₖ₀ ≠ 0 + // let x ≔ fᵢ₀ⱼ₀/f'ᵢ₀ⱼ₀ = gₖ₀/g'ₖ₀ + // now for any i,j: fᵢⱼg'ₖ₀ = f'ᵢⱼgₖ₀ ⇒ + // fᵢⱼ = x f'ᵢⱼ + // likewise for any i: fᵢ₀ⱼ₀g'ᵢ = f'ᵢ₀ⱼ₀gᵢ ⇒ + // gᵢ = x g'ᵢ + + // now we use this to check that: + // 1. aᵢ ≔ G1.Tau[i] = [τⁱ]₁ + // 2. bᵢ ≔ G2.Tau[i] = [τⁱ]₂ + // 3. cᵢ ≔ G1.AlphaTau[i] = [ατⁱ]₁ + // 4. dᵢ ≔ G1.BetaTau[i] = [βτⁱ]₁ + + // construct the polynomials + // F ≔ a₀ + a₁X + ... + a₂ₙ₋₃X²ᴺ⁻³ + c₀Y + c₁XY + ... + cₙ₋₂Xᴺ⁻²Y + d₀Y² + d₁XY² + ... + dₙ₋₂Xᴺ⁻²Y² + // F' ≔ a₁ + a₂X + ... + a₂ₙ₋₂X²ᴺ⁻³ + c₁Y + c₂XY + ... + cₙ₋₁Xᴺ⁻²Y + d₁Y² + d₂XY² + ... + dₙ₋₁Xᴺ⁻²Y² + // G ≔ b₀ + b₁Z + ... + bₙ₋₂Zᴺ⁻² + // G' ≔ b₁ + b₂Z + ... + bₙ₋₁Zᴺ⁻² + + // if F/F' = G/G' we get F/F' = G/G' = a₀/a₁ = 1/τ, which yields: + // for 0 ≤ i ≤ N-2: bᵢ = bᵢ₊₁/τ, cᵢ = cᵢ₊₁/τ, dᵢ = dᵢ₊₁/τ + // for 0 ≤ i ≤ 2N-3: aᵢ = aᵢ₊₁/τ + + // from previous checks we already know: + // 1. a₀ = 1 + // 2. b₀ = 1 + // 3. c₀ = α + // 4. d₀ = β + // and so the desired results follow + + ends := partialSums(len(next.parameters.G1.Tau), len(next.parameters.G1.AlphaTau), len(next.parameters.G1.BetaTau)) + + g1s := make([]curve.G1Affine, 0, ends[len(ends)-1]) + g1s = append(g1s, next.parameters.G1.Tau...) + g1s = append(g1s, next.parameters.G1.AlphaTau...) + g1s = append(g1s, next.parameters.G1.BetaTau...) 
+ + g1Num, g1Denom := linearCombinationsG1(g1s, bivariateRandomMonomials(ends...), ends) + g2Num, g2Denom := linearCombinationsG2(next.parameters.G2.Tau, linearCombCoeffs(len(next.parameters.G2.Tau))) + + if !sameRatioUnsafe(g1Num, g1Denom, g2Num, g2Denom) { + return errors.New("value update check failed") } return nil } -func (phase1 *Phase1) hash() []byte { +func (p *Phase1) hash() []byte { + if len(p.Challenge) == 0 { + panic("challenge field missing") + } sha := sha256.New() - phase1.writeTo(sha) + p.WriteTo(sha) + sha.Write(p.Challenge) return sha.Sum(nil) } + +// Initialize an empty Phase1 contribution object +// to be used by the first contributor or the verifier +// N is the FFT domain size +func (p *Phase1) Initialize(N uint64) { + if ecc.NextPowerOfTwo(N) != N { + panic("N must be a power of 2") + } + p.parameters.setOne(N) +} + +// NewPhase1 creates an empty Phase1 contribution object +// to be used by the first contributor or the verifier +// N is the FFT domain size +func NewPhase1(N uint64) *Phase1 { + res := new(Phase1) + res.Initialize(N) + return res +} diff --git a/backend/groth16/bn254/mpcsetup/phase2.go b/backend/groth16/bn254/mpcsetup/phase2.go index 9ec35d37a9..c13e6f9d56 100644 --- a/backend/groth16/bn254/mpcsetup/phase2.go +++ b/backend/groth16/bn254/mpcsetup/phase2.go @@ -6,48 +6,173 @@ package mpcsetup import ( + "bytes" "crypto/sha256" "errors" + "fmt" + "github.com/consensys/gnark/backend/groth16" + "github.com/consensys/gnark/backend/groth16/internal" + cs "github.com/consensys/gnark/constraint/bn254" "math/big" + "slices" curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fr" "github.com/consensys/gnark/constraint" - cs "github.com/consensys/gnark/constraint/bn254" ) -type Phase2Evaluations struct { +// Phase2Evaluations components of the circuit keys +// not depending on Phase2 randomisations +type Phase2Evaluations struct { // TODO @Tabaie rename G1 struct { - A, B, VKK []curve.G1Affine + A []curve.G1Affine // A are the left coefficient polynomials for each witness element, evaluated at τ + B []curve.G1Affine // B are the right coefficient polynomials for each witness element, evaluated at τ + VKK []curve.G1Affine // VKK are the coefficients of the public witness and commitments + CKK [][]curve.G1Affine // CKK are the coefficients of the committed values } G2 struct { - B []curve.G2Affine + B []curve.G2Affine // B are the right coefficient polynomials for each witness element, evaluated at τ } } type Phase2 struct { Parameters struct { G1 struct { - Delta curve.G1Affine - L, Z []curve.G1Affine + Delta curve.G1Affine + Z []curve.G1Affine // Z[i] = xⁱt(x)/δ where t is the domain vanishing polynomial 0 ≤ i ≤ N-2 + PKK []curve.G1Affine // PKK are the coefficients of the private witness, needed for the proving key. 
They have a denominator of δ + SigmaCKK [][]curve.G1Affine // Commitment proof bases: SigmaCKK[i][j] = Cᵢⱼσᵢ where Cᵢⱼ is the commitment basis for the jᵗʰ committed element from the iᵗʰ commitment } G2 struct { Delta curve.G2Affine + Sigma []curve.G2Affine // the secret σ value for each commitment + } + } + + // Proofs of update correctness + Sigmas []valueUpdate + Delta valueUpdate + + // Challenge is the hash of the PREVIOUS contribution + Challenge []byte +} + +func (p *Phase2) Verify(next *Phase2) error { + challenge := p.hash() + if len(next.Challenge) != 0 && !bytes.Equal(next.Challenge, challenge) { + return errors.New("the challenge does not match the previous phase's hash") + } + next.Challenge = challenge + + if len(next.Parameters.G1.Z) != len(p.Parameters.G1.Z) || + len(next.Parameters.G1.PKK) != len(p.Parameters.G1.PKK) || + len(next.Parameters.G1.SigmaCKK) != len(p.Parameters.G1.SigmaCKK) || + len(next.Parameters.G2.Sigma) != len(p.Parameters.G2.Sigma) { + return errors.New("contribution size mismatch") + } + + r := linearCombCoeffs(len(next.Parameters.G1.Z) + len(next.Parameters.G1.PKK) + 1) // TODO @Tabaie If all contributions are being verified in one go, we could reuse r + + verifyContribution := func(update *valueUpdate, g1Denominator, g1Numerator []curve.G1Affine, g2Denominator, g2Numerator *curve.G2Affine, dst byte) error { + g1Num := linearCombination(g1Numerator, r) + g1Denom := linearCombination(g1Denominator, r) + + return update.verify(pair{g1Denom, g2Denominator}, pair{g1Num, g2Denominator}, challenge, dst) + } + + // verify proof of knowledge of contributions to the σᵢ + // and the correctness of updates to Parameters.G2.Sigma[i] and the Parameters.G1.SigmaCKK[i] + for i := range p.Sigmas { // match the first commitment basis elem against the contribution commitment + if !areInSubGroupG1(next.Parameters.G1.SigmaCKK[i]) { + return errors.New("commitment proving key subgroup check failed") + } + + if err := verifyContribution(&p.Sigmas[i], p.Parameters.G1.SigmaCKK[i], next.Parameters.G1.SigmaCKK[i], &p.Parameters.G2.Sigma[i], &next.Parameters.G2.Sigma[i], 2+byte(i)); err != nil { + return fmt.Errorf("failed to verify contribution to σ[%d]: %w", i, err) + } + } + + // verify proof of knowledge of contribution to δ + // and the correctness of updates to Parameters.Gi.Delta, PKK[i], and Z[i] + if !areInSubGroupG1(next.Parameters.G1.Z) || !areInSubGroupG1(next.Parameters.G1.PKK) { + return errors.New("derived values 𝔾₁ subgroup check failed") + } + + denom := cloneAppend([]curve.G1Affine{p.Parameters.G1.Delta}, next.Parameters.G1.Z, next.Parameters.G1.PKK) + num := cloneAppend([]curve.G1Affine{next.Parameters.G1.Delta}, p.Parameters.G1.Z, p.Parameters.G1.PKK) + if err := verifyContribution(&p.Delta, denom, num, &p.Parameters.G2.Delta, &next.Parameters.G2.Delta, 1); err != nil { + return fmt.Errorf("failed to verify contribution to δ: %w", err) + } + + return nil +} + +// update modifies delta +func (p *Phase2) update(delta *fr.Element, sigma []fr.Element) { + var I big.Int + + scale := func(point any) { + switch p := point.(type) { + case *curve.G1Affine: + p.ScalarMultiplication(p, &I) + case *curve.G2Affine: + p.ScalarMultiplication(p, &I) + default: + panic("unknown type") } } - PublicKey PublicKey - Hash []byte + + for i := range sigma { + sigma[i].BigInt(&I) + for j := range sigma { + scale(&p.Parameters.G1.SigmaCKK[i][j]) + } + point := &p.Parameters.G2.Sigma[i] + point.ScalarMultiplicationBase(&I) + } + + delta.BigInt(&I) + scale(&p.Parameters.G2.Delta) + 
scale(&p.Parameters.G1.Delta) + + delta.Inverse(delta) + delta.BigInt(&I) + for i := range p.Parameters.G1.Z { + scale(&p.Parameters.G1.Z[i]) + } + for i := range p.Parameters.G1.PKK { + scale(&p.Parameters.G1.PKK[i]) + } } -func InitPhase2(r1cs *cs.R1CS, srs1 *Phase1) (Phase2, Phase2Evaluations) { - srs := srs1.Parameters - size := len(srs.G1.AlphaTau) +func (p *Phase2) Contribute() { + p.Challenge = p.hash() + + // sample value contributions and provide correctness proofs + var delta fr.Element + p.Delta, delta = updateValue(p.Parameters.G1.Delta, p.Challenge, 1) + + sigma := make([]fr.Element, len(p.Parameters.G1.SigmaCKK)) + if len(sigma) > 255 { + panic("too many commitments") // DST collision + } + for i := range sigma { + p.Sigmas[i], sigma[i] = updateValue(p.Parameters.G1.SigmaCKK[i][0], p.Challenge, byte(2+i)) + } + + p.update(&delta, sigma) +} + +// Initialize is to be run by the coordinator +// It involves no coin tosses. A verifier should +// simply rerun all the steps +func (p *Phase2) Initialize(r1cs *cs.R1CS, commons *SrsCommons) Phase2Evaluations { + + size := len(commons.G1.AlphaTau) if size < r1cs.GetNbConstraints() { panic("Number of constraints is larger than expected") } - c2 := Phase2{} - accumulateG1 := func(res *curve.G1Affine, t constraint.Term, value *curve.G1Affine) { cID := t.CoeffID() switch cID { @@ -89,26 +214,28 @@ func InitPhase2(r1cs *cs.R1CS, srs1 *Phase1) (Phase2, Phase2Evaluations) { } // Prepare Lagrange coefficients of [τ...]₁, [τ...]₂, [ατ...]₁, [βτ...]₁ - coeffTau1 := lagrangeCoeffsG1(srs.G1.Tau, size) - coeffTau2 := lagrangeCoeffsG2(srs.G2.Tau, size) - coeffAlphaTau1 := lagrangeCoeffsG1(srs.G1.AlphaTau, size) - coeffBetaTau1 := lagrangeCoeffsG1(srs.G1.BetaTau, size) + coeffTau1 := lagrangeCoeffsG1(commons.G1.Tau, size) // [L_{ω⁰}(τ)]₁, [L_{ω¹}(τ)]₁, ... where ω is a primitive sizeᵗʰ root of unity + coeffTau2 := lagrangeCoeffsG2(commons.G2.Tau, size) // [L_{ω⁰}(τ)]₂, [L_{ω¹}(τ)]₂, ... + coeffAlphaTau1 := lagrangeCoeffsG1(commons.G1.AlphaTau, size) // [L_{ω⁰}(ατ)]₁, [L_{ω¹}(ατ)]₁, ... + coeffBetaTau1 := lagrangeCoeffsG1(commons.G1.BetaTau, size) // [L_{ω⁰}(βτ)]₁, [L_{ω¹}(βτ)]₁, ... - internal, secret, public := r1cs.GetNbVariables() - nWires := internal + secret + public + nbInternal, nbSecret, nbPublic := r1cs.GetNbVariables() + nWires := nbInternal + nbSecret + nbPublic var evals Phase2Evaluations - evals.G1.A = make([]curve.G1Affine, nWires) - evals.G1.B = make([]curve.G1Affine, nWires) - evals.G2.B = make([]curve.G2Affine, nWires) + evals.G1.A = make([]curve.G1Affine, nWires) // recall: A are the left coefficients in DIZK parlance + evals.G1.B = make([]curve.G1Affine, nWires) // recall: B are the right coefficients in DIZK parlance + evals.G2.B = make([]curve.G2Affine, nWires) // recall: A only appears in 𝔾₁ elements in the proof, but B needs to appear in a 𝔾₂ element so the verifier can compute something resembling (A.x).(B.x) via pairings bA := make([]curve.G1Affine, nWires) aB := make([]curve.G1Affine, nWires) C := make([]curve.G1Affine, nWires) - // TODO @gbotrel use constraint iterator when available. - i := 0 it := r1cs.GetR1CIterator() for c := it.Next(); c != nil; c = it.Next() { + // each constraint is sparse, i.e. involves a small portion of all variables. 
+ // so we iterate over the variables involved and add the constraint's contribution + // to every variable's A, B, and C values + // A for _, t := range c.L { accumulateG1(&evals.G1.A[t.WireID()], t, &coeffTau1[i]) @@ -129,125 +256,100 @@ func InitPhase2(r1cs *cs.R1CS, srs1 *Phase1) (Phase2, Phase2Evaluations) { // Prepare default contribution _, _, g1, g2 := curve.Generators() - c2.Parameters.G1.Delta = g1 - c2.Parameters.G2.Delta = g2 + p.Parameters.G1.Delta = g1 + p.Parameters.G2.Delta = g2 // Build Z in PK as τⁱ(τⁿ - 1) = τ⁽ⁱ⁺ⁿ⁾ - τⁱ for i ∈ [0, n-2] // τⁱ(τⁿ - 1) = τ⁽ⁱ⁺ⁿ⁾ - τⁱ for i ∈ [0, n-2] - n := len(srs.G1.AlphaTau) - c2.Parameters.G1.Z = make([]curve.G1Affine, n) - for i := 0; i < n-1; i++ { - c2.Parameters.G1.Z[i].Sub(&srs.G1.Tau[i+n], &srs.G1.Tau[i]) - } - bitReverse(c2.Parameters.G1.Z) - c2.Parameters.G1.Z = c2.Parameters.G1.Z[:n-1] - - // Evaluate L - nPrivate := internal + secret - c2.Parameters.G1.L = make([]curve.G1Affine, nPrivate) - evals.G1.VKK = make([]curve.G1Affine, public) - offset := public - for i := 0; i < nWires; i++ { - var tmp curve.G1Affine - tmp.Add(&bA[i], &aB[i]) - tmp.Add(&tmp, &C[i]) - if i < public { - evals.G1.VKK[i].Set(&tmp) - } else { - c2.Parameters.G1.L[i-offset].Set(&tmp) - } + n := len(commons.G1.AlphaTau) + p.Parameters.G1.Z = make([]curve.G1Affine, n) + for i := 0; i < n-1; i++ { // TODO @Tabaie why is the last element always 0? + p.Parameters.G1.Z[i].Sub(&commons.G1.Tau[i+n], &commons.G1.Tau[i]) } - // Set δ public key - var delta fr.Element - delta.SetOne() - c2.PublicKey = newPublicKey(delta, nil, 1) - - // Hash initial contribution - c2.Hash = c2.hash() - return c2, evals -} - -func (c *Phase2) Contribute() { - // Sample toxic δ - var delta, deltaInv fr.Element - var deltaBI, deltaInvBI big.Int - delta.SetRandom() - deltaInv.Inverse(&delta) - - delta.BigInt(&deltaBI) - deltaInv.BigInt(&deltaInvBI) + bitReverse(p.Parameters.G1.Z) + p.Parameters.G1.Z = p.Parameters.G1.Z[:n-1] - // Set δ public key - c.PublicKey = newPublicKey(delta, c.Hash, 1) + commitments := r1cs.CommitmentInfo.(constraint.Groth16Commitments) - // Update δ - c.Parameters.G1.Delta.ScalarMultiplication(&c.Parameters.G1.Delta, &deltaBI) - c.Parameters.G2.Delta.ScalarMultiplication(&c.Parameters.G2.Delta, &deltaBI) + evals.G1.CKK = make([][]curve.G1Affine, len(commitments)) + p.Sigmas = make([]valueUpdate, len(commitments)) + p.Parameters.G1.SigmaCKK = make([][]curve.G1Affine, len(commitments)) + p.Parameters.G2.Sigma = make([]curve.G2Affine, len(commitments)) - // Update Z using δ⁻¹ - for i := 0; i < len(c.Parameters.G1.Z); i++ { - c.Parameters.G1.Z[i].ScalarMultiplication(&c.Parameters.G1.Z[i], &deltaInvBI) + for j := range commitments { + evals.G1.CKK[i] = make([]curve.G1Affine, 0, len(commitments[j].PrivateCommitted)) + p.Parameters.G2.Sigma[j] = g2 } - // Update L using δ⁻¹ - for i := 0; i < len(c.Parameters.G1.L); i++ { - c.Parameters.G1.L[i].ScalarMultiplication(&c.Parameters.G1.L[i], &deltaInvBI) - } + nbCommitted := internal.NbElements(commitments.GetPrivateCommitted()) - // 4. Hash contribution - c.Hash = c.hash() -} + // Evaluate PKK -func VerifyPhase2(c0, c1 *Phase2, c ...*Phase2) error { - contribs := append([]*Phase2{c0, c1}, c...) 
- for i := 0; i < len(contribs)-1; i++ { - if err := verifyPhase2(contribs[i], contribs[i+1]); err != nil { - return err + p.Parameters.G1.PKK = make([]curve.G1Affine, 0, nbInternal+nbSecret-nbCommitted-len(commitments)) + evals.G1.VKK = make([]curve.G1Affine, 0, nbPublic+len(commitments)) + committedIterator := internal.NewMergeIterator(commitments.GetPrivateCommitted()) + nbCommitmentsSeen := 0 + for j := 0; j < nWires; j++ { + // since as yet δ, γ = 1, the VKK and PKK are computed identically, as βA + αB + C + var tmp curve.G1Affine + tmp.Add(&bA[j], &aB[j]) + tmp.Add(&tmp, &C[j]) + commitmentIndex := committedIterator.IndexIfNext(j) + isCommitment := nbCommitmentsSeen < len(commitments) && commitments[nbCommitmentsSeen].CommitmentIndex == j + if commitmentIndex != -1 { + evals.G1.CKK[commitmentIndex] = append(evals.G1.CKK[commitmentIndex], tmp) + } else if j < nbPublic || isCommitment { + evals.G1.VKK = append(evals.G1.VKK, tmp) + } else { + p.Parameters.G1.PKK = append(p.Parameters.G1.PKK, tmp) + } + if isCommitment { + nbCommitmentsSeen++ } } - return nil -} -func verifyPhase2(current, contribution *Phase2) error { - // Compute R for δ - deltaR := genR(contribution.PublicKey.SG, contribution.PublicKey.SXG, current.Hash[:], 1) - - // Check for knowledge of δ - if !sameRatio(contribution.PublicKey.SG, contribution.PublicKey.SXG, contribution.PublicKey.XR, deltaR) { - return errors.New("couldn't verify knowledge of δ") + for j := range commitments { + p.Parameters.G1.SigmaCKK[j] = slices.Clone(evals.G1.CKK[j]) } - // Check for valid updates using previous parameters - if !sameRatio(contribution.Parameters.G1.Delta, current.Parameters.G1.Delta, deltaR, contribution.PublicKey.XR) { - return errors.New("couldn't verify that [δ]₁ is based on previous contribution") - } - if !sameRatio(contribution.PublicKey.SG, contribution.PublicKey.SXG, contribution.Parameters.G2.Delta, current.Parameters.G2.Delta) { - return errors.New("couldn't verify that [δ]₂ is based on previous contribution") - } + p.Challenge = nil - // Check for valid updates of L and Z using - L, prevL := merge(contribution.Parameters.G1.L, current.Parameters.G1.L) - if !sameRatio(L, prevL, contribution.Parameters.G2.Delta, current.Parameters.G2.Delta) { - return errors.New("couldn't verify valid updates of L using δ⁻¹") - } - Z, prevZ := merge(contribution.Parameters.G1.Z, current.Parameters.G1.Z) - if !sameRatio(Z, prevZ, contribution.Parameters.G2.Delta, current.Parameters.G2.Delta) { - return errors.New("couldn't verify valid updates of L using δ⁻¹") - } + return evals +} - // Check hash of the contribution - h := contribution.hash() - for i := 0; i < len(h); i++ { - if h[i] != contribution.Hash[i] { - return errors.New("couldn't verify hash of contribution") +// VerifyPhase2 for circuit described by r1cs +// using parameters from commons +// beaconChallenge is the output of the random beacon +// and c are the output from the contributors +// WARNING: the last contribution object will be modified +func VerifyPhase2(r1cs *cs.R1CS, commons *SrsCommons, beaconChallenge []byte, c ...*Phase2) (groth16.ProvingKey, groth16.VerifyingKey, error) { + prev := new(Phase2) + evals := prev.Initialize(r1cs, commons) + for i := range c { + if err := prev.Verify(c[i]); err != nil { + return nil, nil, err } + prev = c[i] } - return nil + pk, vk := prev.Seal(commons, &evals, beaconChallenge) + return &pk, &vk, nil } -func (c *Phase2) hash() []byte { +func (p *Phase2) hash() []byte { sha := sha256.New() - c.writeTo(sha) + p.WriteTo(sha) return 
sha.Sum(nil) } + +func cloneAppend(s ...[]curve.G1Affine) []curve.G1Affine { + l := 0 + for _, s := range s { + l += len(s) + } + res := make([]curve.G1Affine, 0, l) + for _, s := range s { + res = append(res, s...) + } + return res +} diff --git a/backend/groth16/bn254/mpcsetup/setup.go b/backend/groth16/bn254/mpcsetup/setup.go index 27fd034903..ec0002f578 100644 --- a/backend/groth16/bn254/mpcsetup/setup.go +++ b/backend/groth16/bn254/mpcsetup/setup.go @@ -8,23 +8,36 @@ package mpcsetup import ( curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fr/fft" + "github.com/consensys/gnark-crypto/ecc/bn254/fr/pedersen" groth16 "github.com/consensys/gnark/backend/groth16/bn254" ) -func ExtractKeys(srs1 *Phase1, srs2 *Phase2, evals *Phase2Evaluations, nConstraints int) (pk groth16.ProvingKey, vk groth16.VerifyingKey) { +// Seal performs the final contribution and outputs the proving and verifying keys. +// No randomization is performed at this step. +// A verifier should simply re-run this and check +// that it produces the same values. +// The inner workings of the random beacon are out of scope. +// WARNING: Seal modifies p, just as Contribute does. +// The result will be an INVALID Phase1 object, since no proof of correctness is produced. +func (p *Phase2) Seal(commons *SrsCommons, evals *Phase2Evaluations, beaconChallenge []byte) (pk groth16.ProvingKey, vk groth16.VerifyingKey) { + + // final contributions + contributions := beaconContributions(p.hash(), beaconChallenge, 1+len(p.Sigmas)) + p.update(&contributions[0], contributions[1:]) + _, _, _, g2 := curve.Generators() // Initialize PK - pk.Domain = *fft.NewDomain(uint64(nConstraints)) - pk.G1.Alpha.Set(&srs1.Parameters.G1.AlphaTau[0]) - pk.G1.Beta.Set(&srs1.Parameters.G1.BetaTau[0]) - pk.G1.Delta.Set(&srs2.Parameters.G1.Delta) - pk.G1.Z = srs2.Parameters.G1.Z + pk.Domain = *fft.NewDomain(uint64(len(evals.G1.A))) + pk.G1.Alpha.Set(&commons.G1.AlphaTau[0]) + pk.G1.Beta.Set(&commons.G1.BetaTau[0]) + pk.G1.Delta.Set(&p.Parameters.G1.Delta) + pk.G1.Z = p.Parameters.G1.Z bitReverse(pk.G1.Z) - pk.G1.K = srs2.Parameters.G1.L - pk.G2.Beta.Set(&srs1.Parameters.G2.Beta) - pk.G2.Delta.Set(&srs2.Parameters.G2.Delta) + pk.G1.K = p.Parameters.G1.PKK + pk.G2.Beta.Set(&commons.G2.Beta) + pk.G2.Delta.Set(&p.Parameters.G2.Delta) // Filter out infinity points nWires := len(evals.G1.A) @@ -69,14 +82,24 @@ func ExtractKeys(srs1 *Phase1, srs2 *Phase2, evals *Phase2Evaluations, nConstrai pk.G2.B = B2[:j] // Initialize VK - vk.G1.Alpha.Set(&srs1.Parameters.G1.AlphaTau[0]) - vk.G1.Beta.Set(&srs1.Parameters.G1.BetaTau[0]) - vk.G1.Delta.Set(&srs2.Parameters.G1.Delta) - vk.G2.Beta.Set(&srs1.Parameters.G2.Beta) - vk.G2.Delta.Set(&srs2.Parameters.G2.Delta) + vk.G1.Alpha.Set(&commons.G1.AlphaTau[0]) + vk.G1.Beta.Set(&commons.G1.BetaTau[0]) + vk.G1.Delta.Set(&p.Parameters.G1.Delta) + vk.G2.Beta.Set(&commons.G2.Beta) + vk.G2.Delta.Set(&p.Parameters.G2.Delta) vk.G2.Gamma.Set(&g2) vk.G1.K = evals.G1.VKK + vk.CommitmentKeys = make([]pedersen.VerifyingKey, len(evals.G1.CKK)) + pk.CommitmentKeys = make([]pedersen.ProvingKey, len(evals.G1.CKK)) + for i := range vk.CommitmentKeys { + vk.CommitmentKeys[i].G = g2 + vk.CommitmentKeys[i].GSigmaNeg.Neg(&p.Parameters.G2.Sigma[i]) + + pk.CommitmentKeys[i].Basis = evals.G1.CKK[i] + pk.CommitmentKeys[i].BasisExpSigma = p.Parameters.G1.SigmaCKK[i] + } + // sets e, -[δ]2, -[γ]2 if err := vk.Precompute(); err != nil { panic(err) diff --git a/backend/groth16/bn254/mpcsetup/setup_test.go 
b/backend/groth16/bn254/mpcsetup/setup_test.go index 4621926b8c..7396acea71 100644 --- a/backend/groth16/bn254/mpcsetup/setup_test.go +++ b/backend/groth16/bn254/mpcsetup/setup_test.go @@ -6,9 +6,12 @@ package mpcsetup import ( + "bytes" + "github.com/consensys/gnark-crypto/ecc" curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fr" cs "github.com/consensys/gnark/constraint/bn254" + "io" "testing" "github.com/consensys/gnark/backend/groth16" @@ -20,50 +23,85 @@ import ( native_mimc "github.com/consensys/gnark-crypto/ecc/bn254/fr/mimc" ) +// TestSetupCircuit a full integration test of the MPC setup func TestSetupCircuit(t *testing.T) { const ( - nContributionsPhase1 = 3 - nContributionsPhase2 = 3 - power = 9 + nbContributionsPhase1 = 3 + nbContributionsPhase2 = 3 ) assert := require.New(t) - srs1 := InitPhase1(power) + // Compile the circuit + var circuit Circuit + ccs, err := frontend.Compile(curve.ID.ScalarField(), r1cs.NewBuilder, &circuit) + assert.NoError(err) - // Make and verify contributions for phase1 - for i := 1; i < nContributionsPhase1; i++ { - // we clone test purposes; but in practice, participant will receive a []byte, deserialize it, - // add his contribution and send back to coordinator. - prev := srs1.clone() + domainSize := ecc.NextPowerOfTwo(uint64(ccs.GetNbConstraints())) + + var ( + bb bytes.Buffer // simulating network communications + serialized [max(nbContributionsPhase1, nbContributionsPhase2)][]byte + phase1 [nbContributionsPhase1]*Phase1 + p1 Phase1 + phase2 [nbContributionsPhase2]*Phase2 + p2 Phase2 + ) - srs1.Contribute() - assert.NoError(VerifyPhase1(&prev, &srs1)) + serialize := func(v io.WriterTo) []byte { + bb.Reset() + _, err = v.WriteTo(&bb) + assert.NoError(err) + return bb.Bytes() + } + deserialize := func(v io.ReaderFrom, b []byte) { + n, err := v.ReadFrom(bytes.NewReader(b)) + assert.NoError(err) + assert.Equal(len(b), int(n)) } - // Compile the circuit - var myCircuit Circuit - ccs, err := frontend.Compile(curve.ID.ScalarField(), r1cs.NewBuilder, &myCircuit) - assert.NoError(err) + // Make contributions for serialized + for i := range phase1 { + if i == 0 { // no "predecessor" to the first contribution + p1.Initialize(domainSize) + } + + p1.Contribute() + serialized[i] = serialize(&p1) + } + + // read all Phase1 objects + for i := range phase1 { + phase1[i] = new(Phase1) + deserialize(phase1[i], serialized[i]) + } + + // Verify contributions for phase 1 and generate non-circuit-specific parameters + srsCommons, err := VerifyPhase1(domainSize, []byte("testing phase1"), phase1[:]...) + { + var commonsRead SrsCommons + deserialize(&commonsRead, serialize(&srsCommons)) + srsCommons = commonsRead + } - var evals Phase2Evaluations r1cs := ccs.(*cs.R1CS) // Prepare for phase-2 - srs2, evals := InitPhase2(r1cs, &srs1) - - // Make and verify contributions for phase1 - for i := 1; i < nContributionsPhase2; i++ { - // we clone for test purposes; but in practice, participant will receive a []byte, deserialize it, - // add his contribution and send back to coordinator. 
- prev := srs2.clone() + for i := range phase2 { + if i == 0 { + p2.Initialize(r1cs, &srsCommons) + } + p2.Contribute() + serialized[i] = serialize(&p2) + } - srs2.Contribute() - assert.NoError(VerifyPhase2(&prev, &srs2)) + for i := range phase2 { + phase2[i] = new(Phase2) + deserialize(phase2[i], serialized[i]) } - // Extract the proving and verifying keys - pk, vk := ExtractKeys(&srs1, &srs2, &evals, ccs.GetNbConstraints()) + pk, vk, err := VerifyPhase2(r1cs, &srsCommons, []byte("testing phase2"), phase2[:]...) + assert.NoError(err) // Build the witness var preImage, hash fr.Element @@ -80,25 +118,28 @@ func TestSetupCircuit(t *testing.T) { assert.NoError(err) // groth16: ensure proof is verified - proof, err := groth16.Prove(ccs, &pk, witness) + proof, err := groth16.Prove(ccs, pk, witness) assert.NoError(err) - err = groth16.Verify(proof, &vk, pubWitness) + err = groth16.Verify(proof, vk, pubWitness) assert.NoError(err) } +/* func BenchmarkPhase1(b *testing.B) { const power = 14 b.Run("init", func(b *testing.B) { b.ResetTimer() + var srs1 Phase1 for i := 0; i < b.N; i++ { - _ = InitPhase1(power) + srs1.Initialize(1 << power) } }) b.Run("contrib", func(b *testing.B) { - srs1 := InitPhase1(power) + var srs1 Phase1 + srs1.Initialize(1 << power) b.ResetTimer() for i := 0; i < b.N; i++ { srs1.Contribute() @@ -109,7 +150,8 @@ func BenchmarkPhase1(b *testing.B) { func BenchmarkPhase2(b *testing.B) { const power = 14 - srs1 := InitPhase1(power) + var srs1 Phase1 + srs1.Initialize(1 << power) srs1.Contribute() var myCircuit Circuit @@ -136,7 +178,7 @@ func BenchmarkPhase2(b *testing.B) { }) } - +*/ // Circuit defines a pre-image knowledge proof // mimc(secret preImage) = public hash type Circuit struct { @@ -154,32 +196,8 @@ func (circuit *Circuit) Define(api frontend.API) error { mimc.Write(circuit.PreImage) api.AssertIsEqual(circuit.Hash, mimc.Sum()) - return nil -} - -func (phase1 *Phase1) clone() Phase1 { - r := Phase1{} - r.Parameters.G1.Tau = append(r.Parameters.G1.Tau, phase1.Parameters.G1.Tau...) - r.Parameters.G1.AlphaTau = append(r.Parameters.G1.AlphaTau, phase1.Parameters.G1.AlphaTau...) - r.Parameters.G1.BetaTau = append(r.Parameters.G1.BetaTau, phase1.Parameters.G1.BetaTau...) - - r.Parameters.G2.Tau = append(r.Parameters.G2.Tau, phase1.Parameters.G2.Tau...) - r.Parameters.G2.Beta = phase1.Parameters.G2.Beta - - r.PublicKeys = phase1.PublicKeys - r.Hash = append(r.Hash, phase1.Hash...) - - return r -} - -func (phase2 *Phase2) clone() Phase2 { - r := Phase2{} - r.Parameters.G1.Delta = phase2.Parameters.G1.Delta - r.Parameters.G1.L = append(r.Parameters.G1.L, phase2.Parameters.G1.L...) - r.Parameters.G1.Z = append(r.Parameters.G1.Z, phase2.Parameters.G1.Z...) - r.Parameters.G2.Delta = phase2.Parameters.G2.Delta - r.PublicKey = phase2.PublicKey - r.Hash = append(r.Hash, phase2.Hash...) 
+ c, err := api.(frontend.Committer).Commit(circuit.PreImage, circuit.Hash) + api.AssertIsDifferent(c, 0) - return r + return err } diff --git a/backend/groth16/bn254/mpcsetup/utils.go b/backend/groth16/bn254/mpcsetup/utils.go index 66a7edaa4a..d99abed52a 100644 --- a/backend/groth16/bn254/mpcsetup/utils.go +++ b/backend/groth16/bn254/mpcsetup/utils.go @@ -7,45 +7,16 @@ package mpcsetup import ( "bytes" - "math/big" - "math/bits" - "runtime" - + "errors" "github.com/consensys/gnark-crypto/ecc" curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fr" "github.com/consensys/gnark/internal/utils" + "math/big" + "math/bits" + "runtime" ) -type PublicKey struct { - SG curve.G1Affine - SXG curve.G1Affine - XR curve.G2Affine -} - -func newPublicKey(x fr.Element, challenge []byte, dst byte) PublicKey { - var pk PublicKey - _, _, g1, _ := curve.Generators() - - var s fr.Element - var sBi big.Int - s.SetRandom() - s.BigInt(&sBi) - pk.SG.ScalarMultiplication(&g1, &sBi) - - // compute x*sG1 - var xBi big.Int - x.BigInt(&xBi) - pk.SXG.ScalarMultiplication(&pk.SG, &xBi) - - // generate R based on sG1, sxG1, challenge, and domain separation tag (tau, alpha or beta) - R := genR(pk.SG, pk.SXG, challenge, dst) - - // compute x*spG2 - pk.XR.ScalarMultiplication(&R, &xBi) - return pk -} - func bitReverse[T any](a []T) { n := uint64(len(a)) nn := uint64(64 - bits.TrailingZeros64(n)) @@ -58,18 +29,33 @@ func bitReverse[T any](a []T) { } } -// Returns [1, a, a², ..., aⁿ⁻¹ ] in Montgomery form -func powers(a fr.Element, n int) []fr.Element { +func linearCombCoeffs(n int) []fr.Element { + return bivariateRandomMonomials(n) +} + +// Returns [1, a, a², ..., aᴺ⁻¹ ] +func powers(a *fr.Element, n int) []fr.Element { + result := make([]fr.Element, n) - result[0] = fr.NewElement(1) - for i := 1; i < n; i++ { - result[i].Mul(&result[i-1], &a) + if n >= 1 { + result[0].SetOne() + } + if n >= 2 { + result[1].Set(a) + } + for i := 2; i < n; i++ { + result[i].Mul(&result[i-1], a) } return result } -// Returns [aᵢAᵢ, ...] in G1 +// Returns [aᵢAᵢ, ...]∈𝔾₁ +// it assumes len(A) ≤ len(a) func scaleG1InPlace(A []curve.G1Affine, a []fr.Element) { + /*if a[0].IsOne() { + A = A[1:] + a = a[1:] + }*/ utils.Parallelize(len(A), func(start, end int) { var tmp big.Int for i := start; i < end; i++ { @@ -79,8 +65,13 @@ func scaleG1InPlace(A []curve.G1Affine, a []fr.Element) { }) } -// Returns [aᵢAᵢ, ...] in G2 +// Returns [aᵢAᵢ, ...]∈𝔾₂ +// it assumes len(A) ≤ len(a) func scaleG2InPlace(A []curve.G2Affine, a []fr.Element) { + /*if a[0].IsOne() { + A = A[1:] + a = a[1:] + }*/ utils.Parallelize(len(A), func(start, end int) { var tmp big.Int for i := start; i < end; i++ { @@ -90,66 +81,114 @@ func scaleG2InPlace(A []curve.G2Affine, a []fr.Element) { }) } -// Check e(a₁, a₂) = e(b₁, b₂) -func sameRatio(a1, b1 curve.G1Affine, a2, b2 curve.G2Affine) bool { - if !a1.IsInSubGroup() || !b1.IsInSubGroup() || !a2.IsInSubGroup() || !b2.IsInSubGroup() { - panic("invalid point not in subgroup") - } - var na2 curve.G2Affine - na2.Neg(&a2) +// Check n₁/d₁ = n₂/d₂ i.e. e(n₁, d₂) = e(d₁, n₂). No subgroup checks. 
+func sameRatioUnsafe(n1, d1 curve.G1Affine, n2, d2 curve.G2Affine) bool { + var nd1 curve.G1Affine + nd1.Neg(&d1) res, err := curve.PairingCheck( - []curve.G1Affine{a1, b1}, - []curve.G2Affine{na2, b2}) + []curve.G1Affine{n1, nd1}, + []curve.G2Affine{d2, n2}) if err != nil { panic(err) } return res } -// returns a = ∑ rᵢAᵢ, b = ∑ rᵢBᵢ -func merge(A, B []curve.G1Affine) (a, b curve.G1Affine) { +// returns ∑ rᵢAᵢ +func linearCombination(A []curve.G1Affine, r []fr.Element) curve.G1Affine { nc := runtime.NumCPU() - r := make([]fr.Element, len(A)) - for i := 0; i < len(A); i++ { - r[i].SetRandom() + var res curve.G1Affine + if _, err := res.MultiExp(A, r[:len(A)], ecc.MultiExpConfig{NbTasks: nc}); err != nil { + panic(err) } - a.MultiExp(A, r, ecc.MultiExpConfig{NbTasks: nc / 2}) - b.MultiExp(B, r, ecc.MultiExpConfig{NbTasks: nc / 2}) - return + return res } -// L1 = ∑ rᵢAᵢ, L2 = ∑ rᵢAᵢ₊₁ in G1 -func linearCombinationG1(A []curve.G1Affine) (L1, L2 curve.G1Affine) { - nc := runtime.NumCPU() - n := len(A) - r := make([]fr.Element, n-1) - for i := 0; i < n-1; i++ { - r[i].SetRandom() +// linearCombinationsG1 returns +// +// powers[0].A[0] + powers[1].A[1] + ... + powers[ends[0]-2].A[ends[0]-2] +// + powers[ends[0]].A[ends[0]] + ... + powers[ends[1]-2].A[ends[1]-2] +// .... (truncated) +// +// powers[0].A[1] + powers[1].A[2] + ... + powers[ends[0]-2].A[ends[0]-1] +// + powers[ends[0]].A[ends[0]+1] + ... + powers[ends[1]-2].A[ends[1]-1] +// .... (shifted) +// +// It is assumed without checking that powers[i+1] = powers[i]*powers[1] unless i+1 is a partial sum of sizes +// the slices powers and A will be modified +func linearCombinationsG1(A []curve.G1Affine, powers []fr.Element, ends []int) (truncated, shifted curve.G1Affine) { + if ends[len(ends)-1] != len(A) || len(A) != len(powers) { + panic("lengths mismatch") + } + + largeCoeffs := make([]fr.Element, len(ends)) + for i := range ends { + largeCoeffs[i].Neg(&powers[ends[i]-1]) + powers[ends[i]-1].SetZero() + } + + msmCfg := ecc.MultiExpConfig{NbTasks: runtime.NumCPU()} + + if _, err := shifted.MultiExp(A, powers, msmCfg); err != nil { + panic(err) + } + + // compute truncated as + // r.shifted + // + powers[0].A[0] + powers[ends[0].A[ends[0]] + ... + // - powers[ends[0]-1].A[ends[0]-1] - powers[ends[1]-1].A[ends[1]-1] - ... + r := powers[1] + prevEnd := 0 + for i := range ends { + if ends[i] <= prevEnd { + panic("non-increasing ends") + } + + powers[2*i] = powers[prevEnd] + powers[2*i+1] = largeCoeffs[i] + + A[2*i] = A[prevEnd] + A[2*i+1] = A[ends[i]-1] + + prevEnd = ends[i] + } + powers[len(ends)*2] = r + A[len(ends)*2] = shifted + + // TODO @Tabaie O(1) MSM worth it? 
+	if _, err := truncated.MultiExp(A[:2*len(ends)+1], powers[:2*len(ends)+1], msmCfg); err != nil {
+		panic(err)
 	}
-	L1.MultiExp(A[:n-1], r, ecc.MultiExpConfig{NbTasks: nc / 2})
-	L2.MultiExp(A[1:], r, ecc.MultiExpConfig{NbTasks: nc / 2})
 	return
 }
 
-// L1 = ∑ rᵢAᵢ, L2 = ∑ rᵢAᵢ₊₁ in G2
-func linearCombinationG2(A []curve.G2Affine) (L1, L2 curve.G2Affine) {
-	nc := runtime.NumCPU()
-	n := len(A)
-	r := make([]fr.Element, n-1)
-	for i := 0; i < n-1; i++ {
-		r[i].SetRandom()
+// linearCombinationsG2 assumes, and does not check, that rPowers[i+1] = rPowers[1].rPowers[i] for all applicable i
+// Also assumed that 3 ≤ N ≔ len(A) ≤ len(rPowers)
+// the results are truncated = ∑_{i=0}^{N-2} rⁱAᵢ, shifted = ∑_{i=1}^{N-1} rⁱ⁻¹Aᵢ
+func linearCombinationsG2(A []curve.G2Affine, rPowers []fr.Element) (truncated, shifted curve.G2Affine) {
+	// the common section, 1 to N-2
+	var common curve.G2Affine
+	if _, err := common.MultiExp(A[1:len(A)-1], rPowers[:len(A)-2], ecc.MultiExpConfig{NbTasks: runtime.NumCPU()}); err != nil { // A[1] + r.A[2] + ... + rᴺ⁻³.A[N-2]
+		panic(err)
 	}
-	L1.MultiExp(A[:n-1], r, ecc.MultiExpConfig{NbTasks: nc / 2})
-	L2.MultiExp(A[1:], r, ecc.MultiExpConfig{NbTasks: nc / 2})
+	var c big.Int
+	rPowers[1].BigInt(&c)
+	truncated.ScalarMultiplication(&common, &c).Add(&truncated, &A[0]) // A[0] + r.A[1] + r².A[2] + ... + rᴺ⁻².A[N-2]
+
+	rPowers[len(A)-2].BigInt(&c)
+	shifted.ScalarMultiplication(&A[len(A)-1], &c).Add(&shifted, &common) // A[1] + r.A[2] + ... + rᴺ⁻².A[N-1]
+
 	return
 }
 
-// Generate R in G₂ as Hash(gˢ, gˢˣ, challenge, dst)
-func genR(sG1, sxG1 curve.G1Affine, challenge []byte, dst byte) curve.G2Affine {
+// Generate R∈𝔾₂ as Hash(gˢ, challenge, dst)
+// it is to be used as a challenge base for generating a proof of knowledge of x
+// π ≔ x.r; e([1]₁, π) =? e([x]₁, r)
+func genR(sG1 curve.G1Affine, challenge []byte, dst byte) curve.G2Affine {
 	var buf bytes.Buffer
 	buf.Grow(len(challenge) + curve.SizeOfG1AffineUncompressed)
 	buf.Write(sG1.Marshal())
-	buf.Write(sxG1.Marshal())
 	buf.Write(challenge)
 	spG2, err := curve.HashToG2(buf.Bytes(), []byte{dst})
 	if err != nil {
@@ -157,3 +196,192 @@ func genR(sG1, sxG1 curve.G1Affine, challenge []byte, dst byte) curve.G2Affine {
 	}
 	return spG2
 }
+
+type pair struct {
+	g1 curve.G1Affine
+	g2 *curve.G2Affine // optional; some values are expected to have a 𝔾₂ representation, some aren't.
+}
+
+// check that g1, g2 are valid as updated values, i.e. in their subgroups, and non-zero
+func (p *pair) validUpdate() bool {
+	// if the contribution is 0 the product is doomed to be 0.
+	// no need to check this for g2 independently because if g1 is 0 and g2 is not, consistency checks will fail
+	return !p.g1.IsInfinity() && p.g1.IsInSubGroup() && (p.g2 == nil || p.g2.IsInSubGroup())
+}
+
+type valueUpdate struct {
+	contributionCommitment curve.G1Affine // x or [Xⱼ]₁
+	contributionPok        curve.G2Affine // π ≔ x.r ∈ 𝔾₂
+}
+
+// updateValue samples a random contribution x, multiplies the given 𝔾₁ value by it in place,
+// and produces a proof of knowledge of x.
+// the second output is x itself, i.e. toxic waste. It is the caller's responsibility to safely "dispose" of it.
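+//
+// usage sketch (hypothetical caller, names for illustration only):
+//
+//	var delta curve.G1Affine // some parameter of the current phase, e.g. [δ]₁
+//	proof, x := updateValue(&delta, prevChallenge, 1) // 1 = example domain separation tag
+//	// ... use x to update the other δ-dependent parameters ...
+//	x.SetZero() // dispose of the toxic waste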
+func updateValue(value *curve.G1Affine, challenge []byte, dst byte) (proof valueUpdate, contributionValue fr.Element) {
+	if _, err := contributionValue.SetRandom(); err != nil {
+		panic(err)
+	}
+	var contributionValueI big.Int
+	contributionValue.BigInt(&contributionValueI)
+
+	_, _, g1, _ := curve.Generators()
+	proof.contributionCommitment.ScalarMultiplication(&g1, &contributionValueI)
+	value.ScalarMultiplication(value, &contributionValueI) // update in place; a by-value argument would discard the update
+
+	// proof of knowledge of the commitment. Algorithm 3 from section 3.7
+	pokBase := genR(proof.contributionCommitment, challenge, dst) // r
+	proof.contributionPok.ScalarMultiplication(&pokBase, &contributionValueI)
+
+	return
+}
+
+// verify corresponds to verification steps {i, i+3} with 1 ≤ i ≤ 3 in section 7.1 of Bowe-Gabizon17
+// it checks the proof of knowledge of the contribution, and the fact that the product of the contribution
+// and previous commitment makes the new commitment.
+// prevCommitment is assumed to be valid. No subgroup check and the like.
+func (x *valueUpdate) verify(denom, num pair, challenge []byte, dst byte) error {
+	noG2 := denom.g2 == nil
+	if noG2 != (num.g2 == nil) {
+		return errors.New("erasing or creating g2 values")
+	}
+
+	if !x.contributionPok.IsInSubGroup() || !x.contributionCommitment.IsInSubGroup() || !num.validUpdate() {
+		return errors.New("contribution values subgroup check failed")
+	}
+
+	// verify commitment proof of knowledge. CheckPOK, algorithm 4 from section 3.7
+	r := genR(x.contributionCommitment, challenge, dst) // verification challenge in the form of a g2 base
+	_, _, g1, _ := curve.Generators()
+	if !sameRatioUnsafe(x.contributionCommitment, g1, x.contributionPok, r) { // π =? x.r i.e. x/g1 =? π/r
+		return errors.New("contribution proof of knowledge verification failed")
+	}
+
+	// check that the num/denom ratio is consistent between the 𝔾₁ and 𝔾₂ representations. Based on CONSISTENT, algorithm 2 in Section 3.6.
+	if !noG2 && !sameRatioUnsafe(num.g1, denom.g1, *num.g2, *denom.g2) {
+		return errors.New("g2 update inconsistent")
+	}
+
+	// now verify that num₁/denom₁ = x ( = x/g1 = π/r )
+	// we have to use the latter pair for the RHS because sameRatio needs both a 𝔾₁ and a 𝔾₂ value
+	if !sameRatioUnsafe(num.g1, denom.g1, x.contributionPok, r) {
+		return errors.New("g1 update inconsistent")
+	}
+
+	return nil
+}
+
+func toRefs[T any](s []T) []*T {
+	res := make([]*T, len(s))
+	for i := range s {
+		res[i] = &s[i]
+	}
+	return res
+}
+
+func areInSubGroup[T interface{ IsInSubGroup() bool }](s []T) bool {
+	for i := range s {
+		if !s[i].IsInSubGroup() {
+			return false
+		}
+	}
+	return true
+}
+
+func areInSubGroupG1(s []curve.G1Affine) bool {
+	return areInSubGroup(toRefs(s))
+}
+
+func areInSubGroupG2(s []curve.G2Affine) bool {
+	return areInSubGroup(toRefs(s))
+}
+
+// bivariateRandomMonomials returns 1, x, ..., x^{ends[0]-1}; y, xy, ..., x^{ends[1]-ends[0]-1}y; ...
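+// (for example, ends = [3, 5] yields 1, x, x², y, xy, for independent random x and y)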
+// all concatenated in the same slice
+func bivariateRandomMonomials(ends ...int) []fr.Element {
+	if len(ends) == 0 {
+		return nil
+	}
+
+	res := make([]fr.Element, ends[len(ends)-1])
+	if len(res) > 1 { // guard against a total size of 1, where there is no x to sample
+		if _, err := res[1].SetRandom(); err != nil {
+			panic(err)
+		}
+	}
+	setPowers(res[:ends[0]])
+
+	if len(ends) == 1 {
+		return res
+	}
+
+	y := make([]fr.Element, len(ends))
+	if _, err := y[1].SetRandom(); err != nil {
+		panic(err)
+	}
+	setPowers(y)
+
+	for d := 1; d < len(ends); d++ {
+		xdeg := ends[d] - ends[d-1]
+		if xdeg > ends[0] {
+			panic("implementation detail: the x-degree bound for the first group (ends[0]) must be the largest")
+		}

+		for i := range xdeg {
+			res[ends[d-1]+i].Mul(&res[i], &y[d])
+		}
+	}
+
+	return res
+}
+
+// sets x[i] = x[1]ⁱ (and x[0] = 1), assuming x[1] is already set
+func setPowers(x []fr.Element) {
+	if len(x) == 0 {
+		return
+	}
+	x[0].SetOne()
+	for i := 2; i < len(x); i++ {
+		x[i].Mul(&x[i-1], &x[1])
+	}
+}
+
+func partialSums(s ...int) []int {
+	if len(s) == 0 {
+		return nil
+	}
+	sums := make([]int, len(s))
+	sums[0] = s[0]
+	for i := 1; i < len(s); i++ {
+		sums[i] = sums[i-1] + s[i]
+	}
+	return sums
+}
+
+func beaconContributions(hash, beaconChallenge []byte, n int) []fr.Element {
+	var (
+		bb  bytes.Buffer
+		err error
+	)
+	bb.Grow(len(hash) + len(beaconChallenge))
+	bb.Write(hash)
+	bb.Write(beaconChallenge)
+
+	res := make([]fr.Element, 1) // a single zero element, so the loop below runs at least once
+
+	allNonZero := func() bool {
+		for i := range res {
+			if res[i].IsZero() {
+				return false
+			}
+		}
+		return true
+	}
+
+	// cryptographically unlikely for this to be run more than once
+	for !allNonZero() {
+		if res, err = fr.Hash(bb.Bytes(), []byte("Groth16 SRS generation ceremony - Phase 1 Final Step"), n); err != nil {
+			panic(err)
+		}
+		bb.WriteByte('=') // padding just so that the hash is different next time
+	}
+
+	return res
+}
diff --git a/backend/groth16/bn254/setup.go b/backend/groth16/bn254/setup.go
index b4f4ba9eca..b2ce93a9ce 100644
--- a/backend/groth16/bn254/setup.go
+++ b/backend/groth16/bn254/setup.go
@@ -133,7 +133,7 @@ func Setup(r1cs *cs.R1CS, pk *ProvingKey, vk *VerifyingKey) error {
 	vkK := make([]fr.Element, nbPublicWires)
 	ckK := make([][]fr.Element, len(commitmentInfo))
 	for i := range commitmentInfo {
-		ckK[i] = make([]fr.Element, len(privateCommitted[i]))
+		ckK[i] = make([]fr.Element, 0, len(privateCommitted[i]))
 	}
 
 	var t0, t1 fr.Element
@@ -145,37 +145,29 @@ func Setup(r1cs *cs.R1CS, pk *ProvingKey, vk *VerifyingKey) error {
 			Add(&t1, &C[i]).
 			Mul(&t1, coeff)
 	}
-	vI := 0                                // number of public wires seen so far
-	cI := make([]int, len(commitmentInfo)) // number of private committed wires seen so far for each commitment
-	nbPrivateCommittedSeen := 0            // = ∑ᵢ cI[i]
+	vI := 0 // number of public wires seen so far
+	committedIterator := internal.NewMergeIterator(privateCommitted)
+	nbPrivateCommittedSeen := 0 // number of private committed wires seen so far
 	nbCommitmentsSeen := 0
 
 	for i := range A {
-		commitment := -1 // index of the commitment that commits to this variable as a private or commitment value
-		var isCommitment, isPublic bool
-		if isPublic = i < r1cs.GetNbPublicVariables(); !isPublic {
+		commitmentIndex := committedIterator.IndexIfNext(i) // the index of the commitment that commits to the wire i. -1 if i is not committed
+		isCommitment, isPublic := false, i < r1cs.GetNbPublicVariables()
+		if !isPublic {
 			if nbCommitmentsSeen < len(commitmentWires) && commitmentWires[nbCommitmentsSeen] == i {
 				isCommitment = true
 				nbCommitmentsSeen++
 			}
-
-			for j := range commitmentInfo { // does commitment j commit to i?
-			if cI[j] < len(privateCommitted[j]) && privateCommitted[j][cI[j]] == i {
-				commitment = j
-				break // frontend guarantees that no private variable is committed to more than once
-			}
-		}
 		}
 
-		if isPublic || commitment != -1 || isCommitment {
+		if isPublic || isCommitment || commitmentIndex != -1 {
 			computeK(i, &toxicWaste.gammaInv)
 
 			if isPublic || isCommitment {
 				vkK[vI] = t1
 				vI++
 			} else { // committed and private
-				ckK[commitment][cI[commitment]] = t1
-				cI[commitment]++
+				ckK[commitmentIndex] = append(ckK[commitmentIndex], t1)
 				nbPrivateCommittedSeen++
 			}
 		} else {
diff --git a/backend/groth16/internal/utils.go b/backend/groth16/internal/utils.go
index 6062ef57ce..67648b2104 100644
--- a/backend/groth16/internal/utils.go
+++ b/backend/groth16/internal/utils.go
@@ -1,5 +1,10 @@
 package internal
 
+import (
+	"math"
+	"slices"
+)
+
 func ConcatAll(slices ...[]int) []int { // copyright note: written by GitHub Copilot
 	totalLen := 0
 	for _, s := range slices {
@@ -20,3 +25,56 @@ func NbElements(slices [][]int) int { // copyright note: written by GitHub Copil
 	}
 	return totalLen
 }
+
+// NewMergeIterator assumes that all slices in s are sorted
+func NewMergeIterator(s [][]int) *MergeIterator {
+	res := &MergeIterator{slices: slices.Clone(s)}
+	res.findLeast()
+	return res
+}
+
+// MergeIterator iterates through a merging of multiple sorted slices
+type MergeIterator struct {
+	slices     [][]int
+	leastIndex int
+}
+
+func (i *MergeIterator) findLeast() {
+	value := math.MaxInt
+	i.leastIndex = -1
+	for j := range i.slices {
+		if len(i.slices[j]) == 0 {
+			continue
+		}
+		if v := i.slices[j][0]; v < value {
+			value = v
+			i.leastIndex = j
+		}
+	}
+}
+
+// Peek returns the next smallest value and the index of the slice it came from
+// If the iterator is empty, Peek returns (math.MaxInt, -1)
+func (i *MergeIterator) Peek() (value, index int) {
+	if i.leastIndex == -1 {
+		return math.MaxInt, -1
+	}
+	return i.slices[i.leastIndex][0], i.leastIndex
+}
+
+// Next returns the next smallest value and the index of the slice it came from, and advances the iterator
+// If the iterator is empty, Next returns (math.MaxInt, -1)
+func (i *MergeIterator) Next() (value, index int) {
+	value, index = i.Peek()
+	if index == -1 {
+		return
+	}
+	i.slices[i.leastIndex] = i.slices[i.leastIndex][1:] // advance the slice the value came from
+	i.findLeast()                                       // only then recompute the least element
+	return
+}
+
+// IndexIfNext returns the index of the slice and advances the iterator if the next value is value, otherwise returns -1
+// If the iterator is empty, IndexIfNext returns -1
+func (i *MergeIterator) IndexIfNext(value int) int {
+	if v, index := i.Peek(); v == value {
+		i.Next()
+		return index
+	}
+	return -1
+}
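+
+// Usage sketch (illustrative only, not part of the package): Setup above walks the
+// union of the sorted privateCommitted slices one wire index at a time, as in:
+//
+//	it := NewMergeIterator([][]int{{2, 5}, {3}})
+//	it.IndexIfNext(1) // -1: wire 1 is not committed
+//	it.IndexIfNext(2) // 0: commitment 0 commits to wire 2; the iterator advances
+//	it.IndexIfNext(3) // 1: commitment 1 commits to wire 3
+//	it.IndexIfNext(5) // 0: commitment 0 commits to wire 5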