diff --git a/Makefile b/Makefile index 1bf01c3aeb..93eb238e42 100644 --- a/Makefile +++ b/Makefile @@ -135,6 +135,14 @@ clean-fabric-peer-images: tokengen: @go install ./cmd/tokengen +.PHONY: traceinspector +traceinspector: + @go install ./token/services/benchmark/cmd/traceinspector + +.PHONY: memcheck +memcheck: + @go install ./token/services/benchmark/cmd/memcheck + .PHONY: idemixgen txgen: @go install github.com/IBM/idemix/tools/idemixgen diff --git a/cmd/tokengen/cobra/pp/common/common_test.go b/cmd/tokengen/cobra/pp/common/common_test.go index 2560a8ca9a..460105953d 100644 --- a/cmd/tokengen/cobra/pp/common/common_test.go +++ b/cmd/tokengen/cobra/pp/common/common_test.go @@ -7,6 +7,7 @@ SPDX-License-Identifier: Apache-2.0 package common import ( + "bytes" "os" "path/filepath" "testing" @@ -45,11 +46,11 @@ func TestLoadExtras(t *testing.T) { t.Errorf("expected 2 entries, got %d", len(result)) } - if string(result["foo"]) != string(file1Content) { + if !bytes.Equal(result["foo"], file1Content) { t.Errorf("expected %q for foo, got %q", string(file1Content), string(result["foo"])) } - if string(result["bar"]) != string(file2Content) { + if !bytes.Equal(result["bar"], file2Content) { t.Errorf("expected %q for bar, got %q", string(file2Content), string(result["bar"])) } }) @@ -144,7 +145,7 @@ func TestLoadExtras(t *testing.T) { t.Fatalf("expected no error, got: %v", err) } - if string(result["mykey"]) != string(fileContent) { + if !bytes.Equal(result["mykey"], fileContent) { t.Errorf("expected %q, got %q", string(fileContent), string(result["mykey"])) } }) diff --git a/docs/benchmark/benchmark.md b/docs/benchmark/benchmark.md index 661ba1fb79..1847728d04 100644 --- a/docs/benchmark/benchmark.md +++ b/docs/benchmark/benchmark.md @@ -1,4 +1,13 @@ -# Benchmark +# Benchmarks + +## Tools - [Go Tools for Benchmarks](./tools.md) -- [ZKAT DLog No Graph-Hiding](dlognogh/dlognogh.md) \ No newline at end of file +- Custom Analysis Tools: + - [`memcheck`](./../../token/services/benchmark/cmd/memcheck/README.md): Go Pprof Memory Analyzer + - [`traceinspector`](./../../token/services/benchmark/cmd/traceinspector/README.md): Go Pprof Trace Analyzer + +## Benchmarks + +- [ZKAT DLog No Graph-Hiding Benchmarks](core/dlognogh/dlognogh.md) +- [Identity Service - Idemix](services/identity/idemix.md) \ No newline at end of file diff --git a/docs/benchmark/dlognogh/dlognogh.md b/docs/benchmark/core/dlognogh/dlognogh.md similarity index 56% rename from docs/benchmark/dlognogh/dlognogh.md rename to docs/benchmark/core/dlognogh/dlognogh.md index ee76b6cca4..88f39320d0 100644 --- a/docs/benchmark/dlognogh/dlognogh.md +++ b/docs/benchmark/core/dlognogh/dlognogh.md @@ -1,4 +1,4 @@ -# ZKAT DLog No Graph Hiding Benchmark +# ZKAT DLog No Graph Hiding Benchmarks Packages with benchmark tests: @@ -6,7 +6,8 @@ Packages with benchmark tests: - `BenchmarkSender`, `BenchmarkVerificationSenderProof`, `TestParallelBenchmarkSender`, and `TestParallelBenchmarkVerificationSenderProof` are used to benchmark the generation of a transfer action. This includes also the generation of ZK proof for a transfer operation. - `BenchmarkTransferProofGeneration`, `TestParallelBenchmarkTransferProofGeneration` are used to benchmark the generation of ZK proof alone. - `token/core/zkatdlog/nogh/v1/issue`: `BenchmarkIssuer` and `BenchmarkProofVerificationIssuer` -- `token/core/zkatdlog/nogh/v1`: `BenchmarkTransfer` +- `token/core/zkatdlog/nogh/v1/validator`: `TestParallelBenchmarkValidatorTransfer`.
+- `token/core/zkatdlog/nogh/v1`: `BenchmarkTransferServiceTransfer` and `TestParallelBenchmarkTransferServiceTransfer`. The steps necessary to run the benchmarks are very similar. We give two examples here: @@ -109,17 +110,17 @@ You can then aggregate/parse the output (e.g., benchstat) to compute averages ac ### Results -Example results have been produced on an Apple M1 Max and can be consulted [here](./transfer_BenchmarkSender_results.md). +Example results have been produced on an Apple M1 Max and can be consulted [here](transfer_BenchmarkSender_results.md). ## Benchmark: `token/core/zkatdlog/nogh/v1/transfer#TestParallelBenchmarkSender` This is a test that runs multiple instances of the above benchmark in parallel. This allows the analyst to understand if shared data structures are actual bottlenecks. -It uses a custom-made runner whose documentation can be found [here](../../../token/core/common/benchmark/runner.md). +It uses a custom-made runner whose documentation can be found [here](../../../../token/services/benchmark/runner.md). ```shell -go test ./token/core/zkatdlog/nogh/v1/transfer -test.run=TestParallelBenchmarkSender -test.v -test.benchmem -test.timeout 0 -bits="32" -curves="BN254" -num_inputs="2" -num_outputs="2" -workers="1,10" -duration="10s" | tee bench.txt +go test ./token/core/zkatdlog/nogh/v1/transfer -test.run=TestParallelBenchmarkSender -test.v -test.timeout 0 -bits="32" -curves="BN254" -num_inputs="2" -num_outputs="2" -workers="NumCPU" -duration="10s" -setup_samples=128 | tee bench.txt ``` The test supports the following flags: @@ -136,120 +137,82 @@ The test supports the following flags: a comma-separate list of number of outputs (1,2,3,...) -workers string a comma-separate list of workers (1,2,3,...,NumCPU), where NumCPU is converted to the number of available CPUs + -profile bool + write pprof profiles to file + -setup_samples uint + number of setup samples, 0 disables it ``` ### Results -```go +```shell === RUN TestParallelBenchmarkSender -=== RUN TestParallelBenchmarkSender/Setup(bits_32,_curve_BN254,_#i_2,_#o_2)_with_1_workers -Metric Value Description ------- ----- ----------- -Workers 1 -Total Ops 168 (Low Sample Size) -Duration 10.023390959s (Good Duration) -Real Throughput 16.76/s Observed Ops/sec (Wall Clock) -Pure Throughput 17.77/s Theoretical Max (Low Overhead) - -Latency Distribution: - Min 55.180375ms - P50 (Median) 55.945812ms - Average 56.290356ms - P95 58.108814ms - P99 58.758087ms - Max 59.089958ms (Stable Tail) - -Stability Metrics: - Std Dev 898.087µs - IQR 1.383083ms Interquartile Range - Jitter 590.076µs Avg delta per worker - CV 1.60% Excellent Stability (<5%) - -Memory 1301420 B/op Allocated bytes per operation -Allocs 18817 allocs/op Allocations per operation - -Latency Heatmap (Dynamic Range): -Range Freq Distribution Graph - 55.180375ms-55.369563ms 17 █████████████████████████ (10.1%) - 55.369563ms-55.5594ms 18 ██████████████████████████ (10.7%) - 55.5594ms-55.749887ms 27 ████████████████████████████████████████ (16.1%) - 55.749887ms-55.941028ms 20 █████████████████████████████ (11.9%) - 55.941028ms-56.132824ms 13 ███████████████████ (7.7%) - 56.132824ms-56.325277ms 9 █████████████ (5.4%) - 56.325277ms-56.51839ms 4 █████ (2.4%) - 56.51839ms-56.712165ms 6 ████████ (3.6%) - 56.712165ms-56.906605ms 9 █████████████ (5.4%) - 56.906605ms-57.101711ms 13 ███████████████████ (7.7%) - 57.101711ms-57.297486ms 10 ██████████████ (6.0%) - 57.297486ms-57.493933ms 3 ████ (1.8%) - 57.493933ms-57.691053ms 3 ████ (1.8%) - 57.691053ms-57.888849ms 4 █████ 
(2.4%) - 57.888849ms-58.087323ms 3 ████ (1.8%) - 58.087323ms-58.286478ms 2 ██ (1.2%) - 58.286478ms-58.486315ms 2 ██ (1.2%) - 58.486315ms-58.686837ms 2 ██ (1.2%) - 58.686837ms-58.888047ms 2 ██ (1.2%) - 58.888047ms-59.089958ms 1 █ (0.6%) - ---- Analysis & Recommendations --- -[WARN] Low sample size (168). Results may not be statistically significant. Run for longer. -[INFO] High Allocations (18817/op). This will trigger frequent GC cycles and increase Max Latency. ----------------------------------- === RUN TestParallelBenchmarkSender/Setup(bits_32,_curve_BN254,_#i_2,_#o_2)_with_10_workers -Metric Value Description ------- ----- ----------- -Workers 10 -Total Ops 1232 (Low Sample Size) -Duration 10.070877291s (Good Duration) -Real Throughput 122.33/s Observed Ops/sec (Wall Clock) -Pure Throughput 130.12/s Theoretical Max (Low Overhead) +Metric Value Description +------ ----- ----------- +Workers 10 +Total Ops 1230 (Low Sample Size) +Duration 10.068s (Good Duration) +Real Throughput 122.17/s Observed Ops/sec (Wall Clock) +Pure Throughput 123.04/s Theoretical Max (Low Overhead) Latency Distribution: - Min 61.2545ms - P50 (Median) 75.461375ms - Average 76.852256ms - P95 93.50851ms - P99 106.198982ms - Max 144.872375ms (Stable Tail) + Min 59.895916ms + P50 (Median) 77.717333ms + Average 81.27214ms + P95 112.28194ms + P99 137.126207ms + P99.9 189.117473ms + Max 215.981417ms (Stable Tail) Stability Metrics: - Std Dev 9.28799ms - IQR 10.909229ms Interquartile Range - Jitter 9.755984ms Avg delta per worker - CV 12.09% Moderate Variance (10-20%) - -Memory 1282384 B/op Allocated bytes per operation -Allocs 18668 allocs/op Allocations per operation + Std Dev 16.96192ms + IQR 19.050834ms Interquartile Range + Jitter 15.937043ms Avg delta per worker + CV 20.87% Unstable (>20%) - Result is Noisy + +System Health & Reliability: + Error Rate 0.0000% (100% Success) (0 errors) + Memory 1159374 B/op Allocated bytes per operation + Allocs 17213 allocs/op Allocations per operation + Alloc Rate 133.20 MB/s Memory pressure on system + GC Overhead 1.27% (High GC Pressure) + GC Pause 127.435871ms Total Stop-The-World time + GC Cycles 264 Full garbage collection cycles Latency Heatmap (Dynamic Range): Range Freq Distribution Graph - 61.2545ms-63.948502ms 36 ███████ (2.9%) - 63.948502ms-66.760987ms 86 █████████████████ (7.0%) - 66.760987ms-69.697167ms 152 ███████████████████████████████ (12.3%) - 69.697167ms-72.762481ms 181 █████████████████████████████████████ (14.7%) - 72.762481ms-75.962609ms 195 ████████████████████████████████████████ (15.8%) - 75.962609ms-79.303481ms 179 ████████████████████████████████████ (14.5%) - 79.303481ms-82.791286ms 152 ███████████████████████████████ (12.3%) - 82.791286ms-86.432486ms 94 ███████████████████ (7.6%) - 86.432486ms-90.233828ms 59 ████████████ (4.8%) - 90.233828ms-94.202355ms 40 ████████ (3.2%) - 94.202355ms-98.345419ms 29 █████ (2.4%) - 98.345419ms-102.670697ms 9 █ (0.7%) - 102.670697ms-107.186203ms 8 █ (0.6%) - 107.186203ms-111.900303ms 4 (0.3%) - 111.900303ms-116.821732ms 2 (0.2%) - 116.821732ms-121.959608ms 3 (0.2%) - 121.959608ms-127.32345ms 1 (0.1%) - 127.32345ms-132.923196ms 1 (0.1%) - 138.769222ms-144.872375ms 1 (0.1%) + 59.895916ms-63.862831ms 98 ██████████████████████ (8.0%) + 63.862831ms-68.092476ms 163 ████████████████████████████████████ (13.3%) + 68.092476ms-72.602251ms 170 ██████████████████████████████████████ (13.8%) + 72.602251ms-77.410709ms 172 ██████████████████████████████████████ (14.0%) + 77.410709ms-82.537631ms 177 
████████████████████████████████████████ (14.4%) + 82.537631ms-88.004111ms 128 ████████████████████████████ (10.4%) + 88.004111ms-93.832637ms 119 ██████████████████████████ (9.7%) + 93.832637ms-100.047186ms 73 ████████████████ (5.9%) + 100.047186ms-106.673326ms 40 █████████ (3.3%) + 106.673326ms-113.738317ms 32 ███████ (2.6%) + 113.738317ms-121.271222ms 20 ████ (1.6%) + 121.271222ms-129.303034ms 14 ███ (1.1%) + 129.303034ms-137.866793ms 12 ██ (1.0%) + 137.866793ms-146.997731ms 3 (0.2%) + 146.997731ms-156.733413ms 4 (0.3%) + 167.11389ms-178.181868ms 2 (0.2%) + 178.181868ms-189.98288ms 1 (0.1%) + 189.98288ms-202.565475ms 1 (0.1%) + 202.565475ms-215.981417ms 1 (0.1%) --- Analysis & Recommendations --- -[WARN] Low sample size (1232). Results may not be statistically significant. Run for longer. -[INFO] High Allocations (18668/op). This will trigger frequent GC cycles and increase Max Latency. +[WARN] Low sample size (1230). Results may not be statistically significant. Run for longer. +[FAIL] High Variance (CV 20.87%). System noise is affecting results. Isolate the machine or increase duration. +[INFO] High Allocations (17213/op). This will trigger frequent GC cycles and increase Max Latency. ---------------------------------- ---- PASS: TestParallelBenchmarkSender (20.83s) - --- PASS: TestParallelBenchmarkSender/Setup(bits_32,_curve_BN254,_#i_2,_#o_2)_with_1_workers (10.39s) - --- PASS: TestParallelBenchmarkSender/Setup(bits_32,_curve_BN254,_#i_2,_#o_2)_with_10_workers (10.44s) + +--- Throughput Timeline --- +Timeline: [▇▇▇█▇▇▇▇▆▇] (Max: 131 ops/s) + +--- PASS: TestParallelBenchmarkSender (13.97s) + --- PASS: TestParallelBenchmarkSender/Setup(bits_32,_curve_BN254,_#i_2,_#o_2)_with_10_workers (13.96s) PASS -ok github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/transfer 21.409s +ok github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/transfer 14.566s ``` \ No newline at end of file diff --git a/docs/benchmark/dlognogh/transfer_BenchmarkSender_results.md b/docs/benchmark/core/dlognogh/transfer_BenchmarkSender_results.md similarity index 99% rename from docs/benchmark/dlognogh/transfer_BenchmarkSender_results.md rename to docs/benchmark/core/dlognogh/transfer_BenchmarkSender_results.md index 676cf1d07c..13232c799c 100644 --- a/docs/benchmark/dlognogh/transfer_BenchmarkSender_results.md +++ b/docs/benchmark/core/dlognogh/transfer_BenchmarkSender_results.md @@ -1,6 +1,6 @@ ## Benchmark Results: `token/core/zkatdlog/nogh/v1/transfer#BenchmarkSender` -The output of `go test` can be found [here](./transfer_results.txt). +The output of `go test` can be found [here](transfer_results.txt). Here is the summary produced by `benchstat`. diff --git a/docs/benchmark/dlognogh/transfer_results.txt b/docs/benchmark/core/dlognogh/transfer_results.txt similarity index 100% rename from docs/benchmark/dlognogh/transfer_results.txt rename to docs/benchmark/core/dlognogh/transfer_results.txt diff --git a/docs/benchmark/services/identity/idemix.md b/docs/benchmark/services/identity/idemix.md new file mode 100644 index 0000000000..01eb12f998 --- /dev/null +++ b/docs/benchmark/services/identity/idemix.md @@ -0,0 +1,81 @@ +# Identity Service - Idemix Benchmarks + +Packages with benchmark tests: + +- `token/services/identity/idemix`: + - `TestParallelBenchmarkIdemixKMIdentity`: Generation of a pseudonym. + - `TestParallelBenchmarkIdemixSign`: Generation of a signature given a pseudonym. + - `TestParallelBenchmarkIdemixVerify`: Verification of a signature. 
+ - `TestParallelBenchmarkIdemixDeserializeSigner`: Deserialization of a Signer given a pseudonym. + +Here is an execution example: + +```shell +➜ fabric-token-sdk git:(1284-dlog-validator-service-benchmark) ✗ go test ./token/services/identity/idemix -test.run=TestParallelBenchmarkIdemixDeserializeSigner -test.v -test.timeout 0 -workers="NumCPU" -duration="10s" -setup_samples=128 +=== RUN TestParallelBenchmarkIdemixDeserializeSigner +Metric Value Description +------ ----- ----------- +Workers 10 +Total Ops 18494 (Robust Sample) +Duration 10.026s (Good Duration) +Real Throughput 1844.65/s Observed Ops/sec (Wall Clock) +Pure Throughput 1845.74/s Theoretical Max (Low Overhead) + +Latency Distribution: + Min 4.326583ms + P50 (Median) 4.409667ms + Average 5.417878ms + P95 11.517116ms + P99 16.813871ms + P99.9 26.423944ms + Max 98.053292ms (Stable Tail) + +Stability Metrics: + Std Dev 2.798676ms + IQR 259.906µs Interquartile Range + Jitter 1.502269ms Avg delta per worker + CV 51.66% Unstable (>20%) - Result is Noisy + +System Health & Reliability: + Error Rate 0.0000% (100% Success) (0 errors) + Memory 60665 B/op Allocated bytes per operation + Allocs 694 allocs/op Allocations per operation + Alloc Rate 103.69 MB/s Memory pressure on system + GC Overhead 0.40% (Healthy) + GC Pause 39.798795ms Total Stop-The-World time + GC Cycles 92 Full garbage collection cycles + +Latency Heatmap (Dynamic Range): +Range Freq Distribution Graph + 4.326583ms-5.057208ms 14749 ████████████████████████████████████████ (79.8%) + 5.057208ms-5.911214ms 889 ██ (4.8%) + 5.911214ms-6.909436ms 535 █ (2.9%) + 6.909436ms-8.076226ms 444 █ (2.4%) + 8.076226ms-9.44005ms 434 █ (2.3%) + 9.44005ms-11.034182ms 435 █ (2.4%) + 11.034182ms-12.897514ms 302 (1.6%) + 12.897514ms-15.075505ms 373 █ (2.0%) + 15.075505ms-17.621292ms 196 (1.1%) + 17.621292ms-20.596982ms 78 (0.4%) + 20.596982ms-24.075175ms 32 (0.2%) + 24.075175ms-28.140727ms 15 (0.1%) + 28.140727ms-32.892825ms 5 (0.0%) + 32.892825ms-38.447405ms 2 (0.0%) + 38.447405ms-44.939982ms 1 (0.0%) + 44.939982ms-52.528953ms 2 (0.0%) + 52.528953ms-61.399467ms 1 (0.0%) + 83.88732ms-98.053292ms 1 (0.0%) + +--- Analysis & Recommendations --- +[FAIL] High Variance (CV 51.66%). System noise is affecting results. Isolate the machine or increase duration. +[INFO] High Allocations (694/op). This will trigger frequent GC cycles and increase Max Latency. 
+---------------------------------- + +--- Throughput Timeline --- +Timeline: [▇▇▇▇▇▇▇█▇▇] (Max: 1906 ops/s) + +--- PASS: TestParallelBenchmarkIdemixDeserializeSigner (13.82s) +PASS +ok github.com/hyperledger-labs/fabric-token-sdk/token/services/identity/idemix 14.365s + +``` \ No newline at end of file diff --git a/go.mod b/go.mod index 1ff7c44db8..ed65bd19ff 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( go.uber.org/dig v1.18.0 go.uber.org/zap v1.27.0 golang.org/x/crypto v0.45.0 - golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b + golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 golang.org/x/sync v0.18.0 google.golang.org/protobuf v1.36.10 gopkg.in/yaml.v2 v2.4.0 @@ -282,14 +282,14 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/arch v0.11.0 // indirect - golang.org/x/mod v0.29.0 // indirect + golang.org/x/mod v0.30.0 // indirect golang.org/x/net v0.47.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sys v0.38.0 // indirect - golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 // indirect + golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect golang.org/x/text v0.31.0 // indirect golang.org/x/time v0.8.0 // indirect - golang.org/x/tools v0.38.0 // indirect + golang.org/x/tools v0.39.0 // indirect gonum.org/v1/gonum v0.16.0 // indirect google.golang.org/api v0.215.0 // indirect google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect diff --git a/go.sum b/go.sum index 40f4bf9e8f..a76158eb7e 100644 --- a/go.sum +++ b/go.sum @@ -1702,8 +1702,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 h1:DHNhtq3sNNzrvduZZIiFyXWOL9IWaDPHqTnLJp+rCBY= +golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1750,8 +1750,8 @@ golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= -golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1975,8 +1975,8 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 h1:LvzTn0GQhWuvKH/kVRS3R3bVAsdQWI7hvfLHGgh9+lU= -golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2095,8 +2095,8 @@ golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= -golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= -golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/token/core/common/benchmark/runner.go b/token/core/common/benchmark/runner.go deleted file mode 100644 index 71f6eae930..0000000000 --- a/token/core/common/benchmark/runner.go +++ /dev/null @@ -1,781 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package benchmark - -import ( - "fmt" - "math" - "os" - "runtime" - "sort" - "sync" - "sync/atomic" - "text/tabwriter" - "time" -) - -// Result holds the comprehensive benchmark metrics. -type Result struct { - GoRoutines int - OpsTotal uint64 - Duration time.Duration - OpsPerSecReal float64 - OpsPerSecPure float64 - - AvgLatency time.Duration - StdDevLatency time.Duration - Variance float64 - - P50Latency time.Duration - P75Latency time.Duration - P95Latency time.Duration - P99Latency time.Duration - MinLatency time.Duration - MaxLatency time.Duration - - IQR time.Duration // Interquartile Range (measure of spread) - Jitter time.Duration // Avg change between consecutive latencies - - CoeffVar float64 - BytesPerOp uint64 - AllocsPerOp uint64 - - Histogram []Bucket -} - -// Bucket represents a latency range and its frequency. 
-type Bucket struct { - LowBound time.Duration - HighBound time.Duration - Count int -} - -// chunk holds a fixed-size batch of latencies to prevent slice resizing costs. -// -// SANITY CHECK: We keep a simple fixed array + linked-list so that the -// benchmark's hot path does not allocate on every operation when recording -// latency. -const chunkSize = 10000 - -type chunk struct { - data [chunkSize]time.Duration - next *chunk - idx int // number of valid entries in data -} - -// ANSI Color Codes for output. -const ( - ColorReset = "\033[0m" - ColorRed = "\033[31m" - ColorGreen = "\033[32m" - ColorYellow = "\033[33m" - ColorBlue = "\033[34m" -) - -func (r Result) Print() { - w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) - - cvPct, tailRatio := r.printMainMetrics(w) - - r.printHeatmap(w) - - r.printAnalysis(w, cvPct, tailRatio) - - if err := w.Flush(); err != nil { - _, _ = fmt.Fprintln(os.Stderr, "benchmark: flush error:", err) - } -} - -// printMainMetrics prints the main metrics, latency distribution and stability -// related lines to the provided tabwriter and returns the coefficient of -// variation percent and tailRatio which are later used by the analysis -// section. -func (r Result) printMainMetrics(w *tabwriter.Writer) (cvPct float64, tailRatio float64) { - // Helper for coloring status. - status := func(condition bool, goodMsg, badMsg string) string { - if condition { - return ColorGreen + goodMsg + ColorReset - } - return ColorRed + badMsg + ColorReset - } - - // --- Section 1: Main Metrics --- - writeLine(w, "Metric\tValue\tDescription") - writeLine(w, "------\t-----\t-----------") - writef(w, "Workers\t%d\t\n", r.GoRoutines) - writef( - w, - "Total Ops\t%d\t%s\n", - r.OpsTotal, - status(r.OpsTotal > 10000, "(Robust Sample)", "(Low Sample Size)"), - ) - writef( - w, - "Duration\t%v\t%s\n", - r.Duration, - status(r.Duration > 1*time.Second, "(Good Duration)", "(Too Short < 1s)"), - ) - - writef(w, "Real Throughput\t%.2f/s\tObserved Ops/sec (Wall Clock)\n", r.OpsPerSecReal) - - // Overhead Check. - overheadPct := 0.0 - if r.OpsPerSecPure > 0 && r.OpsPerSecReal > 0 { - // SANITY CHECK: Clamp to [0, 100+] range to avoid NaN/Inf due to - // floating errors in weird edge cases. - overheadPct = (1.0 - (r.OpsPerSecReal / r.OpsPerSecPure)) * 100 - } - - overheadStatus := "(Low Overhead)" - if overheadPct > 15.0 { - overheadStatus = ColorYellow + fmt.Sprintf("(High Setup Cost: %.1f%%)", overheadPct) + ColorReset - } - - writef(w, "Pure Throughput\t%.2f/s\tTheoretical Max %s\n", r.OpsPerSecPure, overheadStatus) - writeLine(w, "") - - writeLine(w, "Latency Distribution:") - writef(w, " Min\t%v\t\n", r.MinLatency) - writef(w, " P50 (Median)\t%v\t\n", r.P50Latency) - writef(w, " Average\t%v\t\n", r.AvgLatency) - writef(w, " P95\t%v\t\n", r.P95Latency) - writef(w, " P99\t%v\t\n", r.P99Latency) - - // Tail Latency Check. - tailRatio = 0.0 - if r.P99Latency > 0 { - tailRatio = float64(r.MaxLatency) / float64(r.P99Latency) - } - - maxStatus := ColorGreen + "(Stable Tail)" + ColorReset - if tailRatio > 10.0 { - maxStatus = ColorRed + fmt.Sprintf("(Extreme Outliers: Max is %.1fx P99)", tailRatio) + ColorReset - } - writef(w, " Max\t%v\t%s\n", r.MaxLatency, maxStatus) - writeLine(w, "") - - writeLine(w, "Stability Metrics:") - writef(w, " Std Dev\t%v\t\n", r.StdDevLatency) - writef(w, " IQR\t%v\tInterquartile Range\n", r.IQR) - writef(w, " Jitter\t%v\tAvg delta per worker\n", r.Jitter) - - // CV Check. 
- cvPct = r.CoeffVar * 100 - cvStatus := ColorGreen + "Excellent Stability (<5%)" + ColorReset - if cvPct > 20.0 { - cvStatus = ColorRed + "Unstable (>20%) - Result is Noisy" + ColorReset - } else if cvPct > 10.0 { - cvStatus = ColorYellow + "Moderate Variance (10-20%)" + ColorReset - } - writef(w, " CV\t%.2f%%\t%s\n", cvPct, cvStatus) - writeLine(w, "") - - writef(w, "Memory\t%d B/op\tAllocated bytes per operation\n", r.BytesPerOp) - writef(w, "Allocs\t%d allocs/op\tAllocations per operation\n", r.AllocsPerOp) - writeLine(w, "") - - return cvPct, tailRatio -} - -// printHeatmap renders the histogram heatmap section to the provided writer. -func (r Result) printHeatmap(w *tabwriter.Writer) { - writeLine(w, "Latency Heatmap (Dynamic Range):") - writeLine(w, "Range\tFreq\tDistribution Graph") - - maxCount := 0 - for _, b := range r.Histogram { - if b.Count > maxCount { - maxCount = b.Count - } - } - - for _, b := range r.Histogram { - // Skip empty buckets. - if b.Count == 0 { - continue - } - - // 1. Draw Bar. - barLen := 0 - if maxCount > 0 { - // SANITY CHECK: Scale to at most ~40 chars so the output remains readable. - barLen = (b.Count * 40) / maxCount - } - - ratio := 0.0 - if maxCount > 0 { - ratio = float64(b.Count) / float64(maxCount) - } - - // Heat Color Logic. - color := ColorBlue - if ratio > 0.75 { - color = ColorRed - } else if ratio > 0.3 { - color = ColorYellow - } else if ratio > 0.1 { - color = ColorGreen - } - - bar := "" - for i := 0; i < barLen; i++ { - bar += "█" - } - - // 2. Format Label. - label := fmt.Sprintf("%v-%v", b.LowBound, b.HighBound) - // Visual fix for very small buckets. - if b.LowBound.Round(time.Microsecond) == b.HighBound.Round(time.Microsecond) && - b.HighBound-b.LowBound < time.Microsecond { - label = fmt.Sprintf("%dns-%dns", b.LowBound.Nanoseconds(), b.HighBound.Nanoseconds()) - } - - percentage := 0.0 - if r.OpsTotal > 0 { - percentage = (float64(b.Count) / float64(r.OpsTotal)) * 100 - } - - writef( - w, - " %s\t%d\t%s%s %s(%.1f%%)\n", - label, - b.Count, - color, - bar, - ColorReset, - percentage, - ) - } -} - -// printAnalysis prints the analysis and recommendations section. It uses the -// precomputed cvPct and tailRatio to produce the same messaging as before. -func (r Result) printAnalysis(w *tabwriter.Writer, cvPct float64, tailRatio float64) { - writeLine(w, "") - writeLine(w, ColorBlue+"--- Analysis & Recommendations ---"+ColorReset) - - // 1. Sample Size Check. - if r.OpsTotal < 5000 { - writef( - w, - "%s[WARN] Low sample size (%d). Results may not be statistically significant. Run for longer.%s\n", - ColorRed, - r.OpsTotal, - ColorReset, - ) - } - - // 2. Duration Check. - if r.Duration < 1*time.Second { - writef( - w, - "%s[WARN] Test ran for less than 1s. Go runtime/scheduler might not have stabilized.%s\n", - ColorYellow, - ColorReset, - ) - } - - // 3. Variance Check. - if cvPct > 20.0 { - writef( - w, - "%s[FAIL] High Variance (CV %.2f%%). System noise is affecting results. "+ - "Isolate the machine or increase duration.%s\n", - ColorRed, - cvPct, - ColorReset, - ) - } - - // 4. Memory Check. - if r.AllocsPerOp > 100 { - writef( - w, - "%s[INFO] High Allocations (%d/op). This will trigger frequent GC cycles and increase Max Latency.%s\n", - ColorYellow, - r.AllocsPerOp, - ColorReset, - ) - } - - // 5. Outlier Check. - if tailRatio > 20.0 { - writef( - w, - "%s[CRITICAL] Massive Latency Spikes Detected. Max is %.0fx higher than P99. 
"+ - "Check for Stop-The-World GC or Lock Contention.%s\n", - ColorRed, - tailRatio, - ColorReset, - ) - } - - if cvPct < 10.0 && r.OpsTotal > 10000 && tailRatio < 10.0 { - writef( - w, - "%s[PASS] Benchmark looks healthy and statistically sound.%s\n", - ColorGreen, - ColorReset, - ) - } - - writeLine(w, "----------------------------------") -} - -// safe write helpers used to centralize error handling for tabwriter writes. -func writef(w *tabwriter.Writer, format string, a ...interface{}) { - _, err := fmt.Fprintf(w, format, a...) - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, "benchmark: write error:", err) - } -} - -func writeLine(w *tabwriter.Writer, s string) { - _, err := fmt.Fprintln(w, s) - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, "benchmark: write error:", err) - } -} - -func RunBenchmark[T any]( - workers int, - benchDuration time.Duration, - setup func() T, - work func(T), -) Result { - // SANITY CHECK: Ensure we have at least one worker and a positive duration. - if workers <= 0 { - workers = 1 - } - if benchDuration <= 0 { - benchDuration = 1 * time.Second - } - - // --------------------------------------------------------- - // PHASE 1: Memory Analysis (Serial & Isolated) - // --------------------------------------------------------- - // - // Measure memory in isolation to avoid contamination from benchmark - // infrastructure (channels, sync, etc.). - - var totalAllocs, totalBytes uint64 - const memSamples = 5 - - for i := 0; i < memSamples; i++ { - // SANITY CHECK: Force GC before each measurement to get a clean baseline. - // Call GC twice to ensure finalization of objects from previous iteration. - runtime.GC() - runtime.GC() - - // Sleep briefly to allow GC to fully complete before measurement. - // Without this, overlapping GC from previous iteration can contaminate - // measurements. - time.Sleep(10 * time.Millisecond) - - var memBefore, memAfter runtime.MemStats - runtime.ReadMemStats(&memBefore) - - // Create data inside the measurement window to avoid counting setup - // allocations made before the baseline. - data := setup() - work(data) - - runtime.ReadMemStats(&memAfter) - - // SANITY CHECK: Ensure we're measuring deltas, not absolute values. - // This accounts for allocations made specifically by setup() + work(). - totalAllocs += memAfter.Mallocs - memBefore.Mallocs - totalBytes += memAfter.TotalAlloc - memBefore.TotalAlloc - } - - // SANITY CHECK: Average over multiple samples to reduce noise from GC timing - // variance. Note that memSamples is the number of ops in this phase. - allocs := totalAllocs / uint64(memSamples) - bytes := totalBytes / uint64(memSamples) - - // --------------------------------------------------------- - // PHASE 2: Throughput & Latency (Concurrent) - // --------------------------------------------------------- - - // SANITY CHECK: Clean slate for Phase 2 - no contamination from Phase 1. - runtime.GC() - runtime.GC() - time.Sleep(10 * time.Millisecond) - - var ( - running int32 // 1 = running, 0 = stopped - - startWg sync.WaitGroup - endWg sync.WaitGroup - ) - - workerResults := make([]*chunk, workers) - - atomic.StoreInt32(&running, 1) - startWg.Add(workers) - endWg.Add(workers) - - for i := 0; i < workers; i++ { - workerID := i - - go func() { - defer endWg.Done() - - // Initialize first chunk for this worker. - currentChunk := &chunk{} - headChunk := currentChunk - - // Signal readiness and wait for all workers. - startWg.Done() - // SANITY CHECK: Barrier - all workers start simultaneously. 
- // This ensures fair timing and prevents early-bird bias. - startWg.Wait() - - // SANITY CHECK: Loop continues until global stop signal. - // Using atomic load ensures memory visibility across goroutines. - for atomic.LoadInt32(&running) == 1 { - // 1. Setup: Create test data (not timed). - d := setup() - - // 2. Work: Execute the operation we're benchmarking (timed). - t0 := time.Now() - work(d) - dur := time.Since(t0) - - // 3. Record latency. - // - // SANITY CHECK: Check chunk capacity BEFORE writing to avoid overflow. - if currentChunk.idx >= chunkSize { - newC := &chunk{} - currentChunk.next = newC - currentChunk = newC - } - - // SANITY CHECK: Store latency in pre-allocated array (no allocation - // overhead on the hot path). - currentChunk.data[currentChunk.idx] = dur - currentChunk.idx++ - } - - // Save head pointer for post-processing. - // Each worker maintains its own linked list of chunks. - workerResults[workerID] = headChunk - }() - } - - // SANITY CHECK: Ensure all workers are created and waiting before starting - // the timer. This prevents skew from goroutine creation overhead. - startWg.Wait() - - startGlobal := time.Now() - - // Sleep for the benchmark duration - workers run concurrently during this time. - time.Sleep(benchDuration) - - // SANITY CHECK: Signal all workers to stop and wait for cleanup. - // Using atomic store ensures all workers see the stop signal promptly. - atomic.StoreInt32(&running, 0) - endWg.Wait() - - globalDuration := time.Since(startGlobal) - if globalDuration <= 0 { - // Avoid division by zero; in practice, this should not happen with sane durations. - globalDuration = 1 - } - - // --------------------------------------------------------- - // PHASE 3: Statistical Analysis - // --------------------------------------------------------- - - // Count all operations INCLUDING partial chunks at the end. - var totalOps uint64 - var totalTimeNs int64 - - // First pass: count operations and sum latencies. - for _, head := range workerResults { - curr := head - for curr != nil { - // SANITY CHECK: Only process valid entries (idx tells us how many were written). - limit := curr.idx - totalOps += uint64(limit) - - for k := 0; k < limit; k++ { - // Use int64 for nanosecond accumulation to prevent overflow. - totalTimeNs += int64(curr.data[k]) - } - - curr = curr.next - } - } - - // SANITY CHECK: Pre-allocate with exact-ish capacity to avoid resizing overhead - // during analysis. This keeps analysis overhead out of the hot path. - allLatencies := make([]time.Duration, 0, int(totalOps)) - - // Calculate jitter per-worker only (not across workers). - var totalJitter float64 - var totalJitterSamples uint64 - - for _, head := range workerResults { - curr := head - - var prevLat time.Duration - firstInWorker := true - - for curr != nil { - limit := curr.idx - for k := 0; k < limit; k++ { - val := curr.data[k] - allLatencies = append(allLatencies, val) - - // SANITY CHECK: Only calculate jitter within a worker's sequence. - if !firstInWorker { - diff := float64(val - prevLat) - if diff < 0 { - diff = -diff - } - totalJitter += diff - totalJitterSamples++ - } - - prevLat = val - firstInWorker = false - } - curr = curr.next - } - } - - var ( - avgLatency time.Duration - stdDev time.Duration - variance float64 - p50, p75 time.Duration - p95, p99 time.Duration - minLat, maxLat time.Duration - iqr time.Duration - jitter time.Duration - coeffVar float64 - pureThroughput float64 - ) - - if totalOps > 0 { - // Jitter calculation. 
- if totalJitterSamples > 0 { - jitter = time.Duration(totalJitter / float64(totalJitterSamples)) - } - - // Basic stats: average latency. - avgLatency = time.Duration(totalTimeNs / int64(totalOps)) - - // SANITY CHECK: Pure throughput is theoretical max if setup() had zero cost. - // Guard against zero avgLatency to avoid Inf. - if avgLatency > 0 { - pureThroughput = float64(workers) / avgLatency.Seconds() - } - - // Sort for percentiles - required for accurate quantile calculations. - sort.Slice(allLatencies, func(i, j int) bool { - return allLatencies[i] < allLatencies[j] - }) - - minLat = allLatencies[0] - maxLat = allLatencies[len(allLatencies)-1] - - // Use linear interpolation for percentiles. - p50 = percentileInterpolated(allLatencies, 0.50) - p75 = percentileInterpolated(allLatencies, 0.75) - p95 = percentileInterpolated(allLatencies, 0.95) - p99 = percentileInterpolated(allLatencies, 0.99) - p25 := percentileInterpolated(allLatencies, 0.25) - - // IQR measures the spread of the middle 50% of data. - iqr = p75 - p25 - - // Use population variance (divide by n) as we're observing full data, not a sample. - meanNs := float64(avgLatency.Nanoseconds()) - var sumSquaredDiff float64 - - for _, lat := range allLatencies { - diff := float64(lat.Nanoseconds()) - meanNs - sumSquaredDiff += diff * diff - } - - if len(allLatencies) > 0 { - variance = sumSquaredDiff / float64(len(allLatencies)) - stdDev = time.Duration(math.Sqrt(variance)) - if avgLatency > 0 { - coeffVar = float64(stdDev) / float64(avgLatency) - } - } - } - - // Build histogram with improved boundary handling. - hist := calcExponentialHistogramImproved(allLatencies, minLat, maxLat, 20) - - return Result{ - GoRoutines: workers, - OpsTotal: totalOps, - Duration: globalDuration, - OpsPerSecReal: float64(totalOps) / globalDuration.Seconds(), - OpsPerSecPure: pureThroughput, - - AvgLatency: avgLatency, - StdDevLatency: stdDev, - Variance: variance, - - P50Latency: p50, - P75Latency: p75, - P95Latency: p95, - P99Latency: p99, - MinLatency: minLat, - MaxLatency: maxLat, - - IQR: iqr, - Jitter: jitter, - CoeffVar: coeffVar, - BytesPerOp: bytes, - AllocsPerOp: allocs, - Histogram: hist, - } -} - -// percentileInterpolated computes a percentile using linear interpolation. -// -// SANITY CHECK: sorted must be non-empty for meaningful results. -func percentileInterpolated(sorted []time.Duration, p float64) time.Duration { - if len(sorted) == 0 { - return 0 - } - - // Edge cases. - if p <= 0 { - return sorted[0] - } - if p >= 1 { - return sorted[len(sorted)-1] - } - - // Calculate the exact position (0-indexed, can be fractional). - pos := p * float64(len(sorted)-1) - lower := int(math.Floor(pos)) - upper := int(math.Ceil(pos)) - - // If position is exactly on an index, return that value. - if lower == upper { - return sorted[lower] - } - - // Linear interpolation between adjacent values. - fraction := pos - float64(lower) - lowerVal := float64(sorted[lower]) - upperVal := float64(sorted[upper]) - interpolated := lowerVal + fraction*(upperVal-lowerVal) - - return time.Duration(interpolated) -} - -// calcExponentialHistogramImproved builds an exponential histogram with better -// boundary precision and O(n) assignment. -func calcExponentialHistogramImproved( - latencies []time.Duration, - min time.Duration, - max time.Duration, - bucketCount int, -) []Bucket { - if len(latencies) == 0 || bucketCount <= 0 { - return nil - } - - // SANITY CHECK: Ensure min is positive for log calculations. 
- if min <= 0 { - min = 1 - } - if max < min { - max = min - } - - buckets := make([]Bucket, bucketCount) - - // 1. Calculate the geometric growth factor. - var factor float64 - if min == max { - // All values identical, a single effective bucket. - factor = 1.0 - } else { - // Formula: min * factor^N = max. - ratio := float64(max) / float64(min) - factor = math.Pow(ratio, 1.0/float64(bucketCount)) - } - - // 2. Initialize Bucket Boundaries using int64 nanoseconds to avoid float drift. - currentLowerNs := min.Nanoseconds() - - for i := 0; i < bucketCount; i++ { - var currentUpperNs int64 - if i == bucketCount-1 { - // SANITY CHECK: Snap last bucket strictly to max to avoid floating - // point errors that might drop values outside the histogram. - currentUpperNs = max.Nanoseconds() - } else { - if factor == 1.0 { - currentUpperNs = max.Nanoseconds() - } else { - currentUpperNs = int64(float64(currentLowerNs) * factor) - } - if currentUpperNs < currentLowerNs { - // Guard against rounding weirdness. - currentUpperNs = currentLowerNs - } - } - - buckets[i] = Bucket{ - LowBound: time.Duration(currentLowerNs), - HighBound: time.Duration(currentUpperNs), - Count: 0, - } - - currentLowerNs = currentUpperNs - } - - // 3. Populate Counts using Logarithmic Indexing (O(n)). - logMin := math.Log(float64(min.Nanoseconds())) - logFactor := 0.0 - if factor != 1.0 { - logFactor = math.Log(factor) - } - - for _, lat := range latencies { - valNs := float64(lat.Nanoseconds()) - if valNs <= 0 { - // Extremely small or zero value, put into first bucket. - buckets[0].Count++ - continue - } - - var idx int - if min == max || logFactor == 0 { - // All values the same or factor degenerate. - idx = 0 - } else { - // Inverse of the exponential function to find index directly: - // idx = floor((log(value) - log(min)) / log(factor)). - idx = int(math.Floor((math.Log(valNs) - logMin) / logFactor)) - } - - // Clamp index to [0, bucketCount-1] to handle precision edge cases. - if idx < 0 { - idx = 0 - } - if idx >= bucketCount { - idx = bucketCount - 1 - } - - buckets[idx].Count++ - } - - return buckets -} diff --git a/token/core/zkatdlog/nogh/v1/benchmark/setup.go b/token/core/zkatdlog/nogh/v1/benchmark/setup.go new file mode 100644 index 0000000000..812b816dfe --- /dev/null +++ b/token/core/zkatdlog/nogh/v1/benchmark/setup.go @@ -0,0 +1,233 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package benchmark + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "fmt" + "os" + "path/filepath" + + "github.com/IBM/idemix/bccsp/types" + math "github.com/IBM/mathlib" + "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/errors" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/msp/x509" + math2 "github.com/hyperledger-labs/fabric-token-sdk/token/core/common/crypto/math" + "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/setup" + "github.com/hyperledger-labs/fabric-token-sdk/token/driver" + "github.com/hyperledger-labs/fabric-token-sdk/token/services/identity" + idemix2 "github.com/hyperledger-labs/fabric-token-sdk/token/services/identity/idemix" + "github.com/hyperledger-labs/fabric-token-sdk/token/services/identity/idemix/crypto" + "github.com/hyperledger-labs/fabric-token-sdk/token/services/identity/storage/kvs" + ix509 "github.com/hyperledger-labs/fabric-token-sdk/token/services/identity/x509" + crypto2 "github.com/hyperledger-labs/fabric-token-sdk/token/services/identity/x509/crypto" +) + +type SetupConfiguration struct { + PP *setup.PublicParams + OwnerIdentity *OwnerIdentity + AuditorSigner *Signer + IssuerSigner *Signer +} + +type SetupConfigurations struct { + Configurations map[string]*SetupConfiguration +} + +func NewSetupConfigurations(idemixTestdataPath string, bits []uint64, curveIDs []math.CurveID) (*SetupConfigurations, error) { + configurations := map[string]*SetupConfiguration{} + for _, curveID := range curveIDs { + var ipk []byte + var err error + var oID *OwnerIdentity + switch curveID { + case math.BN254: + idemixPath := filepath.Join(idemixTestdataPath, "bn254", "idemix") + ipk, err = os.ReadFile(filepath.Join(idemixPath, "msp", "IssuerPublicKey")) + if err != nil { + return nil, err + } + oID, err = loadOwnerIdentity(context.Background(), idemixPath, curveID) + if err != nil { + return nil, err + } + case math.BLS12_381_BBS_GURVY: + fallthrough + case math2.BLS12_381_BBS_GURVY_FAST_RNG: + idemixPath := filepath.Join(idemixTestdataPath, "bls12_381_bbs", "idemix") + ipk, err = os.ReadFile(filepath.Join(idemixPath, "msp", "IssuerPublicKey")) + if err != nil { + return nil, err + } + oID, err = loadOwnerIdentity(context.Background(), idemixPath, curveID) + if err != nil { + return nil, err + } + default: + return nil, errors.Errorf("curveID [%d] not found", curveID) + } + + auditorSigner, err := PrepareECDSASigner() + if err != nil { + return nil, err + } + issuerSigner, err := NewECDSASigner() + if err != nil { + return nil, err + } + + for _, bit := range bits { + pp, err := setup.Setup(bit, ipk, curveID) + if err != nil { + return nil, err + } + issuerID, err := issuerSigner.Serialize() + if err != nil { + return nil, err + } + pp.AddIssuer(issuerID) + auditorID, err := auditorSigner.Serialize() + if err != nil { + return nil, err + } + pp.AddAuditor(auditorID) + configurations[key(bit, curveID)] = &SetupConfiguration{ + PP: pp, + OwnerIdentity: oID, + AuditorSigner: auditorSigner, + IssuerSigner: issuerSigner, + } + } + } + return &SetupConfigurations{ + Configurations: configurations, + }, nil +} + +func (c *SetupConfigurations) GetPublicParams(bits uint64, curveID math.CurveID) (*setup.PublicParams, error) { + configuration, ok := c.Configurations[key(bits, curveID)] + if !ok { + return nil, fmt.Errorf("configuration not found") + } + return configuration.PP, nil +} + +func (c *SetupConfigurations) GetSetupConfiguration(bits 
uint64, curveID math.CurveID) (*SetupConfiguration, error) { + configuration, ok := c.Configurations[key(bits, curveID)] + if !ok { + return nil, fmt.Errorf("configuration not found") + } + return configuration, nil +} + +func key(bits uint64, curveID math.CurveID) string { + return fmt.Sprintf("%d-%d", bits, curveID) +} + +type OwnerIdentity struct { + ID driver.Identity + AuditInfo *crypto.AuditInfo + Signer driver.SigningIdentity +} + +func loadOwnerIdentity(ctx context.Context, dir string, curveID math.CurveID) (*OwnerIdentity, error) { + backend, err := kvs.NewInMemory() + if err != nil { + return nil, err + } + config, err := crypto.NewConfig(dir) + if err != nil { + return nil, err + } + keyStore, err := crypto.NewKeyStore(curveID, kvs.Keystore(backend)) + if err != nil { + return nil, err + } + cryptoProvider, err := crypto.NewBCCSP(keyStore, curveID) + if err != nil { + return nil, err + } + p, err := idemix2.NewKeyManager(config, types.EidNymRhNym, cryptoProvider) + if err != nil { + return nil, err + } + + identityDescriptor, err := p.Identity(ctx, nil) + if err != nil { + return nil, err + } + id := identityDescriptor.Identity + audit := identityDescriptor.AuditInfo + + auditInfo, err := p.DeserializeAuditInfo(ctx, audit) + if err != nil { + return nil, err + } + err = auditInfo.Match(ctx, id) + if err != nil { + return nil, err + } + + signer, err := p.DeserializeSigningIdentity(ctx, id) + if err != nil { + return nil, err + } + + id, err = identity.WrapWithType(idemix2.IdentityType, id) + if err != nil { + return nil, err + } + + return &OwnerIdentity{ + ID: id, + AuditInfo: auditInfo, + Signer: signer, + }, nil +} + +func PrepareECDSASigner() (*Signer, error) { + signer, err := NewECDSASigner() + if err != nil { + return nil, err + } + return signer, nil +} + +type Signer struct { + SK *ecdsa.PrivateKey + Signer driver.Signer +} + +func NewECDSASigner() (*Signer, error) { + // Create ephemeral key and store it in the context + sk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, err + } + return &Signer{SK: sk, Signer: crypto2.NewEcdsaSigner(sk)}, nil +} + +func (d *Signer) Sign(message []byte) ([]byte, error) { + return d.Signer.Sign(message) +} + +func (d *Signer) Serialize() ([]byte, error) { + pkRaw, err := x509.PemEncodeKey(&d.SK.PublicKey) + if err != nil { + return nil, errors.Wrap(err, "failed marshalling public key") + } + + wrap, err := identity.WrapWithType(ix509.IdentityType, pkRaw) + if err != nil { + return nil, errors.Wrap(err, "failed wrapping identity") + } + + return wrap, nil +} diff --git a/token/core/zkatdlog/nogh/v1/crypto/common/array.go b/token/core/zkatdlog/nogh/v1/crypto/common/array.go index a86add8faa..15b4112b55 100644 --- a/token/core/zkatdlog/nogh/v1/crypto/common/array.go +++ b/token/core/zkatdlog/nogh/v1/crypto/common/array.go @@ -8,7 +8,6 @@ package common import ( "bytes" - "encoding/hex" math "github.com/IBM/mathlib" "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/errors" @@ -22,25 +21,28 @@ const Separator = "||" type G1Array []*math.G1 // Bytes serialize an array of G1 elements -func (a *G1Array) Bytes() ([]byte, error) { - raw := make([][]byte, len([]*math.G1(*a))) - for i, e := range []*math.G1(*a) { +func (a G1Array) Bytes() ([]byte, error) { + raw := make([][]byte, len([]*math.G1(a))) + for i, e := range []*math.G1(a) { if e == nil { return nil, errors.Errorf("failed to marshal array of G1") } - st := hex.EncodeToString(e.Bytes()) - raw[i] = []byte(st) + raw[i] = e.Bytes() } // join 
the serialization of the group elements with the predefined separator.
 	return bytes.Join(raw, []byte(Separator)), nil
 }
 
 // GetG1Array takes a series of G1 elements and returns the corresponding array
-func GetG1Array(elements ...[]*math.G1) *G1Array {
-	var array []*math.G1
+func GetG1Array(elements ...[]*math.G1) G1Array {
+	// compute length
+	length := 0
 	for _, e := range elements {
-		array = append(array, e...)
+		length += len(e)
 	}
-	a := G1Array(array)
-	return &a
+	s := make([]*math.G1, 0, length)
+	for _, e := range elements {
+		s = append(s, e...)
+	}
+	return s
 }
diff --git a/token/core/zkatdlog/nogh/v1/issue/issuer_test.go b/token/core/zkatdlog/nogh/v1/issue/issuer_test.go
index 40c866021f..448787f86a 100644
--- a/token/core/zkatdlog/nogh/v1/issue/issuer_test.go
+++ b/token/core/zkatdlog/nogh/v1/issue/issuer_test.go
@@ -11,12 +11,12 @@ import (
 	"testing"
 
 	math "github.com/IBM/mathlib"
-	"github.com/hyperledger-labs/fabric-token-sdk/token/core/common/benchmark"
 	math2 "github.com/hyperledger-labs/fabric-token-sdk/token/core/common/crypto/math"
 	issue2 "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/issue"
 	"github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/issue/mock"
 	v1 "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/setup"
 	"github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/token"
+	benchmark2 "github.com/hyperledger-labs/fabric-token-sdk/token/services/benchmark"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -45,12 +45,12 @@ func TestIssuer(t *testing.T) {
 }
 
 func BenchmarkIssuer(b *testing.B) {
-	bits, err := benchmark.Bits(32, 64)
+	bits, err := benchmark2.Bits(32, 64)
 	require.NoError(b, err)
-	curves := benchmark.Curves(math.BN254, math.BLS12_381_BBS_GURVY, math2.BLS12_381_BBS_GURVY_FAST_RNG)
-	outputs, err := benchmark.NumOutputs(1, 2, 3)
+	curves := benchmark2.Curves(math.BN254, math.BLS12_381_BBS_GURVY, math2.BLS12_381_BBS_GURVY_FAST_RNG)
+	outputs, err := benchmark2.NumOutputs(1, 2, 3)
 	require.NoError(b, err)
-	testCases := benchmark.GenerateCases(bits, curves, nil, outputs, nil)
+	testCases := benchmark2.GenerateCases(bits, curves, nil, outputs, nil)
 
 	for _, tc := range testCases {
 		b.Run(tc.Name, func(b *testing.B) {
@@ -76,12 +76,12 @@ func BenchmarkIssuer(b *testing.B) {
 }
 
 func BenchmarkProofVerificationIssuer(b *testing.B) {
-	bits, err := benchmark.Bits(32, 64)
+	bits, err := benchmark2.Bits(32, 64)
 	require.NoError(b, err)
-	curves := benchmark.Curves(math.BN254, math.BLS12_381_BBS_GURVY, math2.BLS12_381_BBS_GURVY_FAST_RNG)
-	outputs, err := benchmark.NumOutputs(1, 2, 3)
+	curves := benchmark2.Curves(math.BN254, math.BLS12_381_BBS_GURVY, math2.BLS12_381_BBS_GURVY_FAST_RNG)
+	outputs, err := benchmark2.NumOutputs(1, 2, 3)
 	require.NoError(b, err)
-	testCases := benchmark.GenerateCases(bits, curves, nil, outputs, nil)
+	testCases := benchmark2.GenerateCases(bits, curves, nil, outputs, nil)
 
 	for _, tc := range testCases {
 		b.Run(tc.Name, func(b *testing.B) {
@@ -158,7 +158,7 @@ type benchmarkIssuerEnv struct {
 	IssuerEnvs []*issuerEnv
 }
 
-func newBenchmarkIssuerEnv(b *testing.B, n int, benchmarkCase *benchmark.Case) *benchmarkIssuerEnv {
+func newBenchmarkIssuerEnv(b *testing.B, n int, benchmarkCase *benchmark2.Case) *benchmarkIssuerEnv {
 	b.Helper()
 	envs := make([]*issuerEnv, n)
 	pp := setup(b, benchmarkCase.Bits, benchmarkCase.CurveID)
@@ -168,7 +168,7 @@ func newBenchmarkIssuerEnv(b *testing.B, n int, benchmarkCase *benchmark.Case) *
 	return &benchmarkIssuerEnv{IssuerEnvs: envs}
 }
 
-func newBenchmarkIssuerProofVerificationEnv(b *testing.B, n int, benchmarkCase *benchmark.Case) *benchmarkIssuerEnv {
+func newBenchmarkIssuerProofVerificationEnv(b *testing.B, n int, benchmarkCase *benchmark2.Case) *benchmarkIssuerEnv {
 	b.Helper()
 	envs := make([]*issuerEnv, n)
 	pp := setup(b, benchmarkCase.Bits, benchmarkCase.CurveID)
diff --git a/token/core/zkatdlog/nogh/v1/validator/testdata/bls12_381_bbs/ca/ca/IssuerPublicKey b/token/core/zkatdlog/nogh/v1/testdata/bls12_381_bbs/ca/ca/IssuerPublicKey
similarity index 100%
rename from token/core/zkatdlog/nogh/v1/validator/testdata/bls12_381_bbs/ca/ca/IssuerPublicKey
rename to token/core/zkatdlog/nogh/v1/testdata/bls12_381_bbs/ca/ca/IssuerPublicKey
diff --git a/token/core/zkatdlog/nogh/v1/validator/testdata/bls12_381_bbs/ca/ca/IssuerSecretKey b/token/core/zkatdlog/nogh/v1/testdata/bls12_381_bbs/ca/ca/IssuerSecretKey
similarity index 100%
rename from token/core/zkatdlog/nogh/v1/validator/testdata/bls12_381_bbs/ca/ca/IssuerSecretKey
rename to token/core/zkatdlog/nogh/v1/testdata/bls12_381_bbs/ca/ca/IssuerSecretKey
diff --git a/token/core/zkatdlog/nogh/v1/validator/testdata/bls12_381_bbs/ca/ca/RevocationKey b/token/core/zkatdlog/nogh/v1/testdata/bls12_381_bbs/ca/ca/RevocationKey
similarity index 100%
rename from token/core/zkatdlog/nogh/v1/validator/testdata/bls12_381_bbs/ca/ca/RevocationKey
rename to token/core/zkatdlog/nogh/v1/testdata/bls12_381_bbs/ca/ca/RevocationKey
diff --git a/token/core/zkatdlog/nogh/v1/validator/testdata/bls12_381_bbs/ca/msp/IssuerPublicKey b/token/core/zkatdlog/nogh/v1/testdata/bls12_381_bbs/ca/msp/IssuerPublicKey
similarity index 100%
rename from token/core/zkatdlog/nogh/v1/validator/testdata/bls12_381_bbs/ca/msp/IssuerPublicKey
rename to token/core/zkatdlog/nogh/v1/testdata/bls12_381_bbs/ca/msp/IssuerPublicKey
diff --git a/token/core/zkatdlog/nogh/v1/validator/testdata/bls12_381_bbs/ca/msp/RevocationPublicKey b/token/core/zkatdlog/nogh/v1/testdata/bls12_381_bbs/ca/msp/RevocationPublicKey
similarity index 100%
rename from token/core/zkatdlog/nogh/v1/validator/testdata/bls12_381_bbs/ca/msp/RevocationPublicKey
rename to token/core/zkatdlog/nogh/v1/testdata/bls12_381_bbs/ca/msp/RevocationPublicKey
diff --git a/token/core/zkatdlog/nogh/v1/validator/testdata/bls12_381_bbs/idemix/msp/IssuerPublicKey b/token/core/zkatdlog/nogh/v1/testdata/bls12_381_bbs/idemix/msp/IssuerPublicKey
similarity index 100%
rename from token/core/zkatdlog/nogh/v1/validator/testdata/bls12_381_bbs/idemix/msp/IssuerPublicKey
rename to token/core/zkatdlog/nogh/v1/testdata/bls12_381_bbs/idemix/msp/IssuerPublicKey
diff --git a/token/core/zkatdlog/nogh/v1/validator/testdata/bls12_381_bbs/idemix/msp/RevocationPublicKey b/token/core/zkatdlog/nogh/v1/testdata/bls12_381_bbs/idemix/msp/RevocationPublicKey
similarity index 100%
rename from token/core/zkatdlog/nogh/v1/validator/testdata/bls12_381_bbs/idemix/msp/RevocationPublicKey
rename to token/core/zkatdlog/nogh/v1/testdata/bls12_381_bbs/idemix/msp/RevocationPublicKey
diff --git a/token/core/zkatdlog/nogh/v1/validator/testdata/bls12_381_bbs/idemix/user/SignerConfig b/token/core/zkatdlog/nogh/v1/testdata/bls12_381_bbs/idemix/user/SignerConfig
similarity index 100%
rename from token/core/zkatdlog/nogh/v1/validator/testdata/bls12_381_bbs/idemix/user/SignerConfig
rename to token/core/zkatdlog/nogh/v1/testdata/bls12_381_bbs/idemix/user/SignerConfig
diff --git a/token/core/zkatdlog/nogh/v1/validator/testdata/bn254/ca/ca/IssuerPublicKey b/token/core/zkatdlog/nogh/v1/testdata/bn254/ca/ca/IssuerPublicKey
similarity index 100%
rename from token/core/zkatdlog/nogh/v1/validator/testdata/bn254/ca/ca/IssuerPublicKey
rename to token/core/zkatdlog/nogh/v1/testdata/bn254/ca/ca/IssuerPublicKey
diff --git a/token/core/zkatdlog/nogh/v1/validator/testdata/bn254/ca/ca/IssuerSecretKey b/token/core/zkatdlog/nogh/v1/testdata/bn254/ca/ca/IssuerSecretKey
similarity index 100%
rename from token/core/zkatdlog/nogh/v1/validator/testdata/bn254/ca/ca/IssuerSecretKey
rename to token/core/zkatdlog/nogh/v1/testdata/bn254/ca/ca/IssuerSecretKey
diff --git a/token/core/zkatdlog/nogh/v1/validator/testdata/bn254/ca/ca/RevocationKey b/token/core/zkatdlog/nogh/v1/testdata/bn254/ca/ca/RevocationKey
similarity index 100%
rename from token/core/zkatdlog/nogh/v1/validator/testdata/bn254/ca/ca/RevocationKey
rename to token/core/zkatdlog/nogh/v1/testdata/bn254/ca/ca/RevocationKey
diff --git a/token/core/zkatdlog/nogh/v1/validator/testdata/bn254/ca/msp/IssuerPublicKey b/token/core/zkatdlog/nogh/v1/testdata/bn254/ca/msp/IssuerPublicKey
similarity index 100%
rename from token/core/zkatdlog/nogh/v1/validator/testdata/bn254/ca/msp/IssuerPublicKey
rename to token/core/zkatdlog/nogh/v1/testdata/bn254/ca/msp/IssuerPublicKey
diff --git a/token/core/zkatdlog/nogh/v1/validator/testdata/bn254/ca/msp/RevocationPublicKey b/token/core/zkatdlog/nogh/v1/testdata/bn254/ca/msp/RevocationPublicKey
similarity index 100%
rename from token/core/zkatdlog/nogh/v1/validator/testdata/bn254/ca/msp/RevocationPublicKey
rename to token/core/zkatdlog/nogh/v1/testdata/bn254/ca/msp/RevocationPublicKey
diff --git a/token/core/zkatdlog/nogh/v1/validator/testdata/bn254/idemix/msp/IssuerPublicKey b/token/core/zkatdlog/nogh/v1/testdata/bn254/idemix/msp/IssuerPublicKey
similarity index 100%
rename from token/core/zkatdlog/nogh/v1/validator/testdata/bn254/idemix/msp/IssuerPublicKey
rename to token/core/zkatdlog/nogh/v1/testdata/bn254/idemix/msp/IssuerPublicKey
diff --git a/token/core/zkatdlog/nogh/v1/validator/testdata/bn254/idemix/msp/RevocationPublicKey b/token/core/zkatdlog/nogh/v1/testdata/bn254/idemix/msp/RevocationPublicKey
similarity index 100%
rename from token/core/zkatdlog/nogh/v1/validator/testdata/bn254/idemix/msp/RevocationPublicKey
rename to token/core/zkatdlog/nogh/v1/testdata/bn254/idemix/msp/RevocationPublicKey
diff --git a/token/core/zkatdlog/nogh/v1/validator/testdata/bn254/idemix/user/SignerConfig b/token/core/zkatdlog/nogh/v1/testdata/bn254/idemix/user/SignerConfig
similarity index 100%
rename from token/core/zkatdlog/nogh/v1/validator/testdata/bn254/idemix/user/SignerConfig
rename to token/core/zkatdlog/nogh/v1/testdata/bn254/idemix/user/SignerConfig
diff --git a/token/core/zkatdlog/nogh/v1/validator/testdata/generate.go b/token/core/zkatdlog/nogh/v1/testdata/generate.go
similarity index 100%
rename from token/core/zkatdlog/nogh/v1/validator/testdata/generate.go
rename to token/core/zkatdlog/nogh/v1/testdata/generate.go
diff --git a/token/core/zkatdlog/nogh/v1/transfer/sender_test.go b/token/core/zkatdlog/nogh/v1/transfer/sender_test.go
index 2f8e55a3b5..3e2844637a 100644
--- a/token/core/zkatdlog/nogh/v1/transfer/sender_test.go
+++ b/token/core/zkatdlog/nogh/v1/transfer/sender_test.go
@@ -9,19 +9,18 @@ package transfer_test
 import (
 	"context"
 	"fmt"
-	"runtime"
 	"strconv"
 	"testing"
 
 	math "github.com/IBM/mathlib"
 	"github.com/hyperledger-labs/fabric-smart-client/pkg/utils/errors"
-	"github.com/hyperledger-labs/fabric-token-sdk/token/core/common/benchmark"
-	math2
"github.com/hyperledger-labs/fabric-token-sdk/token/core/common/crypto/math" + "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/benchmark" v1 "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/setup" "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/token" "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/transfer" "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/transfer/mock" "github.com/hyperledger-labs/fabric-token-sdk/token/driver" + benchmark2 "github.com/hyperledger-labs/fabric-token-sdk/token/services/benchmark" token2 "github.com/hyperledger-labs/fabric-token-sdk/token/token" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -65,166 +64,176 @@ func TestSender(t *testing.T) { // BenchmarkSender benchmarks transfer action generation and serialization. // This includes the proof generation as well. func BenchmarkSender(b *testing.B) { - bits, err := benchmark.Bits(32, 64) + bits, curves, cases := benchmark2.GenerateCasesWithDefaults(b) + configurations, err := benchmark.NewSetupConfigurations("./../testdata", bits, curves) require.NoError(b, err) - curves := benchmark.Curves(math.BN254, math.BLS12_381_BBS_GURVY, math2.BLS12_381_BBS_GURVY_FAST_RNG) - inputs, err := benchmark.NumInputs(1, 2, 3) - require.NoError(b, err) - outputs, err := benchmark.NumOutputs(1, 2, 3) + + test := benchmark2.NewTest[*benchmarkSenderEnv](cases) + test.GoBenchmark(b, + func(c *benchmark2.Case) (*benchmarkSenderEnv, error) { + return newBenchmarkSenderEnv(1, c, configurations) + }, + func(env *benchmarkSenderEnv) error { + transfer, _, err := env.SenderEnvs[0].sender.GenerateZKTransfer( + b.Context(), + env.SenderEnvs[0].outvalues, + env.SenderEnvs[0].owners, + ) + if err != nil { + return err + } + _, err = transfer.Serialize() + return err + }, + ) +} + +// BenchmarkParallelSender benchmarks parallel transfer action generation and serialization. +func BenchmarkParallelSender(b *testing.B) { + bits, curves, cases := benchmark2.GenerateCasesWithDefaults(b) + configurations, err := benchmark.NewSetupConfigurations("./../testdata", bits, curves) require.NoError(b, err) - testCases := benchmark.GenerateCases(bits, curves, inputs, outputs, []int{1}) - - for _, tc := range testCases { - b.Run(tc.Name, func(b *testing.B) { - env, err := newBenchmarkSenderEnv(b.N, tc.BenchmarkCase) - require.NoError(b, err) - - b.ResetTimer() - - i := 0 - for b.Loop() { - transfer, _, err := env.SenderEnvs[i].sender.GenerateZKTransfer( - b.Context(), - env.SenderEnvs[i].outvalues, - env.SenderEnvs[i].owners, - ) - require.NoError(b, err) - assert.NotNil(b, transfer) - _, err = transfer.Serialize() - require.NoError(b, err) - i++ + + test := benchmark2.NewTest[*benchmarkSenderEnv](cases) + test.GoBenchmarkParallel(b, + func(c *benchmark2.Case) (*benchmarkSenderEnv, error) { + return newBenchmarkSenderEnv(1, c, configurations) + }, + func(env *benchmarkSenderEnv) error { + transfer, _, err := env.SenderEnvs[0].sender.GenerateZKTransfer( + b.Context(), + env.SenderEnvs[0].outvalues, + env.SenderEnvs[0].owners, + ) + if err != nil { + return err } - }) - } + _, err = transfer.Serialize() + return err + }, + ) } // TestParallelBenchmarkSender benchmarks transfer action generation and serialization when multiple go routines are doing the same thing. 
func TestParallelBenchmarkSender(t *testing.T) { - bits, err := benchmark.Bits(32) + bits, curves, cases := benchmark2.GenerateCasesWithDefaults(t) + configurations, err := benchmark.NewSetupConfigurations("./../testdata", bits, curves) require.NoError(t, err) - curves := benchmark.Curves(math.BN254) - inputs, err := benchmark.NumInputs(2) - require.NoError(t, err) - outputs, err := benchmark.NumOutputs(2) - require.NoError(t, err) - workers, err := benchmark.Workers(runtime.NumCPU()) - require.NoError(t, err) - testCases := benchmark.GenerateCases(bits, curves, inputs, outputs, workers) - - for _, tc := range testCases { - t.Run(tc.Name, func(t *testing.T) { - r := benchmark.RunBenchmark( - tc.BenchmarkCase.Workers, - benchmark.Duration(), - func() *benchmarkSenderEnv { - env, err := newBenchmarkSenderEnv(1, tc.BenchmarkCase) - require.NoError(t, err) - return env - }, - func(env *benchmarkSenderEnv) { - transfer, _, err := env.SenderEnvs[0].sender.GenerateZKTransfer( - t.Context(), - env.SenderEnvs[0].outvalues, - env.SenderEnvs[0].owners, - ) - require.NoError(t, err) - assert.NotNil(t, transfer) - _, err = transfer.Serialize() - require.NoError(t, err) - }, + + test := benchmark2.NewTest[*benchmarkSenderEnv](cases) + test.RunBenchmark(t, + func(c *benchmark2.Case) (*benchmarkSenderEnv, error) { + return newBenchmarkSenderEnv(1, c, configurations) + }, + func(env *benchmarkSenderEnv) error { + transfer, _, err := env.SenderEnvs[0].sender.GenerateZKTransfer( + t.Context(), + env.SenderEnvs[0].outvalues, + env.SenderEnvs[0].owners, ) - r.Print() - }) - } + if err != nil { + return err + } + _, err = transfer.Serialize() + return err + }, + ) } // BenchmarkVerificationSenderProof benchmarks transfer action deserialization and proof verification. func BenchmarkVerificationSenderProof(b *testing.B) { - bits, err := benchmark.Bits(32, 64) - require.NoError(b, err) - curves := benchmark.Curves(math.BN254, math.BLS12_381_BBS_GURVY, math2.BLS12_381_BBS_GURVY_FAST_RNG) - inputs, err := benchmark.NumInputs(1, 2, 3) + bits, curves, cases := benchmark2.GenerateCasesWithDefaults(b) + configurations, err := benchmark.NewSetupConfigurations("./../testdata", bits, curves) require.NoError(b, err) - outputs, err := benchmark.NumOutputs(1, 2, 3) + + test := benchmark2.NewTest[*benchmarkSenderEnv](cases) + test.GoBenchmark(b, + func(c *benchmark2.Case) (*benchmarkSenderEnv, error) { + return newBenchmarkSenderProofVerificationEnv(b.Context(), 1, c, configurations) + }, + func(env *benchmarkSenderEnv) error { + // deserialize action + ta := &transfer.Action{} + if err := ta.Deserialize(env.SenderEnvs[0].transferRaw); err != nil { + return err + } + inputTokens := make([]*math.G1, len(ta.Inputs)) + for j, in := range ta.Inputs { + inputTokens[j] = in.Token.Data + } + + // instantiate the verifier and verify + return transfer.NewVerifier( + inputTokens, + ta.GetOutputCommitments(), + env.SenderEnvs[0].sender.PublicParams, + ).Verify(ta.GetProof()) + }, + ) +} + +// BenchmarkVerificationSenderProof benchmarks transfer action deserialization and proof verification. 
+func BenchmarkVerificationParallelSenderProof(b *testing.B) { + bits, curves, cases := benchmark2.GenerateCasesWithDefaults(b) + configurations, err := benchmark.NewSetupConfigurations("./../testdata", bits, curves) require.NoError(b, err) - testCases := benchmark.GenerateCases(bits, curves, inputs, outputs, []int{1}) - - for _, tc := range testCases { - b.Run(tc.Name, func(b *testing.B) { - env, err := newBenchmarkSenderProofVerificationEnv(b.Context(), b.N, tc.BenchmarkCase) - require.NoError(b, err) - - b.ResetTimer() - - i := 0 - for b.Loop() { - // deserialize action - ta := &transfer.Action{} - require.NoError(b, ta.Deserialize(env.SenderEnvs[i].transferRaw)) - inputTokens := make([]*math.G1, len(ta.Inputs)) - for j, in := range ta.Inputs { - inputTokens[j] = in.Token.Data - } - - // instantiate the verifier and verify - require.NoError(b, - transfer.NewVerifier( - inputTokens, - ta.GetOutputCommitments(), - env.SenderEnvs[i].sender.PublicParams, - ).Verify(ta.GetProof()), - ) - i++ + + test := benchmark2.NewTest[*benchmarkSenderEnv](cases) + test.GoBenchmarkParallel(b, + func(c *benchmark2.Case) (*benchmarkSenderEnv, error) { + return newBenchmarkSenderProofVerificationEnv(b.Context(), 1, c, configurations) + }, + func(env *benchmarkSenderEnv) error { + // deserialize action + ta := &transfer.Action{} + if err := ta.Deserialize(env.SenderEnvs[0].transferRaw); err != nil { + return err } - }) - } + inputTokens := make([]*math.G1, len(ta.Inputs)) + for j, in := range ta.Inputs { + inputTokens[j] = in.Token.Data + } + + // instantiate the verifier and verify + return transfer.NewVerifier( + inputTokens, + ta.GetOutputCommitments(), + env.SenderEnvs[0].sender.PublicParams, + ).Verify(ta.GetProof()) + }, + ) } // TestParallelBenchmarkVerificationSenderProof benchmarks transfer action deserialization and proof verification when multiple go routines are doing the same thing. 
func TestParallelBenchmarkVerificationSenderProof(t *testing.T) { - bits, err := benchmark.Bits(32) - require.NoError(t, err) - curves := benchmark.Curves(math.BN254) - inputs, err := benchmark.NumInputs(2) + bits, curves, cases := benchmark2.GenerateCasesWithDefaults(t) + configurations, err := benchmark.NewSetupConfigurations("./../testdata", bits, curves) require.NoError(t, err) - outputs, err := benchmark.NumOutputs(2) - require.NoError(t, err) - workers, err := benchmark.Workers(runtime.NumCPU()) - require.NoError(t, err) - testCases := benchmark.GenerateCases(bits, curves, inputs, outputs, workers) - - for _, tc := range testCases { - t.Run(tc.Name, func(t *testing.T) { - r := benchmark.RunBenchmark( - tc.BenchmarkCase.Workers, - benchmark.Duration(), - func() *benchmarkSenderEnv { - env, err := newBenchmarkSenderProofVerificationEnv(t.Context(), 1, tc.BenchmarkCase) - require.NoError(t, err) - return env - }, - func(env *benchmarkSenderEnv) { - // deserialize action - ta := &transfer.Action{} - require.NoError(t, ta.Deserialize(env.SenderEnvs[0].transferRaw)) - inputTokens := make([]*math.G1, len(ta.Inputs)) - for j, in := range ta.Inputs { - inputTokens[j] = in.Token.Data - } - - // instantiate the verifier and verify - require.NoError(t, - transfer.NewVerifier( - inputTokens, - ta.GetOutputCommitments(), - env.SenderEnvs[0].sender.PublicParams, - ).Verify(ta.GetProof()), - ) - }, - ) - r.Print() - }) - } + + test := benchmark2.NewTest[*benchmarkSenderEnv](cases) + test.RunBenchmark(t, + func(c *benchmark2.Case) (*benchmarkSenderEnv, error) { + return newBenchmarkSenderProofVerificationEnv(t.Context(), 1, c, configurations) + }, + func(env *benchmarkSenderEnv) error { + // deserialize action + ta := &transfer.Action{} + if err := ta.Deserialize(env.SenderEnvs[0].transferRaw); err != nil { + return err + } + inputTokens := make([]*math.G1, len(ta.Inputs)) + for j, in := range ta.Inputs { + inputTokens[j] = in.Token.Data + } + + // instantiate the verifier and verify + return transfer.NewVerifier( + inputTokens, + ta.GetOutputCommitments(), + env.SenderEnvs[0].sender.PublicParams, + ).Verify(ta.GetProof()) + }, + ) } func prepareTokens(values, bf []*math.Zr, ttype string, pp []*math.G1, curve *math.Curve) []*math.G1 { @@ -329,7 +338,7 @@ type benchmarkSenderEnv struct { SenderEnvs []*senderEnv } -func newBenchmarkSenderEnv(n int, benchmarkCase *benchmark.Case) (*benchmarkSenderEnv, error) { +func newBenchmarkSenderEnv(n int, benchmarkCase *benchmark2.Case, configurations *benchmark.SetupConfigurations) (*benchmarkSenderEnv, error) { envs := make([]*senderEnv, n) pp, err := setup(benchmarkCase.Bits, benchmarkCase.CurveID) if err != nil { @@ -344,12 +353,13 @@ func newBenchmarkSenderEnv(n int, benchmarkCase *benchmark.Case) (*benchmarkSend return &benchmarkSenderEnv{SenderEnvs: envs}, nil } -func newBenchmarkSenderProofVerificationEnv(ctx context.Context, n int, benchmarkCase *benchmark.Case) (*benchmarkSenderEnv, error) { +func newBenchmarkSenderProofVerificationEnv(ctx context.Context, n int, benchmarkCase *benchmark2.Case, configurations *benchmark.SetupConfigurations) (*benchmarkSenderEnv, error) { envs := make([]*senderEnv, n) - pp, err := setup(benchmarkCase.Bits, benchmarkCase.CurveID) + pp, err := configurations.GetPublicParams(benchmarkCase.Bits, benchmarkCase.CurveID) if err != nil { return nil, err } + for i := range envs { env, err := newSenderEnv(pp, benchmarkCase.NumInputs, benchmarkCase.NumOutputs) if err != nil { diff --git 
a/token/core/zkatdlog/nogh/v1/transfer/transfer_test.go b/token/core/zkatdlog/nogh/v1/transfer/transfer_test.go index 7f27274ccf..d741fd9664 100644 --- a/token/core/zkatdlog/nogh/v1/transfer/transfer_test.go +++ b/token/core/zkatdlog/nogh/v1/transfer/transfer_test.go @@ -9,13 +9,14 @@ package transfer_test import ( "runtime" "testing" + "time" math "github.com/IBM/mathlib" - "github.com/hyperledger-labs/fabric-token-sdk/token/core/common/benchmark" math2 "github.com/hyperledger-labs/fabric-token-sdk/token/core/common/crypto/math" v1 "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/setup" "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/token" "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/transfer" + benchmark2 "github.com/hyperledger-labs/fabric-token-sdk/token/services/benchmark" token2 "github.com/hyperledger-labs/fabric-token-sdk/token/token" "github.com/stretchr/testify/require" ) @@ -61,14 +62,14 @@ func TestTransfer(t *testing.T) { // BenchmarkTransferProofGeneration benchmarks the ZK proof generation for a transfer operation func BenchmarkTransferProofGeneration(b *testing.B) { - bits, err := benchmark.Bits(32, 64) + bits, err := benchmark2.Bits(32, 64) require.NoError(b, err) - curves := benchmark.Curves(math.BN254, math.BLS12_381_BBS_GURVY, math2.BLS12_381_BBS_GURVY_FAST_RNG) - inputs, err := benchmark.NumInputs(1, 2, 3) + curves := benchmark2.Curves(math.BN254, math.BLS12_381_BBS_GURVY, math2.BLS12_381_BBS_GURVY_FAST_RNG) + inputs, err := benchmark2.NumInputs(1, 2, 3) require.NoError(b, err) - outputs, err := benchmark.NumOutputs(1, 2, 3) + outputs, err := benchmark2.NumOutputs(1, 2, 3) require.NoError(b, err) - testCases := benchmark.GenerateCases(bits, curves, inputs, outputs, []int{1}) + testCases := benchmark2.GenerateCases(bits, curves, inputs, outputs, []int{1}) for _, tc := range testCases { b.Run(tc.Name, func(b *testing.B) { @@ -98,28 +99,29 @@ func BenchmarkTransferProofGeneration(b *testing.B) { // TestParallelBenchmarkTransferProofGeneration benchmarks ZK proof generation for a transfer operation when multiple go routines are doing the same thing. 
func TestParallelBenchmarkTransferProofGeneration(t *testing.T) { - bits, err := benchmark.Bits(32) + bits, err := benchmark2.Bits(32) require.NoError(t, err) - curves := benchmark.Curves(math.BN254) - inputs, err := benchmark.NumInputs(2) + curves := benchmark2.Curves(math.BN254) + inputs, err := benchmark2.NumInputs(2) require.NoError(t, err) - outputs, err := benchmark.NumOutputs(2) + outputs, err := benchmark2.NumOutputs(2) require.NoError(t, err) - workers, err := benchmark.Workers(runtime.NumCPU()) + workers, err := benchmark2.Workers(runtime.NumCPU()) require.NoError(t, err) - testCases := benchmark.GenerateCases(bits, curves, inputs, outputs, workers) + testCases := benchmark2.GenerateCases(bits, curves, inputs, outputs, workers) for _, tc := range testCases { t.Run(tc.Name, func(t *testing.T) { - r := benchmark.RunBenchmark( - tc.BenchmarkCase.Workers, - benchmark.Duration(), + r := benchmark2.RunBenchmark( + benchmark2.NewConfig(tc.BenchmarkCase.Workers, + benchmark2.Duration(), + 3*time.Second), func() *benchmarkTransferEnv { env, err := newBenchmarkTransferEnv(1, tc.BenchmarkCase) require.NoError(t, err) return env }, - func(env *benchmarkTransferEnv) { + func(env *benchmarkTransferEnv) error { prover, err := transfer.NewProver( env.ProverEnvs[0].a, env.ProverEnvs[0].b, @@ -127,9 +129,11 @@ func TestParallelBenchmarkTransferProofGeneration(t *testing.T) { env.ProverEnvs[0].d, env.pp, ) - require.NoError(t, err) + if err != nil { + return err + } _, err = prover.Prove() - require.NoError(t, err) + return err }, ) r.Print() @@ -345,7 +349,7 @@ type benchmarkTransferEnv struct { pp *v1.PublicParams } -func newBenchmarkTransferEnv(n int, benchmarkCase *benchmark.Case) (*benchmarkTransferEnv, error) { +func newBenchmarkTransferEnv(n int, benchmarkCase *benchmark2.Case) (*benchmarkTransferEnv, error) { pp, err := setup(benchmarkCase.Bits, benchmarkCase.CurveID) if err != nil { return nil, err diff --git a/token/core/zkatdlog/nogh/v1/transfer_test.go b/token/core/zkatdlog/nogh/v1/transfer_test.go index 7316db68e4..9606019ab6 100644 --- a/token/core/zkatdlog/nogh/v1/transfer_test.go +++ b/token/core/zkatdlog/nogh/v1/transfer_test.go @@ -7,23 +7,21 @@ SPDX-License-Identifier: Apache-2.0 package v1_test import ( - "os" - "runtime" "strconv" "testing" math "github.com/IBM/mathlib" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/metrics/disabled" "github.com/hyperledger-labs/fabric-token-sdk/token/core/common" - "github.com/hyperledger-labs/fabric-token-sdk/token/core/common/benchmark" math2 "github.com/hyperledger-labs/fabric-token-sdk/token/core/common/crypto/math" v1 "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1" + "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/benchmark" driver2 "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/driver" "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/mock" - v1setup "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/setup" v1token "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/token" "github.com/hyperledger-labs/fabric-token-sdk/token/driver" mock2 "github.com/hyperledger-labs/fabric-token-sdk/token/driver/mock" + benchmark2 "github.com/hyperledger-labs/fabric-token-sdk/token/services/benchmark" "github.com/hyperledger-labs/fabric-token-sdk/token/services/identity" "github.com/hyperledger-labs/fabric-token-sdk/token/services/identity/idemix" 
"github.com/hyperledger-labs/fabric-token-sdk/token/services/logging" @@ -33,6 +31,8 @@ import ( "go.opentelemetry.io/otel/trace/noop" ) +var logger = logging.MustGetLogger() + func TestTransferService_VerifyTransfer(t *testing.T) { tests := []struct { name string @@ -61,19 +61,19 @@ func TestTransferService_VerifyTransfer(t *testing.T) { } } -func BenchmarkTransfer(b *testing.B) { - bits, err := benchmark.Bits(32, 64) +func BenchmarkTransferServiceTransfer(b *testing.B) { + bits, err := benchmark2.Bits(32, 64) require.NoError(b, err) - curves := benchmark.Curves(math.BN254, math.BLS12_381_BBS_GURVY, math2.BLS12_381_BBS_GURVY_FAST_RNG) - inputs, err := benchmark.NumInputs(1, 2, 3) + curves := benchmark2.Curves(math.BN254, math.BLS12_381_BBS_GURVY, math2.BLS12_381_BBS_GURVY_FAST_RNG) + inputs, err := benchmark2.NumInputs(1, 2, 3) require.NoError(b, err) - outputs, err := benchmark.NumOutputs(1, 2, 3) + outputs, err := benchmark2.NumOutputs(1, 2, 3) require.NoError(b, err) - testCases := benchmark.GenerateCases(bits, curves, inputs, outputs, []int{1}) + testCases := benchmark2.GenerateCases(bits, curves, inputs, outputs, []int{1}) for _, tc := range testCases { b.Run(tc.Name, func(b *testing.B) { - env, err := newBenchmarkTransferEnv(b.N, tc.BenchmarkCase) + env, err := newBenchmarkTransferEnv(b.N, tc.BenchmarkCase, nil) require.NoError(b, err) // Optional: Reset timer if you had expensive setup code above @@ -97,47 +97,32 @@ func BenchmarkTransfer(b *testing.B) { } } -func TestBenchmarkTransferParallel(t *testing.T) { - bits, err := benchmark.Bits(32) - require.NoError(t, err) - curves := benchmark.Curves(math.BN254) - inputs, err := benchmark.NumInputs(2) - require.NoError(t, err) - outputs, err := benchmark.NumOutputs(2) +func TestParallelBenchmarkTransferServiceTransfer(t *testing.T) { + bits, curves, cases := benchmark2.GenerateCasesWithDefaults(t) + configurations, err := benchmark.NewSetupConfigurations("./testdata", bits, curves) require.NoError(t, err) - workers, err := benchmark.Workers(runtime.NumCPU()) - require.NoError(t, err) - testCases := benchmark.GenerateCases(bits, curves, inputs, outputs, workers) - for _, tc := range testCases { - t.Run(tc.Name, func(t *testing.T) { - r := benchmark.RunBenchmark( - tc.BenchmarkCase.Workers, - benchmark.Duration(), - func() *benchmarkTransferEnv { - env, err := newBenchmarkTransferEnv(1, tc.BenchmarkCase) - require.NoError(t, err) - return env - }, - func(env *benchmarkTransferEnv) { - action, _, err := env.Envs[0].ts.Transfer( - t.Context(), - "an_anchor", - nil, - env.Envs[0].ids, - env.Envs[0].outputs, - nil, - ) - require.NoError(t, err) - assert.NotNil(t, action) - raw, err := action.Serialize() - require.NoError(t, err) - require.NotEmpty(t, raw) - }, + test := benchmark2.NewTest[*benchmarkTransferEnv](cases) + test.RunBenchmark(t, + func(c *benchmark2.Case) (*benchmarkTransferEnv, error) { + return newBenchmarkTransferEnv(1, c, configurations) + }, + func(env *benchmarkTransferEnv) error { + action, _, err := env.Envs[0].ts.Transfer( + t.Context(), + "an_anchor", + nil, + env.Envs[0].ids, + env.Envs[0].outputs, + nil, ) - r.Print() - }) - } + if err != nil { + return err + } + _, err = action.Serialize() + return err + }, + ) } type transferEnv struct { @@ -146,31 +131,11 @@ type transferEnv struct { ids []*token.ID } -func newTransferEnv(benchmarkCase *benchmark.Case) (*transferEnv, error) { - logger := logging.MustGetLogger() - - var ipk []byte - var err error - switch benchmarkCase.CurveID { - case math.BN254: - ipk, err = 
os.ReadFile("./validator/testdata/bn254/idemix/msp/IssuerPublicKey") - if err != nil { - return nil, err - } - case math.BLS12_381_BBS_GURVY: - fallthrough - case math2.BLS12_381_BBS_GURVY_FAST_RNG: - ipk, err = os.ReadFile("./validator/testdata/bls12_381_bbs/idemix/msp/IssuerPublicKey") - if err != nil { - return nil, err - } - } - - pp, err := v1setup.Setup(benchmarkCase.Bits, ipk, benchmarkCase.CurveID) +func newTransferEnv(benchmarkCase *benchmark2.Case, configurations *benchmark.SetupConfigurations) (*transferEnv, error) { + pp, err := configurations.GetPublicParams(benchmarkCase.Bits, benchmarkCase.CurveID) if err != nil { return nil, err } - pp.AddIssuer([]byte("an_issuer")) ppm, err := common.NewPublicParamsManagerFromParams(pp) if err != nil { return nil, err @@ -268,10 +233,10 @@ type benchmarkTransferEnv struct { Envs []*transferEnv } -func newBenchmarkTransferEnv(n int, benchmarkCase *benchmark.Case) (*benchmarkTransferEnv, error) { +func newBenchmarkTransferEnv(n int, benchmarkCase *benchmark2.Case, configurations *benchmark.SetupConfigurations) (*benchmarkTransferEnv, error) { envs := make([]*transferEnv, n) for i := 0; i < n; i++ { - env, err := newTransferEnv(benchmarkCase) + env, err := newTransferEnv(benchmarkCase, configurations) if err != nil { return nil, err } diff --git a/token/core/zkatdlog/nogh/v1/validator/ledger.go b/token/core/zkatdlog/nogh/v1/validator/ledger.go deleted file mode 100644 index c23fbcb4b6..0000000000 --- a/token/core/zkatdlog/nogh/v1/validator/ledger.go +++ /dev/null @@ -1,14 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ -package validator - -import "github.com/hyperledger-labs/fabric-token-sdk/token/token" - -//go:generate counterfeiter -o mock/ledger.go -fake-name Ledger . Ledger - -type Ledger interface { - GetState(id token.ID) ([]byte, error) -} diff --git a/token/core/zkatdlog/nogh/v1/validator/mock/ledger.go b/token/core/zkatdlog/nogh/v1/validator/mock/ledger.go deleted file mode 100644 index e150c34fe4..0000000000 --- a/token/core/zkatdlog/nogh/v1/validator/mock/ledger.go +++ /dev/null @@ -1,117 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. 
-package mock - -import ( - "sync" - - "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/validator" - "github.com/hyperledger-labs/fabric-token-sdk/token/token" -) - -type Ledger struct { - GetStateStub func(token.ID) ([]byte, error) - getStateMutex sync.RWMutex - getStateArgsForCall []struct { - arg1 token.ID - } - getStateReturns struct { - result1 []byte - result2 error - } - getStateReturnsOnCall map[int]struct { - result1 []byte - result2 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *Ledger) GetState(arg1 token.ID) ([]byte, error) { - fake.getStateMutex.Lock() - ret, specificReturn := fake.getStateReturnsOnCall[len(fake.getStateArgsForCall)] - fake.getStateArgsForCall = append(fake.getStateArgsForCall, struct { - arg1 token.ID - }{arg1}) - stub := fake.GetStateStub - fakeReturns := fake.getStateReturns - fake.recordInvocation("GetState", []interface{}{arg1}) - fake.getStateMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *Ledger) GetStateCallCount() int { - fake.getStateMutex.RLock() - defer fake.getStateMutex.RUnlock() - return len(fake.getStateArgsForCall) -} - -func (fake *Ledger) GetStateCalls(stub func(token.ID) ([]byte, error)) { - fake.getStateMutex.Lock() - defer fake.getStateMutex.Unlock() - fake.GetStateStub = stub -} - -func (fake *Ledger) GetStateArgsForCall(i int) token.ID { - fake.getStateMutex.RLock() - defer fake.getStateMutex.RUnlock() - argsForCall := fake.getStateArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *Ledger) GetStateReturns(result1 []byte, result2 error) { - fake.getStateMutex.Lock() - defer fake.getStateMutex.Unlock() - fake.GetStateStub = nil - fake.getStateReturns = struct { - result1 []byte - result2 error - }{result1, result2} -} - -func (fake *Ledger) GetStateReturnsOnCall(i int, result1 []byte, result2 error) { - fake.getStateMutex.Lock() - defer fake.getStateMutex.Unlock() - fake.GetStateStub = nil - if fake.getStateReturnsOnCall == nil { - fake.getStateReturnsOnCall = make(map[int]struct { - result1 []byte - result2 error - }) - } - fake.getStateReturnsOnCall[i] = struct { - result1 []byte - result2 error - }{result1, result2} -} - -func (fake *Ledger) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.getStateMutex.RLock() - defer fake.getStateMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *Ledger) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ validator.Ledger = new(Ledger) diff --git a/token/core/zkatdlog/nogh/v1/validator/validator_suite_test.go b/token/core/zkatdlog/nogh/v1/validator/validator_suite_test.go deleted file mode 100644 index 9c95e72a8d..0000000000 --- a/token/core/zkatdlog/nogh/v1/validator/validator_suite_test.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. 
- -SPDX-License-Identifier: Apache-2.0 -*/ -package validator_test - -import ( - "testing" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -func TestEngine(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Validator Suite") -} diff --git a/token/core/zkatdlog/nogh/v1/validator/validator_test.go b/token/core/zkatdlog/nogh/v1/validator/validator_test.go index ed5f885ba6..6bf1b1bcf2 100644 --- a/token/core/zkatdlog/nogh/v1/validator/validator_test.go +++ b/token/core/zkatdlog/nogh/v1/validator/validator_test.go @@ -9,392 +9,345 @@ package validator_test import ( "bytes" "context" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/asn1" - "encoding/pem" - "fmt" - "math/big" - "os" - - "github.com/IBM/idemix/bccsp/types" + "testing" + math "github.com/IBM/mathlib" "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/errors" "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/audit" + "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/benchmark" zkatdlog "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/driver" issue2 "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/issue" v1 "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/setup" tokn "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/token" "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/transfer" enginedlog "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/validator" - "github.com/hyperledger-labs/fabric-token-sdk/token/core/zkatdlog/nogh/v1/validator/mock" "github.com/hyperledger-labs/fabric-token-sdk/token/driver" + benchmark2 "github.com/hyperledger-labs/fabric-token-sdk/token/services/benchmark" "github.com/hyperledger-labs/fabric-token-sdk/token/services/identity" "github.com/hyperledger-labs/fabric-token-sdk/token/services/identity/deserializer" idemix2 "github.com/hyperledger-labs/fabric-token-sdk/token/services/identity/idemix" "github.com/hyperledger-labs/fabric-token-sdk/token/services/identity/idemix/crypto" - "github.com/hyperledger-labs/fabric-token-sdk/token/services/identity/storage/kvs" ix509 "github.com/hyperledger-labs/fabric-token-sdk/token/services/identity/x509" "github.com/hyperledger-labs/fabric-token-sdk/token/services/logging" utils2 "github.com/hyperledger-labs/fabric-token-sdk/token/services/utils" "github.com/hyperledger-labs/fabric-token-sdk/token/services/utils/slices" token2 "github.com/hyperledger-labs/fabric-token-sdk/token/token" - "github.com/hyperledger/fabric-lib-go/bccsp/utils" - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" + "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/trace/noop" ) -var fakeLedger *mock.Ledger +var ( + testUseCase = &benchmark2.Case{ + Bits: 32, + CurveID: math.BLS12_381_BBS_GURVY, + NumInputs: 2, + NumOutputs: 2, + } +) + +func TestValidator(t *testing.T) { + t.Run("Validator is called correctly with a non-anonymous issue action", func(t *testing.T) { + configurations, err := benchmark.NewSetupConfigurations("./../testdata", []uint64{testUseCase.Bits}, []math.CurveID{testUseCase.CurveID}) + require.NoError(t, err) + env, err := newEnv(testUseCase, configurations) + require.NoError(t, err) + + raw, err := env.ir.Bytes() + require.NoError(t, err) + actions, _, err := env.engine.VerifyTokenRequestFromRaw(t.Context(), nil, "1", raw) + require.NoError(t, err) + require.Len(t, actions, 1) + }) + t.Run("validator is called correctly with a transfer action", func(t *testing.T) { + configurations, err := benchmark.NewSetupConfigurations("./../testdata", []uint64{testUseCase.Bits}, []math.CurveID{testUseCase.CurveID}) + require.NoError(t, err) + env, err := newEnv(testUseCase, configurations) + require.NoError(t, err) + + actions, _, err := env.engine.VerifyTokenRequestFromRaw(t.Context(), nil, "1", env.transferRaw) + require.NoError(t, err) + require.Len(t, actions, 1) + }) + t.Run("validator is called correctly with a redeem action", func(t *testing.T) { + configurations, err := benchmark.NewSetupConfigurations("./../testdata", []uint64{testUseCase.Bits}, []math.CurveID{testUseCase.CurveID}) + require.NoError(t, err) + env, err := newEnv(testUseCase, configurations) + require.NoError(t, err) + + raw, err := env.rr.Bytes() + require.NoError(t, err) + + actions, _, err := env.engine.VerifyTokenRequestFromRaw(t.Context(), nil, "1", raw) + require.NoError(t, err) + require.Len(t, actions, 1) + }) + t.Run("engine is called correctly with atomic swap", func(t *testing.T) { + configurations, err := benchmark.NewSetupConfigurations("./../testdata", []uint64{testUseCase.Bits}, []math.CurveID{testUseCase.CurveID}) + require.NoError(t, err) + env, err := newEnv(testUseCase, configurations) + require.NoError(t, err) + + raw, err := env.ar.Bytes() + require.NoError(t, err) + + actions, _, err := env.engine.VerifyTokenRequestFromRaw(t.Context(), nil, "2", raw) + require.NoError(t, err) + require.Len(t, actions, 1) + }) + t.Run("when the sender's signature is not valid: wrong txID", func(t *testing.T) { + configurations, err := benchmark.NewSetupConfigurations("./../testdata", []uint64{testUseCase.Bits}, []math.CurveID{testUseCase.CurveID}) + require.NoError(t, err) + env, err := newEnv(testUseCase, configurations) + require.NoError(t, err) + + request := &driver.TokenRequest{Issues: env.ar.Issues, Transfers: env.ar.Transfers} + raw, err := request.MarshalToMessageToSign([]byte("3")) + require.NoError(t, err) + + signatures, err := env.sender.SignTokenActions(raw) + require.NoError(t, err) + env.ar.Signatures[1] = signatures[0] + + raw, err = env.ar.Bytes() + require.NoError(t, err) + + _, _, err = env.engine.VerifyTokenRequestFromRaw(t.Context(), nil, "2", raw) + require.Error(t, err) + require.ErrorContains(t, err, "failed signature verification") + }) +} + +func TestParallelBenchmarkValidatorTransfer(t *testing.T) { + bits, curves, cases := benchmark2.GenerateCasesWithDefaults(t) + configurations, err := benchmark.NewSetupConfigurations("./../testdata", bits, curves) + require.NoError(t, err) + + test := benchmark2.NewTest[*env](cases) + test.RunBenchmark(t, + 
func(c *benchmark2.Case) (*env, error) { + return newEnv(c, configurations) + }, + func(env *env) error { + _, _, err := env.engine.VerifyTokenRequestFromRaw(t.Context(), nil, "1", env.transferRaw) + return err + }, + ) +} -var _ = Describe("validator", func() { +type env struct { + ir *driver.TokenRequest + engine *enginedlog.Validator + inputsForTransfer []*tokn.Token + tr *driver.TokenRequest + inputsForRedeem []*tokn.Token + rr *driver.TokenRequest + ar *driver.TokenRequest + sender *transfer.Sender + transferRaw []byte +} + +func newEnv(benchCase *benchmark2.Case, configurations *benchmark.SetupConfigurations) (*env, error) { var ( engine *enginedlog.Validator - pp *v1.PublicParams inputsForRedeem []*tokn.Token inputsForTransfer []*tokn.Token sender *transfer.Sender auditor *audit.Auditor - ipk []byte ir *driver.TokenRequest // regular issue request rr *driver.TokenRequest // redeem request tr *driver.TokenRequest // transfer request ar *driver.TokenRequest // atomic action request ) - BeforeEach(func() { - fakeLedger = &mock.Ledger{} - var err error - // prepare public parameters - ipk, err = os.ReadFile("./testdata/bls12_381_bbs/idemix/msp/IssuerPublicKey") - Expect(err).NotTo(HaveOccurred()) - pp, err = v1.Setup(32, ipk, math.BLS12_381_BBS_GURVY) - Expect(err).NotTo(HaveOccurred()) - - c := math.Curves[pp.Curve] - - asigner, _ := prepareECDSASigner() - idemixDes, err := idemix2.NewDeserializer(slices.GetUnique(pp.IdemixIssuerPublicKeys).PublicKey, math.BLS12_381_BBS_GURVY) - Expect(err).NotTo(HaveOccurred()) - des := deserializer.NewTypedVerifierDeserializerMultiplex() - des.AddTypedVerifierDeserializer(idemix2.IdentityType, deserializer.NewTypedIdentityVerifierDeserializer(idemixDes, idemixDes)) - des.AddTypedVerifierDeserializer(ix509.IdentityType, deserializer.NewTypedIdentityVerifierDeserializer(&Deserializer{}, &Deserializer{})) - auditor = audit.NewAuditor(logging.MustGetLogger(), &noop.Tracer{}, des, pp.PedersenGenerators, asigner, c) - araw, err := asigner.Serialize() - Expect(err).NotTo(HaveOccurred()) - pp.SetAuditors([]driver.Identity{araw}) - - // initialize enginw with pp - deserializer, err := zkatdlog.NewDeserializer(pp) - Expect(err).NotTo(HaveOccurred()) - engine = enginedlog.New( - logging.MustGetLogger(), - pp, - deserializer, - nil, - nil, - nil, - ) - - // non-anonymous issue - _, ir, _ = prepareNonAnonymousIssueRequest(pp, auditor) - Expect(ir).NotTo(BeNil()) - - // prepare redeem - sender, rr, _, inputsForRedeem = prepareRedeemRequest(pp, auditor) - Expect(sender).NotTo(BeNil()) - - // prepare transfer - var trmetadata *driver.TokenRequestMetadata - sender, tr, trmetadata, inputsForTransfer = prepareTransferRequest(pp, auditor) - Expect(sender).NotTo(BeNil()) - Expect(trmetadata).NotTo(BeNil()) - - // atomic action request - ar = &driver.TokenRequest{Transfers: tr.Transfers} - raw, err := ar.MarshalToMessageToSign([]byte("2")) - Expect(err).NotTo(HaveOccurred()) - - // sender signs request - signatures, err := sender.SignTokenActions(raw) - Expect(err).NotTo(HaveOccurred()) - - // auditor inspect token - metadata := &driver.TokenRequestMetadata{} - metadata.Transfers = []*driver.TransferMetadata{trmetadata.Transfers[0]} - - tokns := make([][]*tokn.Token, 1) - for i := range 2 { - tokns[0] = append(tokns[0], inputsForTransfer[i]) - } - err = auditor.Check(context.Background(), ar, metadata, tokns, "2") - Expect(err).NotTo(HaveOccurred()) - sigma, err := auditor.Endorse(ar, "2") - Expect(err).NotTo(HaveOccurred()) - ar.AuditorSignatures = 
append(ar.AuditorSignatures, &driver.AuditorSignature{ - Identity: araw, - Signature: sigma, - }) - ar.Signatures = append(ar.Signatures, signatures...) - }) - Describe("Verify Token Requests", func() { - Context("Validator is called correctly with a non-anonymous issue action", func() { - var ( - err error - raw []byte - ) - BeforeEach(func() { - raw, err = ir.Bytes() - Expect(err).NotTo(HaveOccurred()) - }) - It("succeeds", func() { - actions, _, err := engine.VerifyTokenRequestFromRaw(context.TODO(), fakeLedger.GetStateStub, "1", raw) - Expect(err).NotTo(HaveOccurred()) - Expect(actions).To(HaveLen(1)) - }) - }) + // prepare public parameters + setupConfiguration, err := configurations.GetSetupConfiguration(benchCase.Bits, benchCase.CurveID) + if err != nil { + return nil, err + } + pp := setupConfiguration.PP + oID := setupConfiguration.OwnerIdentity - Context("validator is called correctly with a transfer action", func() { - var ( - err error - raw []byte - ) - BeforeEach(func() { - raw, err = inputsForTransfer[0].Serialize() - Expect(err).NotTo(HaveOccurred()) - fakeLedger.GetStateReturnsOnCall(0, raw, nil) - - raw, err = inputsForTransfer[1].Serialize() - Expect(err).NotTo(HaveOccurred()) - fakeLedger.GetStateReturnsOnCall(1, raw, nil) - - raw, err = inputsForTransfer[0].Serialize() - Expect(err).NotTo(HaveOccurred()) - fakeLedger.GetStateReturnsOnCall(2, raw, nil) - - raw, err = inputsForTransfer[1].Serialize() - Expect(err).NotTo(HaveOccurred()) - fakeLedger.GetStateReturnsOnCall(3, raw, nil) - - fakeLedger.GetStateReturnsOnCall(4, nil, nil) - fakeLedger.GetStateReturnsOnCall(5, nil, nil) - - raw, err = tr.Bytes() - Expect(err).NotTo(HaveOccurred()) - }) - It("succeeds", func() { - actions, _, err := engine.VerifyTokenRequestFromRaw(context.TODO(), getState, "1", raw) - Expect(err).NotTo(HaveOccurred()) - Expect(actions).To(HaveLen(1)) - }) - }) - Context("validator is called correctly with a redeem action", func() { - var ( - err error - raw []byte - ) - BeforeEach(func() { - - raw, err = inputsForRedeem[0].Serialize() - Expect(err).NotTo(HaveOccurred()) - fakeLedger.GetStateReturnsOnCall(0, raw, nil) - - raw, err = inputsForRedeem[1].Serialize() - Expect(err).NotTo(HaveOccurred()) - fakeLedger.GetStateReturnsOnCall(1, raw, nil) - - raw, err = inputsForRedeem[0].Serialize() - Expect(err).NotTo(HaveOccurred()) - fakeLedger.GetStateReturnsOnCall(2, raw, nil) - - raw, err = inputsForRedeem[1].Serialize() - Expect(err).NotTo(HaveOccurred()) - fakeLedger.GetStateReturnsOnCall(3, raw, nil) - - fakeLedger.GetStateReturnsOnCall(4, nil, nil) - - raw, err = rr.Bytes() - Expect(err).NotTo(HaveOccurred()) - - }) - It("succeeds", func() { - actions, _, err := engine.VerifyTokenRequestFromRaw(context.TODO(), getState, "1", raw) - Expect(err).NotTo(HaveOccurred()) - Expect(actions).To(HaveLen(1)) - }) - }) - Context("enginve is called correctly with atomic swap", func() { - var ( - err error - raw []byte - ) - BeforeEach(func() { - raw, err = inputsForTransfer[0].Serialize() - Expect(err).NotTo(HaveOccurred()) - fakeLedger.GetStateReturnsOnCall(0, raw, nil) - - raw, err = inputsForTransfer[1].Serialize() - Expect(err).NotTo(HaveOccurred()) - fakeLedger.GetStateReturnsOnCall(1, raw, nil) - - fakeLedger.GetStateReturnsOnCall(2, nil, nil) - - raw, err = inputsForTransfer[0].Serialize() - Expect(err).NotTo(HaveOccurred()) - fakeLedger.GetStateReturnsOnCall(3, raw, nil) - - raw, err = inputsForTransfer[1].Serialize() - Expect(err).NotTo(HaveOccurred()) - fakeLedger.GetStateReturnsOnCall(4, raw, nil) 
- - fakeLedger.GetStateReturnsOnCall(5, nil, nil) - fakeLedger.GetStateReturnsOnCall(6, nil, nil) - - raw, err = ar.Bytes() - Expect(err).NotTo(HaveOccurred()) - - }) - It("succeeds", func() { - actions, _, err := engine.VerifyTokenRequestFromRaw(context.TODO(), getState, "2", raw) - Expect(err).NotTo(HaveOccurred()) - Expect(actions).To(HaveLen(1)) - }) - - Context("when the sender's signature is not valid: wrong txID", func() { - BeforeEach(func() { - request := &driver.TokenRequest{Issues: ar.Issues, Transfers: ar.Transfers} - raw, err = request.MarshalToMessageToSign([]byte("3")) - Expect(err).NotTo(HaveOccurred()) - - signatures, err := sender.SignTokenActions(raw) - Expect(err).NotTo(HaveOccurred()) - ar.Signatures[1] = signatures[0] - - raw, err = ar.Bytes() - Expect(err).NotTo(HaveOccurred()) - - }) - It("fails", func() { - _, _, err := engine.VerifyTokenRequestFromRaw(context.TODO(), getState, "2", raw) - Expect(err.Error()).To(ContainSubstring("failed signature verification")) - - }) - }) - }) + c := math.Curves[pp.Curve] + + idemixDes, err := idemix2.NewDeserializer(slices.GetUnique(pp.IdemixIssuerPublicKeys).PublicKey, benchCase.CurveID) + if err != nil { + return nil, err + } + multiplexer := deserializer.NewTypedVerifierDeserializerMultiplex() + multiplexer.AddTypedVerifierDeserializer(idemix2.IdentityType, deserializer.NewTypedIdentityVerifierDeserializer(idemixDes, idemixDes)) + multiplexer.AddTypedVerifierDeserializer(ix509.IdentityType, deserializer.NewTypedIdentityVerifierDeserializer(&Deserializer{}, &Deserializer{})) + auditor = audit.NewAuditor( + logging.MustGetLogger(), + &noop.Tracer{}, + multiplexer, + pp.PedersenGenerators, + setupConfiguration.AuditorSigner, + c, + ) + + // initialize enginw with pp + des, err := zkatdlog.NewDeserializer(pp) + if err != nil { + return nil, err + } + engine = enginedlog.New( + logging.MustGetLogger(), + pp, + des, + nil, + nil, + nil, + ) + + // non-anonymous issue + _, ir, _, err = prepareNonAnonymousIssueRequest(pp, auditor, setupConfiguration) + if err != nil { + return nil, err + } + + // prepare redeem + _, rr, _, inputsForRedeem, err = prepareRedeemRequest(pp, auditor, setupConfiguration) + if err != nil { + return nil, err + } + + // prepare transfer + var trmetadata *driver.TokenRequestMetadata + sender, tr, trmetadata, inputsForTransfer, err = prepareTransferRequest(pp, auditor, oID) + if err != nil { + return nil, err + } + transferRaw, err := tr.Bytes() + if err != nil { + return nil, err + } + + // atomic action request + ar = &driver.TokenRequest{Transfers: tr.Transfers} + raw, err := ar.MarshalToMessageToSign([]byte("2")) + if err != nil { + return nil, err + } + + // sender signs request + signatures, err := sender.SignTokenActions(raw) + if err != nil { + return nil, err + } + + // auditor inspect token + metadata := &driver.TokenRequestMetadata{} + metadata.Transfers = []*driver.TransferMetadata{trmetadata.Transfers[0]} + + tokns := make([][]*tokn.Token, 1) + for i := range 2 { + tokns[0] = append(tokns[0], inputsForTransfer[i]) + } + err = auditor.Check(context.Background(), ar, metadata, tokns, "2") + if err != nil { + return nil, err + } + sigma, err := auditor.Endorse(ar, "2") + if err != nil { + return nil, err + } + ar.AuditorSignatures = append(ar.AuditorSignatures, &driver.AuditorSignature{ + Identity: pp.Auditors()[0], + Signature: sigma, }) -}) -func prepareECDSASigner() (*Signer, *Verifier) { - signer, err := NewECDSASigner() - Expect(err).NotTo(HaveOccurred()) - return signer, signer.Verifier + 
ar.Signatures = append(ar.Signatures, signatures...) + + return &env{ + ir: ir, + tr: tr, + engine: engine, + inputsForTransfer: inputsForTransfer, + inputsForRedeem: inputsForRedeem, + rr: rr, + ar: ar, + sender: sender, + transferRaw: transferRaw, + }, nil } -func prepareNonAnonymousIssueRequest(pp *v1.PublicParams, auditor *audit.Auditor) (*issue2.Issuer, *driver.TokenRequest, *driver.TokenRequestMetadata) { - signer, err := NewECDSASigner() - Expect(err).NotTo(HaveOccurred()) - - issuer := issue2.NewIssuer("ABC", signer, pp) - issuerIdentity, err := signer.Serialize() - Expect(err).NotTo(HaveOccurred()) - ir, metadata := prepareIssue(auditor, issuer, issuerIdentity) +func prepareNonAnonymousIssueRequest(pp *v1.PublicParams, auditor *audit.Auditor, setupConfiguration *benchmark.SetupConfiguration) (*issue2.Issuer, *driver.TokenRequest, *driver.TokenRequestMetadata, error) { + issuer := issue2.NewIssuer("ABC", setupConfiguration.IssuerSigner, pp) + issuerIdentity, err := setupConfiguration.IssuerSigner.Serialize() + if err != nil { + return nil, nil, nil, err + } + ir, metadata, err := prepareIssue(auditor, issuer, issuerIdentity, setupConfiguration.OwnerIdentity) + if err != nil { + return nil, nil, nil, err + } - return issuer, ir, metadata + return issuer, ir, metadata, nil } -func prepareRedeemRequest(pp *v1.PublicParams, auditor *audit.Auditor) (*transfer.Sender, *driver.TokenRequest, *driver.TokenRequestMetadata, []*tokn.Token) { - id, auditInfo, signer := getIdemixInfo("./testdata/bls12_381_bbs/idemix") +func prepareRedeemRequest(pp *v1.PublicParams, auditor *audit.Auditor, setupConfig *benchmark.SetupConfiguration) (*transfer.Sender, *driver.TokenRequest, *driver.TokenRequestMetadata, []*tokn.Token, error) { owners := make([][]byte, 2) - owners[0] = id - - issuerSigner, err := NewECDSASigner() - Expect(err).NotTo(HaveOccurred()) + owners[0] = setupConfig.OwnerIdentity.ID - issuer := issue2.NewIssuer("ABC", issuerSigner, pp) - issuerIdentity, err := issuerSigner.Serialize() - Expect(err).NotTo(HaveOccurred()) + issuer := issue2.NewIssuer("ABC", setupConfig.IssuerSigner, pp) + issuerIdentity, err := setupConfig.IssuerSigner.Serialize() + if err != nil { + return nil, nil, nil, nil, err + } - return prepareTransfer(pp, signer, auditor, auditInfo, id, owners, issuer, issuerIdentity) + return prepareTransfer( + pp, + setupConfig.OwnerIdentity.Signer, + auditor, + setupConfig.OwnerIdentity.AuditInfo, + setupConfig.OwnerIdentity.ID, + owners, + issuer, + issuerIdentity, + ) } -func prepareTransferRequest(pp *v1.PublicParams, auditor *audit.Auditor) (*transfer.Sender, *driver.TokenRequest, *driver.TokenRequestMetadata, []*tokn.Token) { - id, auditInfo, signer := getIdemixInfo("./testdata/bls12_381_bbs/idemix") +func prepareTransferRequest(pp *v1.PublicParams, auditor *audit.Auditor, oID *benchmark.OwnerIdentity) (*transfer.Sender, *driver.TokenRequest, *driver.TokenRequestMetadata, []*tokn.Token, error) { owners := make([][]byte, 2) - owners[0] = id - owners[1] = id + owners[0] = oID.ID + owners[1] = oID.ID - return prepareTransfer(pp, signer, auditor, auditInfo, id, owners, nil, nil) + return prepareTransfer(pp, oID.Signer, auditor, oID.AuditInfo, oID.ID, owners, nil, nil) } -func prepareTokens(values, bf []*math.Zr, ttype string, pp []*math.G1, curve *math.Curve) []*math.G1 { +func prepareTokens(values, bf []*math.Zr, tokenType string, pp []*math.G1, curve *math.Curve) []*math.G1 { tokens := make([]*math.G1, len(values)) for i := range values { - tokens[i] = prepareToken(values[i], 
bf[i], ttype, pp, curve) + tokens[i] = prepareToken(values[i], bf[i], tokenType, pp, curve) } return tokens } -func prepareToken(value *math.Zr, rand *math.Zr, ttype string, pp []*math.G1, curve *math.Curve) *math.G1 { +func prepareToken(value *math.Zr, rand *math.Zr, tokenType string, pp []*math.G1, curve *math.Curve) *math.G1 { token := curve.NewG1() - token.Add(pp[0].Mul(curve.HashToZr([]byte(ttype)))) + token.Add(pp[0].Mul(curve.HashToZr([]byte(tokenType)))) token.Add(pp[1].Mul(value)) token.Add(pp[2].Mul(rand)) return token } -func getIdemixInfo(dir string) (driver.Identity, *crypto.AuditInfo, driver.SigningIdentity) { - backend, err := kvs.NewInMemory() - Expect(err).NotTo(HaveOccurred()) - config, err := crypto.NewConfig(dir) - Expect(err).NotTo(HaveOccurred()) - curveID := math.BLS12_381_BBS_GURVY - keyStore, err := crypto.NewKeyStore(curveID, kvs.Keystore(backend)) - Expect(err).NotTo(HaveOccurred()) - cryptoProvider, err := crypto.NewBCCSP(keyStore, curveID) - Expect(err).NotTo(HaveOccurred()) - p, err := idemix2.NewKeyManager(config, types.EidNymRhNym, cryptoProvider) - Expect(err).NotTo(HaveOccurred()) - Expect(p).NotTo(BeNil()) - - identityDescriptor, err := p.Identity(context.Background(), nil) - Expect(err).NotTo(HaveOccurred()) - id := identityDescriptor.Identity - audit := identityDescriptor.AuditInfo - Expect(id).NotTo(BeNil()) - Expect(audit).NotTo(BeNil()) - - auditInfo, err := p.DeserializeAuditInfo(context.Background(), audit) - Expect(err).NotTo(HaveOccurred()) - err = auditInfo.Match(context.Background(), id) - Expect(err).NotTo(HaveOccurred()) - - signer, err := p.DeserializeSigningIdentity(context.Background(), id) - Expect(err).NotTo(HaveOccurred()) - - id, err = identity.WrapWithType(idemix2.IdentityType, id) - Expect(err).NotTo(HaveOccurred()) - - return id, auditInfo, signer -} - -func prepareIssue(auditor *audit.Auditor, issuer *issue2.Issuer, issuerIdentity []byte) (*driver.TokenRequest, *driver.TokenRequestMetadata) { - id, auditInfo, _ := getIdemixInfo("./testdata/bls12_381_bbs/idemix") +func prepareIssue(auditor *audit.Auditor, issuer *issue2.Issuer, issuerIdentity []byte, oID *benchmark.OwnerIdentity) (*driver.TokenRequest, *driver.TokenRequestMetadata, error) { owners := make([][]byte, 1) - owners[0] = id + owners[0] = oID.ID values := []uint64{40} issue, inf, err := issuer.GenerateZKIssue(values, owners) - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return nil, nil, err + } - auditInfoRaw, err := auditInfo.Bytes() - Expect(err).NotTo(HaveOccurred()) + auditInfoRaw, err := oID.AuditInfo.Bytes() + if err != nil { + return nil, nil, err + } metadata := &driver.IssueMetadata{ Issuer: driver.AuditableIdentity{ Identity: issuerIdentity, @@ -403,7 +356,9 @@ func prepareIssue(auditor *audit.Auditor, issuer *issue2.Issuer, issuerIdentity } for i := range len(issue.Outputs) { marshalledinf, err := inf[i].Serialize() - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return nil, nil, err + } metadata.Outputs = append(metadata.Outputs, &driver.IssueOutputMetadata{ OutputMetadata: marshalledinf, Receivers: []*driver.AuditableIdentity{ @@ -417,33 +372,45 @@ func prepareIssue(auditor *audit.Auditor, issuer *issue2.Issuer, issuerIdentity // serialize token action raw, err := issue.Serialize() - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return nil, nil, err + } // sign token request ir := &driver.TokenRequest{Issues: [][]byte{raw}} raw, err = ir.MarshalToMessageToSign([]byte("1")) - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return 
nil, nil, err + } sig, err := issuer.SignTokenActions(raw) - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return nil, nil, err + } ir.Signatures = append(ir.Signatures, sig) issueMetadata := &driver.TokenRequestMetadata{Issues: []*driver.IssueMetadata{metadata}} err = auditor.Check(context.Background(), ir, issueMetadata, nil, "1") - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return nil, nil, err + } sigma, err := auditor.Endorse(ir, "1") - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return nil, nil, err + } araw, err := auditor.Signer.Serialize() - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return nil, nil, err + } ir.AuditorSignatures = append(ir.AuditorSignatures, &driver.AuditorSignature{ Identity: araw, Signature: sigma, }) - return ir, issueMetadata + return ir, issueMetadata, nil } -func prepareTransfer(pp *v1.PublicParams, signer driver.SigningIdentity, auditor *audit.Auditor, auditInfo *crypto.AuditInfo, id []byte, owners [][]byte, issuer *issue2.Issuer, issuerIdentity []byte) (*transfer.Sender, *driver.TokenRequest, *driver.TokenRequestMetadata, []*tokn.Token) { +func prepareTransfer(pp *v1.PublicParams, signer driver.SigningIdentity, auditor *audit.Auditor, auditInfo *crypto.AuditInfo, id []byte, owners [][]byte, issuer *issue2.Issuer, issuerIdentity []byte) (*transfer.Sender, *driver.TokenRequest, *driver.TokenRequestMetadata, []*tokn.Token, error) { signers := make([]driver.Signer, 2) signers[0] = signer signers[1] = signer @@ -455,7 +422,9 @@ func prepareTransfer(pp *v1.PublicParams, signer driver.SigningIdentity, auditor inBF := make([]*math.Zr, 2) rand, err := c.Rand() - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return nil, nil, nil, nil, err + } for i := range 2 { inBF[i] = c.NewRandomZr(rand) } @@ -476,29 +445,41 @@ func prepareTransfer(pp *v1.PublicParams, signer driver.SigningIdentity, auditor inputInf[0] = &tokn.Metadata{Type: "ABC", Value: invalues[0], BlindingFactor: inBF[0]} inputInf[1] = &tokn.Metadata{Type: "ABC", Value: invalues[1], BlindingFactor: inBF[1]} sender, err := transfer.NewSender(signers, tokens, ids, inputInf, pp) - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return nil, nil, nil, nil, err + } - transfer2, metas, err := sender.GenerateZKTransfer(context.TODO(), outvalues, owners) - Expect(err).NotTo(HaveOccurred()) + transfer2, metas, err := sender.GenerateZKTransfer(context.Background(), outvalues, owners) + if err != nil { + return nil, nil, nil, nil, err + } if issuerIdentity != nil { - transfer2.Issuer = driver.Identity(issuerIdentity) + transfer2.Issuer = issuerIdentity } transferRaw, err := transfer2.Serialize() - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return nil, nil, nil, nil, err + } tr := &driver.TokenRequest{Transfers: [][]byte{transferRaw}} raw, err := tr.MarshalToMessageToSign([]byte("1")) - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return nil, nil, nil, nil, err + } marshalledInfo := make([][]byte, len(metas)) for i := range metas { marshalledInfo[i], err = metas[i].Serialize() - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return nil, nil, nil, nil, err + } } auditInfoRaw, err := auditInfo.Bytes() - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return nil, nil, nil, nil, err + } metadata := &driver.TransferMetadata{} for range len(transfer2.Inputs) { metadata.Inputs = append(metadata.Inputs, &driver.TransferInputMetadata{ @@ -510,12 +491,13 @@ func prepareTransfer(pp *v1.PublicParams, signer driver.SigningIdentity, auditor }, }, }) 
- Expect(err).NotTo(HaveOccurred()) } for i := range len(transfer2.Outputs) { marshalledinf, err := metas[i].Serialize() - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return nil, nil, nil, nil, err + } metadata.Outputs = append(metadata.Outputs, &driver.TransferOutputMetadata{ OutputMetadata: marshalledinf, OutputAuditInfo: auditInfoRaw, @@ -537,192 +519,46 @@ func prepareTransfer(pp *v1.PublicParams, signer driver.SigningIdentity, auditor transferMetadata := &driver.TokenRequestMetadata{Transfers: []*driver.TransferMetadata{metadata}} err = auditor.Check(context.Background(), tr, transferMetadata, tokns, "1") - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return nil, nil, nil, nil, err + } sigma, err := auditor.Endorse(tr, "1") - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return nil, nil, nil, nil, err + } araw, err := auditor.Signer.Serialize() - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return nil, nil, nil, nil, err + } tr.AuditorSignatures = append(tr.AuditorSignatures, &driver.AuditorSignature{ Identity: araw, Signature: sigma, }) signatures, err := sender.SignTokenActions(raw) - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return nil, nil, nil, nil, err + } tr.Signatures = append(tr.Signatures, signatures...) // Add issuer signature for redeem case if issuer != nil { issuerSignature, err := issuer.Signer.Sign(raw) - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return nil, nil, nil, nil, err + } tr.Signatures = append(tr.Signatures, issuerSignature) } - return sender, tr, transferMetadata, tokens -} - -func getState(id token2.ID) ([]byte, error) { - return fakeLedger.GetState(id) -} - -var ( - // curveHalfOrders contains the precomputed curve group orders halved. - // It is used to ensure that signature' S value is lower or equal to the - // curve group order halved. We accept only low-S signatures. - // They are precomputed for efficiency reasons. 
- curveHalfOrders = map[elliptic.Curve]*big.Int{ - elliptic.P224(): new(big.Int).Rsh(elliptic.P224().Params().N, 1), - elliptic.P256(): new(big.Int).Rsh(elliptic.P256().Params().N, 1), - elliptic.P384(): new(big.Int).Rsh(elliptic.P384().Params().N, 1), - elliptic.P521(): new(big.Int).Rsh(elliptic.P521().Params().N, 1), - } -) - -type Signature struct { - R, S *big.Int -} - -type Signer struct { - *Verifier - SK *ecdsa.PrivateKey -} - -func (d *Signer) Sign(message []byte) ([]byte, error) { - dgst := sha256.Sum256(message) - - r, s, err := ecdsa.Sign(rand.Reader, d.SK, dgst[:]) - if err != nil { - return nil, err - } - - s, _, err = ToLowS(&d.SK.PublicKey, s) - if err != nil { - return nil, err - } - - return utils.MarshalECDSASignature(r, s) -} - -func (d *Signer) Serialize() ([]byte, error) { - return d.Verifier.Serialize() -} - -type Verifier struct { - PK *ecdsa.PublicKey -} - -func NewECDSASigner() (*Signer, error) { - // Create ephemeral key and store it in the context - sk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - return nil, err - } - return &Signer{SK: sk, Verifier: &Verifier{PK: &sk.PublicKey}}, nil -} - -func (v *Verifier) Verify(message, sigma []byte) error { - signature := &Signature{} - _, err := asn1.Unmarshal(sigma, signature) - if err != nil { - return err - } - - hash := sha256.New() - n, err := hash.Write(message) - if n != len(message) { - return errors.Errorf("hash failure") - } - if err != nil { - return err - } - digest := hash.Sum(nil) - - lowS, err := IsLowS(v.PK, signature.S) - if err != nil { - return err - } - if !lowS { - return errors.New("signature is not in lowS") - } - - valid := ecdsa.Verify(v.PK, digest, signature.R, signature.S) - if !valid { - return errors.Errorf("signature not valid") - } - - return nil -} - -func (v *Verifier) Serialize() ([]byte, error) { - pkRaw, err := PemEncodeKey(v.PK) - if err != nil { - return nil, errors.Wrap(err, "failed marshalling public key") - } - - wrap, err := identity.WrapWithType(ix509.IdentityType, pkRaw) - if err != nil { - return nil, errors.Wrap(err, "failed wrapping identity") - } - - return wrap, nil -} - -// PemEncodeKey takes a Go key and converts it to bytes -func PemEncodeKey(key interface{}) ([]byte, error) { - var encoded []byte - var err error - var keyType string - - switch key.(type) { - case *ecdsa.PrivateKey, *rsa.PrivateKey: - keyType = "PRIVATE" - encoded, err = x509.MarshalPKCS8PrivateKey(key) - case *ecdsa.PublicKey, *rsa.PublicKey: - keyType = "PUBLIC" - encoded, err = x509.MarshalPKIXPublicKey(key) - default: - err = errors.Errorf("Programming error, unexpected key type %T", key) - } - if err != nil { - return nil, err - } - - return pem.EncodeToMemory(&pem.Block{Type: keyType + " KEY", Bytes: encoded}), nil -} - -// IsLowS checks that s is a low-S -func IsLowS(k *ecdsa.PublicKey, s *big.Int) (bool, error) { - halfOrder, ok := curveHalfOrders[k.Curve] - if !ok { - return false, fmt.Errorf("curve not recognized [%s]", k.Curve) - } - - return s.Cmp(halfOrder) != 1, nil -} - -func ToLowS(k *ecdsa.PublicKey, s *big.Int) (*big.Int, bool, error) { - lowS, err := IsLowS(k, s) - if err != nil { - return nil, false, err - } - - if !lowS { - // Set s to N - s that will be then in the lower part of signature space - // less or equal to half order - s.Sub(k.Params().N, s) - - return s, true, nil - } - - return s, false, nil + return sender, tr, transferMetadata, tokens, nil } type Deserializer struct { auditInfo []byte } -func (d *Deserializer) Match(ctx context.Context, id 
[]byte) error { +func (d *Deserializer) Match(_ context.Context, id []byte) error { identity, err := identity.WrapWithType(ix509.IdentityType, id) if err != nil { return errors.Wrapf(err, "failed to unmarshal identity [%s]", id) @@ -733,10 +569,10 @@ func (d *Deserializer) Match(ctx context.Context, id []byte) error { return nil } -func (d *Deserializer) GetAuditInfoMatcher(ctx context.Context, owner driver.Identity, auditInfo []byte) (driver.Matcher, error) { +func (d *Deserializer) GetAuditInfoMatcher(_ context.Context, _ driver.Identity, auditInfo []byte) (driver.Matcher, error) { return &Deserializer{auditInfo: auditInfo}, nil } -func (d *Deserializer) DeserializeVerifier(ctx context.Context, id driver.Identity) (driver.Verifier, error) { +func (d *Deserializer) DeserializeVerifier(_ context.Context, _ driver.Identity) (driver.Verifier, error) { panic("implement me") } diff --git a/token/driver/validator.go b/token/driver/validator.go index 49ee623f71..0b87566fe7 100644 --- a/token/driver/validator.go +++ b/token/driver/validator.go @@ -22,6 +22,8 @@ type ValidationAttributes = map[ValidationAttributeID][]byte type GetStateFnc = func(id token.ID) ([]byte, error) // Ledger models a read-only ledger +// +//go:generate counterfeiter -o mock/ledger.go -fake-name Ledger . Ledger type Ledger interface { // GetState returns the value for the given key GetState(id token.ID) ([]byte, error) diff --git a/token/services/benchmark/cmd/memcheck/README.md b/token/services/benchmark/cmd/memcheck/README.md new file mode 100644 index 0000000000..e7358b4dff --- /dev/null +++ b/token/services/benchmark/cmd/memcheck/README.md @@ -0,0 +1,71 @@ +# Go Pprof Memory Analyzer + +A specialized command-line tool for deep-diving into Go heap profiles (`pprof`). +This analyzer goes beyond standard pprof tools by applying heuristics to detect common memory anti-patterns, potential leaks, and high-pressure allocation hotspots. + +## Features + +This tool parses a standard `pprof` heap profile and generates a 7-section unified report: + +1. **Anti-Pattern Detection**: Automatically flags common mistakes (e.g., `time.After` in loops, heavy JSON reflection, unoptimized slice growth). +2. **Hot Lines**: Pinpoints the exact file and line number responsible for the most allocations. +3. **Business Logic Context**: aggregates data by pprof labels (if present) to show cost per request/worker/context. +4. **Top Object Producers**: Identifies functions creating the most garbage (GC pressure), calculated by allocation count and average object size. +5. **Leak Candidates**: Highlights functions with high "In-Use" memory but low "Allocated" throughput—a strong signal for memory leaks or unbounded caches. +6. **Root Cause Traces**: Displays the call stack for the top 5 heavy allocators to show *who* is calling the expensive function. +7. **ASCII Flame Graph**: Visualizes the call tree directly in your terminal for paths consuming >1% of memory. + +## Installation + +You can run the tool directly or build it as a binary. + +### Build +``` +make memcheck +``` + +### Run +``` +memcheck +``` + +Or run directly with `go run`: +``` +go run main.go heap.pb.gz +``` + +*Note: You must provide a standard Go heap profile (proto format). You can generate one from a running Go app using:* +`curl -o heap.pb.gz http://localhost:6060/debug/pprof/heap` + +## Report Sections Explained + +### 1. 
Detected Anti-Patterns & Heuristics +Checks your profile against a list of known Go performance pitfalls: +- **Loop Timer Leak**: Misuse of `time.After` inside loops. +- **Repeated RegEx**: `regexp.Compile` appearing in hot paths. +- **Heavy JSON**: Excessive allocation in `json.Unmarshal`. +- **Interface Boxing**: High cost of converting concrete types to `interface{}`. +- **Slice/Map Growth**: High allocations in `growslice` or `mapassign` (suggests missing capacity pre-allocation). + +### 2. Hot Lines +Shows the exact source code location (File:Line) for the top allocators. This is often more useful than function names alone. + +### 3. Business Logic Context (Labels) +If your application uses pprof labels (e.g., `pprof.Do`), this section breaks down memory usage by those labels (e.g., per HTTP route or background worker ID). + +### 4. Top Object Producers +Focuses on **GC Pressure**. Functions listed here churn through many small objects, causing the Garbage Collector to run frequently, even if total memory usage is low. + +### 5. Persistent Memory (Leak Candidates) +Focuses on **RAM Usage**. Lists functions that allocate memory that *stays* allocated. +- **High In-Use %**: Suggests a cache, global variable, or memory leak. +- **Diagnosis**: The tool provides specific advice (e.g., "Unbounded Map?", "Check capacity reset") based on the ratio of In-Use vs. Allocated bytes. + +### 6. Root Cause Trace +For the top 5 allocators, this prints the stack trace up to the root. It attempts to tag the "Likely Cause" by skipping standard library functions to find your code. + +### 7. ASCII Flame Graph +A text-based tree view of memory consumption. +- `├──` indicates a child call. +- Shows total bytes and percentage of heap for that path. +- Hides paths contributing <1% to reduce noise. diff --git a/token/services/benchmark/cmd/memcheck/main.go b/token/services/benchmark/cmd/memcheck/main.go new file mode 100644 index 0000000000..7c83f9fb73 --- /dev/null +++ b/token/services/benchmark/cmd/memcheck/main.go @@ -0,0 +1,587 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package main + +import ( + "flag" + "fmt" + "log" + "os" + "sort" + "strings" + "text/tabwriter" + + "github.com/google/pprof/profile" +) + +const ( + NoiseFloor = 0.005 // 0.5% +) + +type FuncStat struct { + Name string + File string + Line int + FlatAllocBytes int64 + FlatAllocObj int64 + FlatInUseBytes int64 + FlatInUseObj int64 + CumAllocBytes int64 + Callers map[string]int64 +} + +type LineStat struct { + File string + Line int + Function string + AllocBytes int64 +} + +type LabelStat struct { + Name string + AllocBytes int64 + InUseBytes int64 +} + +type StackRecord struct { + Stack []string // Leaf -> Root + Bytes int64 +} + +// FlameNode for the ASCII Tree +type FlameNode struct { + Name string + Total int64 + Children map[string]*FlameNode +} + +func NewFuncStat(name, file string, line int) *FuncStat { + return &FuncStat{ + Name: name, + File: file, + Line: line, + Callers: make(map[string]int64), + } +} + +func printUnifiedReport(stats []*FuncStat, labels []*LabelStat, lines []*LineStat, stacks []StackRecord, totalAlloc, totalInUse int64) { + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + + writef(w, "\n================ ULTIMATE GO MEMORY ANALYZER ================\n") + writef(w, "Total Allocated: %s | Total In-Use: %s\n\n", formatBytes(totalAlloc), formatBytes(totalInUse)) + + detectAntiPatterns(stats, w) + + // --- SECTION 2: HOT LINES --- + writef(w, "\n## 2. 
HOT LINES (Exact Source Location)\n") + writeLine(w, "FILE:LINE\tFUNCTION\tALLOC BYTES\t% TOTAL") + writeLine(w, "---------\t--------\t-----------\t-------") + for i := 0; i < 10 && i < len(lines); i++ { + l := lines[i] + ratio := float64(l.AllocBytes) / float64(totalAlloc) + if ratio < NoiseFloor { + continue + } + writef(w, "%s:%d\t%s\t%s\t%.1f%%\n", shortenPath(l.File), l.Line, shortenName(l.Function), formatBytes(l.AllocBytes), ratio*100) + } + + // --- SECTION 3: LABELS --- + if len(labels) > 0 { + writef(w, "\n## 3. BUSINESS LOGIC CONTEXT (Labels)\n") + writeLine(w, "LABEL\tALLOC %\tALLOC BYTES\tIN-USE BYTES") + writeLine(w, "-----\t-------\t-----------\t------------") + for i := 0; i < 10 && i < len(labels); i++ { + l := labels[i] + ratio := float64(l.AllocBytes) / float64(totalAlloc) + writef(w, "%s\t%.1f%%\t%s\t%s\n", l.Name, ratio*100, formatBytes(l.AllocBytes), formatBytes(l.InUseBytes)) + } + } + + // --- SECTION 4: TOP ALLOCATORS --- + writef(w, "\n## 4. TOP OBJECT PRODUCERS (GC Pressure)\n") + writeLine(w, "NAME\tFLAT %\tFLAT BYTES\tAVG SIZE\tIMMEDIATE CALLER") + writeLine(w, "----\t------\t----------\t--------\t----------------") + sort.Slice(stats, func(i, j int) bool { return stats[i].FlatAllocBytes > stats[j].FlatAllocBytes }) + for i := 0; i < 20 && i < len(stats); i++ { + s := stats[i] + ratio := float64(s.FlatAllocBytes) / float64(totalAlloc) + if ratio < NoiseFloor { + continue + } + avgSize := int64(0) + if s.FlatAllocObj > 0 { + avgSize = s.FlatAllocBytes / s.FlatAllocObj + } + writef(w, "%s\t%.1f%%\t%s\t%d B\t%s\n", shortenName(s.Name), ratio*100, formatBytes(s.FlatAllocBytes), avgSize, getTopCallers(s.Callers, s.FlatAllocBytes)) + } + + // --- SECTION 5: LEAKS --- + writef(w, "\n## 5. PERSISTENT MEMORY (Leak Candidates)\n") + writeLine(w, "NAME\tIN-USE %\tIN-USE BYTES\tALLOC %\tSUGGESTION/DIAGNOSIS") + writeLine(w, "----\t--------\t------------\t-------\t--------------------") + sort.Slice(stats, func(i, j int) bool { return stats[i].FlatInUseBytes > stats[j].FlatInUseBytes }) + for i := 0; i < 15 && i < len(stats); i++ { + s := stats[i] + inUseRatio := float64(s.FlatInUseBytes) / float64(totalInUse) + allocRatio := float64(s.FlatAllocBytes) / float64(totalAlloc) + if inUseRatio < NoiseFloor { + continue + } + writef(w, "%s\t%.1f%%\t%s\t%.1f%%\t%s\n", shortenName(s.Name), inUseRatio*100, formatBytes(s.FlatInUseBytes), allocRatio*100, suggestFix(s, inUseRatio, allocRatio)) + } + + // --- SECTION 6: TRACES --- + if len(stats) > 0 { + writef(w, "\n## 6. ROOT CAUSE TRACE (Top 5 Allocators)\n") + sort.Slice(stats, func(i, j int) bool { return stats[i].FlatAllocBytes > stats[j].FlatAllocBytes }) + count := 0 + for i := 0; i < len(stats) && count < 5; i++ { + s := stats[i] + if float64(s.FlatAllocBytes)/float64(totalAlloc) < NoiseFloor { + continue + } + writef(w, "\n [Rank #%d] Offender: %s\n", count+1, shortenName(s.Name)) + printHotStackWithBlame(w, s.Name, stacks) + count++ + } + } + + // --- SECTION 7: ASCII FLAME GRAPH --- + writef(w, "\n## 7. ASCII FLAME GRAPH (Call Tree)\n") + writef(w, " Showing paths consuming >1%% of total memory.\n\n") + printFlameGraph(w, stacks, totalAlloc) + + if err := w.Flush(); err != nil { + _, _ = fmt.Fprintln(os.Stderr, "benchmark: flush error:", err) + } +} + +func printFlameGraph(w *tabwriter.Writer, stacks []StackRecord, totalAlloc int64) { + // 1. Build Trie + root := &FlameNode{Name: "Total", Total: 0, Children: make(map[string]*FlameNode)} + + for _, rec := range stacks { + // rec.Stack is Leaf -> Root (e.g. 
[Malloc, FuncA, Main]) + // We need Root -> Leaf for the tree (e.g. Main -> FuncA -> Malloc) + if len(rec.Stack) == 0 { + continue + } + current := root + root.Total += rec.Bytes + + for i := len(rec.Stack) - 1; i >= 0; i-- { + fnName := rec.Stack[i] + if _, exists := current.Children[fnName]; !exists { + current.Children[fnName] = &FlameNode{ + Name: fnName, + Children: make(map[string]*FlameNode), + } + } + current = current.Children[fnName] + current.Total += rec.Bytes + } + } + + // 2. Print Trie + printFlameNode(w, root, "", totalAlloc, true) +} + +func printFlameNode(w *tabwriter.Writer, node *FlameNode, prefix string, totalAlloc int64, isLast bool) { + // Cutoff: Hide nodes with < 1% impact + ratio := float64(node.Total) / float64(totalAlloc) + if ratio < 0.01 { + return + } + + // Prepare display + connector := "├── " + if isLast { + connector = "└── " + } + if prefix == "" { + connector = "" // Root + } + + // Print Node + name := shortenName(node.Name) + if node.Name == "Total" { + name = "TOTAL ALLOC" + } + writef(w, "%s%s%s (%s, %.1f%%)\n", prefix, connector, name, formatBytes(node.Total), ratio*100) + + // Prepare prefix for children + childPrefix := prefix + if prefix == "" { + childPrefix = "" + } else if isLast { + childPrefix += " " + } else { + childPrefix += "│ " + } + + // Sort Children by Total Bytes (descending) + type childSort struct { + Name string + Total int64 + } + var children []childSort + for _, c := range node.Children { + children = append(children, childSort{c.Name, c.Total}) + } + sort.Slice(children, func(i, j int) bool { return children[i].Total > children[j].Total }) + + // Recursively print children + for i, c := range children { + childNode := node.Children[c.Name] + printFlameNode(w, childNode, childPrefix, totalAlloc, i == len(children)-1) + } +} + +// --- Existing Heuristics & Helpers --- + +func detectAntiPatterns(stats []*FuncStat, w *tabwriter.Writer) { + writef(w, "## 1. DETECTED ANTI-PATTERNS & HEURISTICS\n") + writeLine(w, "FUNCTION\tISSUE\tADVICE") + writeLine(w, "--------\t-----\t------") + + found := false + for _, s := range stats { + name := strings.ToLower(s.Name) + + if strings.Contains(name, "time.after") { + writef(w, "%s\tLoop Timer Leak\tUse time.NewTicker or time.Timer + Stop()\n", shortenName(s.Name)) + found = true + } + if strings.Contains(name, "regexp.compile") && s.FlatAllocObj > 50 { + writef(w, "%s\tRepeated RegEx\tCompile once in global var or init()\n", shortenName(s.Name)) + found = true + } + if strings.Contains(name, "json.unmarshal") && s.FlatAllocBytes > 1024*1024*10 { + writef(w, "%s\tHeavy JSON\tUse json.Decoder or easyjson\n", shortenName(s.Name)) + found = true + } + if (strings.Contains(name, "slicebytetostring") || strings.Contains(name, "stringtoslicebyte")) && s.FlatAllocBytes > 1024*1024 { + writef(w, "%s\tType Conv (Safe)\tHeavy []byte <-> string.\n", shortenName(s.Name)) + found = true + } + if strings.Contains(name, "runtime.convt") && s.FlatAllocBytes > 1024*1024 { + writef(w, "%s\tInterface Boxing\tConcrete -> interface{}. 
Generics?\n", shortenName(s.Name)) + found = true + } + if strings.Contains(name, "growslice") && s.FlatAllocBytes > 1024*1024 { + writef(w, "%s\tSlice Append\tPre-allocate: make([], 0, cap)\n", shortenName(s.Name)) + found = true + } + if (strings.Contains(name, "mapassign") || strings.Contains(name, "evacuate")) && s.FlatAllocBytes > 1024*1024 { + writef(w, "%s\tMap Growth\tPre-allocate: make(map, cap)\n", shortenName(s.Name)) + found = true + } + if strings.Contains(name, "runtime.malg") && s.FlatAllocObj > 1000 { + writef(w, "%s\tGoroutine Churn\tStarting %d+ goroutines. Worker Pool?\n", shortenName(s.Name), s.FlatAllocObj) + found = true + } + } + if !found { + writeLine(w, "None\t-\tNo obvious anti-patterns found.") + } +} + +func suggestFix(s *FuncStat, inUseRatio, allocRatio float64) string { + name := strings.ToLower(s.Name) + if strings.Contains(name, "buf") || strings.Contains(name, "read") { + return "Buffer growth? Check capacity reset." + } + if strings.Contains(name, "cache") || strings.Contains(name, "map") { + return "Unbounded Map/Cache? Add eviction." + } + if allocRatio < 0.001 { + return "Static Data. Safe if expected." + } + if inUseRatio > 0.30 { + return "CRITICAL: Holds >30% RAM." + } + return "Inspect retention logic." +} + +func printHotStackWithBlame(w *tabwriter.Writer, targetFunc string, stacks []StackRecord) { + stackSums := make(map[string]int64) + stackDefinitions := make(map[string][]string) + + for _, rec := range stacks { + if len(rec.Stack) == 0 { + continue + } + if rec.Stack[0] == targetFunc { + sig := strings.Join(rec.Stack, ";") + stackSums[sig] += rec.Bytes + stackDefinitions[sig] = rec.Stack + } + } + + var maxSig string + var maxBytes int64 + for sig, b := range stackSums { + if b > maxBytes { + maxBytes = b + maxSig = sig + } + } + + if maxSig == "" { + writeLine(w, " (No trace found)") + return + } + + trace := stackDefinitions[maxSig] + writeLine(w, " Trace (Leaf -> Root):") + blameFound := false + for i, fn := range trace { + indent := strings.Repeat(" ", i) + marker := "" + if !blameFound && !isStdLib(fn) { + marker = " <-- [LIKELY CAUSE / ENTRY POINT]" + blameFound = true + } + if i == 0 { + marker = " (Allocator)" + } + writef(w, " %s-> %s%s\n", indent, shortenName(fn), marker) + if i >= 15 { + writef(w, " %s ...\n", indent) + break + } + } +} + +func isStdLib(funcName string) bool { + prefixes := []string{ + "runtime", "sync", "syscall", "net", "io", "bufio", "bytes", "strings", + "encoding", "time", "reflect", "math", "sort", "compress", "crypto", + "internal", "os", "path", "fmt", "log", + } + clean := strings.TrimLeft(funcName, "*") + for _, p := range prefixes { + if strings.HasPrefix(clean, p+".") || strings.HasPrefix(clean, p+"/") { + return true + } + } + return false +} + +func funcKey(fn *profile.Function) string { + return fmt.Sprintf("%s:%s", fn.Name, fn.Filename) +} + +func getTopCallers(callers map[string]int64, total int64) string { + if len(callers) == 0 { + return "[Root]" + } + type caller struct { + Name string + Bytes int64 + } + var list []caller + for k, v := range callers { + list = append(list, caller{k, v}) + } + sort.Slice(list, func(i, j int) bool { return list[i].Bytes > list[j].Bytes }) + var parts []string + for i := 0; i < 2 && i < len(list); i++ { + pct := float64(list[i].Bytes) / float64(total) * 100 + parts = append(parts, fmt.Sprintf("%s (%.0f%%)", shortenName(list[i].Name), pct)) + } + return strings.Join(parts, ", ") +} + +func formatBytes(b int64) string { + const unit = 1024 + if b < unit { + return 
fmt.Sprintf("%d B", b) + } + div, exp := int64(unit), 0 + for n := b / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "KMGTPE"[exp]) +} + +func shortenName(n string) string { + parts := strings.Split(n, "/") + return parts[len(parts)-1] +} + +func shortenPath(p string) string { + parts := strings.Split(p, "/") + if len(parts) > 2 { + return strings.Join(parts[len(parts)-2:], "/") + } + return p +} + +func writef(w *tabwriter.Writer, format string, a ...interface{}) { + _, _ = fmt.Fprintf(w, format, a...) +} + +func writeLine(w *tabwriter.Writer, s string) { + _, _ = fmt.Fprintln(w, s) +} + +func main() { + flag.Parse() + if flag.NArg() < 1 { + log.Fatal("Usage: memcheck ") + } + + filename := flag.Arg(0) + f, err := os.Open(filename) + if err != nil { + log.Fatalf("Failed to open file: %v", err) + } + defer func() { + _ = f.Close() + }() + + p, err := profile.Parse(f) + if err != nil { + log.Fatalf("Failed to parse profile: %v", err) + } + + // 1. Identify Metrics + idxAllocSpace, idxAllocObj := -1, -1 + idxInUseSpace, idxInUseObj := -1, -1 + + for i, st := range p.SampleType { + switch st.Type { + case "alloc_space", "alloc_bytes": + idxAllocSpace = i + case "alloc_objects", "alloc_count": + idxAllocObj = i + case "inuse_space", "inuse_bytes": + idxInUseSpace = i + case "inuse_objects", "inuse_count": + idxInUseObj = i + } + } + + if idxAllocSpace == -1 { + log.Fatal("Profile missing 'alloc_space'. Ensure this is a heap profile.") + } + + // 2. Aggregate Data + stats := make(map[string]*FuncStat) + lineStats := make(map[string]*LineStat) + labelStats := make(map[string]*LabelStat) + var totalAllocBytes, totalInUseBytes int64 + var topStacks []StackRecord + + for _, s := range p.Sample { + allocBytes := s.Value[idxAllocSpace] + allocObj := s.Value[idxAllocObj] + inUseBytes := s.Value[idxInUseSpace] + inUseObj := s.Value[idxInUseObj] + + totalAllocBytes += allocBytes + totalInUseBytes += inUseBytes + + // A. Function Analysis + seen := make(map[string]bool) + if len(s.Location) > 0 { + leafLoc := s.Location[0] + if len(leafLoc.Line) > 0 { + fn := leafLoc.Line[0].Function + lineNo := int(leafLoc.Line[0].Line) + + if fn != nil { + key := funcKey(fn) + if _, ok := stats[key]; !ok { + stats[key] = NewFuncStat(fn.Name, fn.Filename, lineNo) + } + stats[key].FlatAllocBytes += allocBytes + stats[key].FlatAllocObj += allocObj + stats[key].FlatInUseBytes += inUseBytes + stats[key].FlatInUseObj += inUseObj + + if len(s.Location) > 1 { + parentLoc := s.Location[1] + if len(parentLoc.Line) > 0 { + pFn := parentLoc.Line[0].Function + if pFn != nil { + stats[key].Callers[pFn.Name] += allocBytes + } + } + } + + // Line Stat + lineKey := fmt.Sprintf("%s:%d", fn.Filename, lineNo) + if _, ok := lineStats[lineKey]; !ok { + lineStats[lineKey] = &LineStat{File: fn.Filename, Line: lineNo, Function: fn.Name} + } + lineStats[lineKey].AllocBytes += allocBytes + } + } + } + + // B. Stack Trace Collection (Cumulative & Tree) + var currentStack []string + for _, loc := range s.Location { + for _, line := range loc.Line { + fn := line.Function + if fn == nil { + continue + } + currentStack = append(currentStack, fn.Name) + + key := funcKey(fn) + if seen[key] { + continue + } + seen[key] = true + + if _, ok := stats[key]; !ok { + stats[key] = NewFuncStat(fn.Name, fn.Filename, int(line.Line)) + } + stats[key].CumAllocBytes += allocBytes + } + } + + // C. 
Label Analysis + for key, values := range s.Label { + for _, val := range values { + labelID := fmt.Sprintf("%s:%s", key, val) + if _, ok := labelStats[labelID]; !ok { + labelStats[labelID] = &LabelStat{Name: labelID} + } + labelStats[labelID].AllocBytes += allocBytes + labelStats[labelID].InUseBytes += inUseBytes + } + } + + if allocBytes > 0 { + topStacks = append(topStacks, StackRecord{Stack: currentStack, Bytes: allocBytes}) + } + } + + // 3. Sorting + var statList []*FuncStat + for _, s := range stats { + statList = append(statList, s) + } + var labelList []*LabelStat + for _, s := range labelStats { + labelList = append(labelList, s) + } + sort.Slice(labelList, func(i, j int) bool { return labelList[i].AllocBytes > labelList[j].AllocBytes }) + var lineList []*LineStat + for _, s := range lineStats { + lineList = append(lineList, s) + } + sort.Slice(lineList, func(i, j int) bool { return lineList[i].AllocBytes > lineList[j].AllocBytes }) + + // 5. Generate Report + printUnifiedReport(statList, labelList, lineList, topStacks, totalAllocBytes, totalInUseBytes) +} diff --git a/token/services/benchmark/cmd/traceinspector/README.md b/token/services/benchmark/cmd/traceinspector/README.md new file mode 100644 index 0000000000..7cf97f0ba3 --- /dev/null +++ b/token/services/benchmark/cmd/traceinspector/README.md @@ -0,0 +1,86 @@ +# Go Pprof Trace Analyzer + +A lightweight command-line tool for analyzing Go execution traces (`trace.out`). +This tool processes traces generated by the Go runtime to identify performance bottlenecks, scheduler latency issues, contention, and potential goroutine leaks. + +It uses the modern `golang.org/x/exp/trace` API (compatible with Go 1.22+) to parse and aggregate event data. + +## Features + +- **Scheduler Latency Analysis**: Detects goroutines waiting too long in the global run queue before executing. +- **Blocking/Contention Analysis**: Identifies goroutines blocked on synchronization primitives (Mutex, Channels, WaitGroups) longer than a specified threshold. +- **Goroutine Leak Detection**: Reports goroutines that remain active/running at the very end of the trace. +- **CPU & Lifecycle Statistics**: Aggregates CPU time, system call time, and lifetime for top resource-consuming goroutines. + +## Requirements + +- Go 1.22 or higher (required for `golang.org/x/exp/trace`) + +## Installation + +1. Clone or copy the source code. +2. Build the binary: + +```shell +make traceinspector +``` + +## Usage + +### 1. Generate a Trace +First, generate a trace file from your Go application. You can do this using `runtime/trace` in your code or via tests: + +**In code:** +``` +f, _ := os.Create("trace.out") +trace.Start(f) +defer trace.Stop() +``` + +**Via tests:** +``` +go test -trace=trace.out +``` + +### 2. Run the Analyzer +Run the tool against your generated trace file. + +``` +trace-analyzer -file trace.out +``` + +### Command-Line Flags + +| Flag | Default | Description | +|------|---------|-------------| +| `-file` | `trace.out` | Path to the Go execution trace file. | +| `-latency` | `10ms` | Threshold for warning about slow scheduler latency (Runnable → Running). | +| `-block` | `10ms` | Threshold for warning about blocking events (Mutex/Channel contention). 
| + +### Examples + +**Analyze with custom thresholds:** +Detect blocking events longer than 50ms and scheduler delays longer than 20ms: +``` +./trace-analyzer -file mytrace.out -block 50ms -latency 20ms +``` + +## Understanding the Output + +The tool prints a summary report to standard output containing four sections: + +1. **Suspiciously Active Goroutines (Potential Leaks)** + * Lists goroutines that were still in a `Running` state when the trace ended. + * Useful for finding background workers that didn't shut down gracefully. + +2. **Scheduler Latency Summary** + * **Avg Scheduler Latency**: Average time goroutines spent waiting in the run queue. + * **Max Scheduler Latency**: The worst-case wait time observed. + +3. **Top 5 Most Blocked Goroutines** + * Goroutines that spent the most time in a `Waiting` state due to synchronization (e.g., `chan receive`, `Mutex.Lock`). + * Shows the specific reason and the stack trace where the goroutine was created. + +4. **Top 5 CPU-Heavy Goroutines** + * Goroutines with the highest total `CPUTime` (User + Syscall). + * Includes lifetime duration and average runnable wait time. diff --git a/token/services/benchmark/cmd/traceinspector/main.go b/token/services/benchmark/cmd/traceinspector/main.go new file mode 100644 index 0000000000..1f6f737348 --- /dev/null +++ b/token/services/benchmark/cmd/traceinspector/main.go @@ -0,0 +1,447 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package main + +import ( + "flag" + "fmt" + "io" + "log" + "os" + "sort" + "strings" + "time" + + "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/errors" + "golang.org/x/exp/trace" +) + +// GoroutineStats tracks the lifecycle and issues for a single goroutine. +type GoroutineStats struct { + ID trace.GoID + StartTime trace.Time + EndTime trace.Time + IsRunning bool + BlockedTime time.Duration + BlockReasons map[string]time.Duration + CreationStack string // The code that created this goroutine (creator's stack) + + // CPU / scheduler data. + CPUTime time.Duration // Total time in executing states (running + syscall) + RunningTime time.Duration // Time in GoRunning + SyscallTime time.Duration // Time in GoSyscall + LastExecState trace.GoState + + // Per-goroutine runnable-queue wait. + RunnableWait time.Duration + RunnableWaitCount int64 +} + +// AnalysisOptions holds the parameters for the trace analysis. +type AnalysisOptions struct { + LatencyThreshold time.Duration + BlockThreshold time.Duration +} + +// Result holds the aggregated results of a trace analysis. +type Result struct { + GoroutineStats map[trace.GoID]*GoroutineStats + TotalLatency time.Duration + LatencyCount int64 + MaxLatency time.Duration + TraceEndTime trace.Time +} + +// Print displays the full summary report from the analysis results. +func (r *Result) Print() { + fmt.Println("---------------------------------------------------") + fmt.Println("SUMMARY REPORT") + fmt.Println("---------------------------------------------------") + r.printLeakSummary() + r.printLatencySummary() + r.printBlockingSummary() + r.printCPUSummary() +} + +func (r *Result) printLeakSummary() { + activeCount := 0 + var activeGoroutines []string + for _, stat := range r.GoroutineStats { + if stat.IsRunning { + activeCount++ + if len(activeGoroutines) < 5 { + activeGoroutines = append(activeGoroutines, fmt.Sprintf(" [LEAK?] 
G%d created at: %s", stat.ID, stat.CreationStack)) + } + } + } + + fmt.Println("Suspiciously Active Goroutines (Potential Leaks):") + if activeCount == 0 { + fmt.Println(" None detected.") + } else { + for _, line := range activeGoroutines { + fmt.Println(line) + } + if activeCount > len(activeGoroutines) { + fmt.Printf(" ... and %d more.\n", activeCount-len(activeGoroutines)) + } + } +} + +func (r *Result) printLatencySummary() { + fmt.Println() + if r.LatencyCount > 0 { + avgLatency := r.TotalLatency / time.Duration(r.LatencyCount) + fmt.Printf("Avg Scheduler Latency: %v over %d transitions\n", avgLatency, r.LatencyCount) + fmt.Printf("Max Scheduler Latency: %v\n", r.MaxLatency) + } else { + fmt.Println("No runnable->running transitions observed.") + } +} + +func (r *Result) printBlockingSummary() { + type blockEntry struct { + ID trace.GoID + Duration time.Duration + Reason string + Stack string + } + + var blockedList []blockEntry + for _, stat := range r.GoroutineStats { + if stat.BlockedTime > 0 { + maxReason, maxDur := "", time.Duration(0) + for r, d := range stat.BlockReasons { + if d > maxDur { + maxDur, maxReason = d, r + } + } + blockedList = append(blockedList, blockEntry{stat.ID, stat.BlockedTime, maxReason, stat.CreationStack}) + } + } + + sort.Slice(blockedList, func(i, j int) bool { return blockedList[i].Duration > blockedList[j].Duration }) + + fmt.Println("\nTop 5 Most Blocked Goroutines:") + if len(blockedList) == 0 { + fmt.Println(" None with measurable blocking.") + } else { + limit := 5 + if len(blockedList) < limit { + limit = len(blockedList) + } + for i := 0; i < limit; i++ { + e := blockedList[i] + fmt.Printf(" G%d: %v blocked (most time on: %s)\n Created at: %s\n", e.ID, e.Duration, e.Reason, e.Stack) + } + } +} + +func (r *Result) printCPUSummary() { + type cpuEntry struct { + ID trace.GoID + CPUTime time.Duration + RunningTime time.Duration + SyscallTime time.Duration + Lifetime time.Duration + AvgRunnableWait time.Duration + CreationStack string + } + + var cpuList []cpuEntry + for _, stat := range r.GoroutineStats { + if stat.CPUTime > 0 { + var lifetime time.Duration + if stat.StartTime != 0 { + end := stat.EndTime + if end == 0 && r.TraceEndTime != 0 { + end = r.TraceEndTime + } + if end > stat.StartTime { + lifetime = time.Duration(end - stat.StartTime) + } + } + var avgWait time.Duration + if stat.RunnableWaitCount > 0 { + avgWait = stat.RunnableWait / time.Duration(stat.RunnableWaitCount) + } + cpuList = append(cpuList, cpuEntry{stat.ID, stat.CPUTime, stat.RunningTime, stat.SyscallTime, lifetime, avgWait, stat.CreationStack}) + } + } + + sort.Slice(cpuList, func(i, j int) bool { return cpuList[i].CPUTime > cpuList[j].CPUTime }) + + fmt.Println("\nTop 5 CPU-Heavy Goroutines:") + if len(cpuList) == 0 { + fmt.Println(" No goroutines with measurable CPU time.") + } else { + limit := 5 + if len(cpuList) < limit { + limit = len(cpuList) + } + for i := 0; i < limit; i++ { + e := cpuList[i] + fmt.Printf(" G%d: CPU=%v (run=%v, sys=%v), lifetime≈%v, avg run-q wait≈%v\n Created at: %s\n", + e.ID, e.CPUTime, e.RunningTime, e.SyscallTime, e.Lifetime, e.AvgRunnableWait, e.CreationStack) + } + } +} + +// analysisState holds the live state during the analysis of the trace. 
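+// The per-goroutine maps hold open intervals: `runnable` stores when a
+// goroutine entered the run queue, `blocked` stores when (and why) it started
+// waiting, and `execStart` stores when it last began executing. Each interval
+// is closed on the matching state transition; executing intervals still open
+// at the end of the trace are finalized in finalizeCPUAccounting.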
+type analysisState struct { + opts AnalysisOptions + stats map[trace.GoID]*GoroutineStats + runnable map[trace.GoID]trace.Time + blocked map[trace.GoID]struct { + Time trace.Time + Reason string + Stack trace.Stack + } + execStart map[trace.GoID]trace.Time + totalLatency time.Duration + latencyCount int64 + maxLatency time.Duration + lastEventTime trace.Time +} + +// analyze reads and processes a Go execution trace from an io.Reader. +func analyze(r io.Reader, opts AnalysisOptions) (*Result, error) { + traceReader, err := trace.NewReader(r) + if err != nil { + return nil, fmt.Errorf("failed to create trace reader: %w", err) + } + + state := &analysisState{ + opts: opts, + stats: make(map[trace.GoID]*GoroutineStats), + runnable: make(map[trace.GoID]trace.Time), + blocked: make(map[trace.GoID]struct { + Time trace.Time + Reason string + Stack trace.Stack + }), + execStart: make(map[trace.GoID]trace.Time), + } + + for { + ev, err := traceReader.ReadEvent() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, fmt.Errorf("failed to read event: %w", err) + } + processEvent(ev, state) + } + + // Finalize any open intervals at the end of the trace. + finalizeCPUAccounting(state) + + return &Result{ + GoroutineStats: state.stats, + TotalLatency: state.totalLatency, + LatencyCount: state.latencyCount, + MaxLatency: state.maxLatency, + TraceEndTime: state.lastEventTime, + }, nil +} + +// processEvent handles a single trace event. +func processEvent(ev trace.Event, state *analysisState) { + state.lastEventTime = ev.Time() + + if ev.Kind() != trace.EventStateTransition { + return + } + + st := ev.StateTransition() + if st.Resource.Kind != trace.ResourceGoroutine { + return + } + + handleGoroutineTransition(ev, st, state) +} + +// handleGoroutineTransition processes a state transition for a goroutine. +func handleGoroutineTransition(ev trace.Event, st trace.StateTransition, state *analysisState) { + id := st.Resource.Goroutine() + from, to := st.Goroutine() + + // Ensure stats struct exists. + if _, exists := state.stats[id]; !exists { + state.stats[id] = &GoroutineStats{ + ID: id, + BlockReasons: make(map[string]time.Duration), + } + } + gs := state.stats[id] + + // Update CPU time accounting. + updateCPUStats(ev, id, from, to, gs, state) + + // Lifecycle: Creation and termination. + if from == trace.GoNotExist && to == trace.GoRunnable { + gs.StartTime = ev.Time() + gs.IsRunning = true + gs.CreationStack = formatStack(ev.Stack()) // Creator's stack. + } + if to == trace.GoNotExist { + gs.EndTime = ev.Time() + gs.IsRunning = false + delete(state.runnable, id) + delete(state.blocked, id) + delete(state.execStart, id) + } + + // Scheduler latency analysis. + analyzeSchedulerLatency(ev, st, id, from, to, gs, state) + + // Blocking analysis. + analyzeBlocking(ev, st, id, from, to, gs, state) +} + +// updateCPUStats updates the time a goroutine spends in an executing state. +func updateCPUStats(ev trace.Event, id trace.GoID, from, to trace.GoState, gs *GoroutineStats, state *analysisState) { + if from.Executing() { + if start, ok := state.execStart[id]; ok { + dur := ev.Time().Sub(start) + gs.CPUTime += dur + switch from { + case trace.GoRunning: + gs.RunningTime += dur + case trace.GoSyscall: + gs.SyscallTime += dur + } + } + delete(state.execStart, id) + } + + if to.Executing() { + state.execStart[id] = ev.Time() + gs.LastExecState = to + } +} + +// analyzeSchedulerLatency measures the time a goroutine spends in the runnable queue. 
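+// A wait starts when a goroutine becomes GoRunnable and ends on the
+// GoRunnable -> GoRunning transition. Every measured wait feeds the aggregate
+// latency counters, and waits longer than opts.LatencyThreshold are reported
+// inline as they are observed.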
+func analyzeSchedulerLatency(ev trace.Event, st trace.StateTransition, id trace.GoID, from, to trace.GoState, gs *GoroutineStats, state *analysisState) { + if to == trace.GoRunnable { + state.runnable[id] = ev.Time() + } + + if from == trace.GoRunnable && to == trace.GoRunning { + if startWait, ok := state.runnable[id]; ok { + waitDuration := ev.Time().Sub(startWait) + state.totalLatency += waitDuration + state.latencyCount++ + if waitDuration > state.maxLatency { + state.maxLatency = waitDuration + } + gs.RunnableWait += waitDuration + gs.RunnableWaitCount++ + + if waitDuration > state.opts.LatencyThreshold { + fmt.Printf("[SCHEDULER SLOW] G%d waited %v to run.\n Resume at: %s\n", + id, waitDuration, formatStack(st.Stack)) + } + } + delete(state.runnable, id) + } +} + +// analyzeBlocking measures the time a goroutine spends in a blocked state. +func analyzeBlocking(ev trace.Event, st trace.StateTransition, id trace.GoID, from, to trace.GoState, gs *GoroutineStats, state *analysisState) { + if to == trace.GoWaiting { + state.blocked[id] = struct { + Time trace.Time + Reason string + Stack trace.Stack + }{ev.Time(), st.Reason, st.Stack} + } + + if from == trace.GoWaiting { + if blockInfo, ok := state.blocked[id]; ok { + blockDuration := ev.Time().Sub(blockInfo.Time) + gs.BlockedTime += blockDuration + gs.BlockReasons[blockInfo.Reason] += blockDuration + + isSyncBlock := strings.Contains(blockInfo.Reason, "Sync") || strings.Contains(blockInfo.Reason, "Mutex") || strings.Contains(blockInfo.Reason, "chan") + if isSyncBlock && blockDuration > state.opts.BlockThreshold { + fmt.Printf("[CONTENTION] G%d blocked for %v on %s.\n Blocked at: %s\n", + id, blockDuration, blockInfo.Reason, formatStack(blockInfo.Stack)) + } + delete(state.blocked, id) + } + } +} + +// finalizeCPUAccounting adds CPU time for goroutines still executing at the end of the trace. +func finalizeCPUAccounting(state *analysisState) { + for id, start := range state.execStart { + gs, ok := state.stats[id] + if !ok || state.lastEventTime == 0 || start == 0 { + continue + } + dur := state.lastEventTime.Sub(start) + gs.CPUTime += dur + switch gs.LastExecState { + case trace.GoRunning: + gs.RunningTime += dur + case trace.GoSyscall: + gs.SyscallTime += dur + } + } +} + +// formatStack formats the top relevant frame of a stack trace. +func formatStack(st trace.Stack) string { + var frames []trace.StackFrame + for f := range st.Frames() { + frames = append(frames, f) + } + if len(frames) == 0 { + return "(no stack)" + } + for _, f := range frames { + if !strings.Contains(f.File, "runtime/") && !strings.Contains(f.File, "internal/") { + return fmt.Sprintf("%s (%s:%d)", f.Func, f.File, f.Line) + } + } + f := frames[0] + return fmt.Sprintf("%s (%s:%d)", f.Func, f.File, f.Line) +} + +// main is the entry point of the application. It handles flag parsing, +// file opening, and orchestrates the analysis and printing of results. 
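+// A typical invocation against a trace produced with `go test -trace=trace.out`
+// might look like this (threshold values are illustrative):
+//
+//	traceinspector -file trace.out -latency 20ms -block 50ms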
+func main() { + tracePath := flag.String("file", "trace.out", "Path to the Go execution trace file") + latencyThreshold := flag.Duration("latency", 10*time.Millisecond, "Threshold for scheduler latency warnings") + blockThreshold := flag.Duration("block", 10*time.Millisecond, "Threshold for mutex blocking warnings") + flag.Parse() + + opts := AnalysisOptions{ + LatencyThreshold: *latencyThreshold, + BlockThreshold: *blockThreshold, + } + + f, err := os.Open(*tracePath) + if err != nil { + log.Fatalf("failed to open trace file: %v", err) + } + defer func() { + _ = f.Close() + }() + + fmt.Printf("Analyzing %s...\n", *tracePath) + fmt.Println("---------------------------------------------------") + + result, err := analyze(f, opts) + if err != nil { + log.Fatalf("Analysis failed: %v", err) + } + + result.Print() +} diff --git a/token/core/common/benchmark/flags.go b/token/services/benchmark/flags.go similarity index 75% rename from token/core/common/benchmark/flags.go rename to token/services/benchmark/flags.go index 83ae2f1696..54f89ec139 100644 --- a/token/core/common/benchmark/flags.go +++ b/token/services/benchmark/flags.go @@ -19,12 +19,14 @@ import ( ) var ( - bits = flag.String("bits", "", "a comma-separated list of bit sizes (32, 64,...)") - duration = flag.Duration("duration", 1*time.Second, "test duration (1s, 1m, 1h,...)") - curves = flag.String("curves", "", "comma-separated list of curves. Supported curves are: BN254, BLS12_381_BBS_GURVY, BLS12_381_BBS_GURVY_FAST_RNG") - numInputs = flag.String("num_inputs", "", "a comma-separate list of number of inputs (1,2,3,...)") - numOutputs = flag.String("num_outputs", "", "a comma-separate list of number of outputs (1,2,3,...)") - workers = flag.String("workers", "", "a comma-separate list of workers (1,2,3,...,NumCPU), where NumCPU is converted to the number of available CPUs") + bits = flag.String("bits", "", "a comma-separated list of bit sizes (32, 64,...)") + duration = flag.Duration("duration", 1*time.Second, "test duration (1s, 1m, 1h,...)") + curves = flag.String("curves", "", "comma-separated list of curves. Supported curves are: BN254, BLS12_381_BBS_GURVY, BLS12_381_BBS_GURVY_FAST_RNG") + numInputs = flag.String("num_inputs", "", "a comma-separate list of number of inputs (1,2,3,...)") + numOutputs = flag.String("num_outputs", "", "a comma-separate list of number of outputs (1,2,3,...)") + workers = flag.String("workers", "", "a comma-separate list of workers (1,2,3,...,NumCPU), where NumCPU is converted to the number of available CPUs") + profile = flag.Bool("profile", false, "write pprof profiles to file") + setupSamples = flag.Uint("setup_samples", 0, "number of setup samples, 0 disables it") ) // Bits parses the package-level `-bits` flag and returns a slice of bit sizes. @@ -129,3 +131,13 @@ func Integers[T constraints.Integer](str string, defaults ...T) ([]T, error) { } return values, nil } + +// ProfileEnabled returns true if profiling has been requested, false otherwise +func ProfileEnabled() bool { + return *profile +} + +// SetupSamples returns the number of setup samples to use. When 0, a setup will be generated for each evaluation. 
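+// A caller would typically branch on the returned value, e.g. (sketch only,
+// not taken from the benchmarks themselves):
+//
+//	if n := SetupSamples(); n > 0 {
+//		// pre-generate n setups and reuse them across evaluations
+//	} else {
+//		// generate a fresh setup for every evaluation
+//	}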
+func SetupSamples() uint { + return *setupSamples +} diff --git a/token/core/common/benchmark/flags_test.go b/token/services/benchmark/flags_test.go similarity index 100% rename from token/core/common/benchmark/flags_test.go rename to token/services/benchmark/flags_test.go diff --git a/token/services/benchmark/runner.go b/token/services/benchmark/runner.go new file mode 100644 index 0000000000..c9114c54ab --- /dev/null +++ b/token/services/benchmark/runner.go @@ -0,0 +1,813 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package benchmark + +import ( + "context" + "fmt" + "math" + "os" + "runtime" + "slices" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +// Config controls the benchmark execution. +type Config struct { + Workers int // Number of concurrent goroutines + Duration time.Duration // Time to record execution + WarmupDuration time.Duration // Time to run before recording + RateLimit float64 // Total Ops/Sec limit (0 = Unlimited/Closed-Loop) +} + +func NewConfig(workers int, duration time.Duration, warmupDuration time.Duration) Config { + return Config{Workers: workers, Duration: duration, WarmupDuration: warmupDuration} +} + +// Result holds the comprehensive benchmark metrics. +type Result struct { + Config Config + GoRoutines int // Workers (kept for compatibility) + + // Throughput + OpsTotal uint64 + Duration time.Duration + OpsPerSecReal float64 // Wall-clock throughput + OpsPerSecPure float64 // Theoretical concurrency / avg_latency + + // Latency Stats + AvgLatency time.Duration + StdDevLatency time.Duration + Variance float64 // Variance in nanoseconds^2 + P50Latency time.Duration // Median + P75Latency time.Duration + P95Latency time.Duration + P99Latency time.Duration + P999Latency time.Duration // 99.9th percentile + P9999Latency time.Duration // 99.99th percentile + MinLatency time.Duration + MaxLatency time.Duration + IQR time.Duration // Interquartile Range (P75 - P25) + Jitter time.Duration // Avg change between consecutive latencies + CoeffVar float64 // Coefficient of Variation (StdDev / Mean) + + // Stability & Time Series + Timeline []TimePoint + + // Memory & GC Stats + BytesPerOp uint64 + AllocsPerOp uint64 + AllocRateMBPS float64 // Allocations in MB per second + NumGC uint32 // Number of GC cycles during the recorded phase + GCPauseTotal time.Duration // Total time the world was stopped for GC + GCOverhead float64 // Percentage of time spent in GC + + // Reliability + ErrorCount uint64 + ErrorRate float64 + Histogram []Bucket +} + +// TimePoint captures system state at a specific moment. +type TimePoint struct { + Timestamp time.Duration + OpsSec float64 + ActiveCount int +} + +// Bucket represents a latency range and its frequency. +type Bucket struct { + LowBound time.Duration + HighBound time.Duration + Count int +} + +// chunk holds a fixed-size batch of latencies. +const chunkSize = 10000 + +type chunk struct { + data [chunkSize]time.Duration + next *chunk + idx int +} + +// workerStats aggregates results from a single worker. +type workerStats struct { + head *chunk + errors uint64 +} + +// ANSI Color Codes for output. +const ( + ColorReset = "\033[0m" + ColorRed = "\033[31m" + ColorGreen = "\033[32m" + ColorYellow = "\033[33m" + ColorBlue = "\033[34m" + ColorCyan = "\033[36m" +) + +// RunBenchmark executes the benchmark. 
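+// A minimal usage sketch (the setup and work functions below are placeholders,
+// not part of this package):
+//
+//	cfg := NewConfig(4, 10*time.Second, time.Second)
+//	res := RunBenchmark(cfg,
+//		func() []byte { return make([]byte, 1024) },     // per-worker setup
+//		func(buf []byte) error { buf[0]++; return nil }, // measured operation
+//	)
+//	res.Print()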
+func RunBenchmark[T any]( + cfg Config, + setup func() T, + work func(T) error, +) Result { + // Sanity defaults + if cfg.Workers <= 0 { + cfg.Workers = 1 + } + if cfg.Duration <= 0 { + cfg.Duration = 1 * time.Second + } + + // --------------------------------------------------------- + // PHASE 1: Memory Analysis (Serial & Isolated) + // --------------------------------------------------------- + memBytes, memAllocs := measureMemory(setup, work) + + // --------------------------------------------------------- + // PHASE 2: Throughput & Latency (Concurrent) + // --------------------------------------------------------- + runtime.GC() + time.Sleep(50 * time.Millisecond) + + var ( + running atomic.Bool + recording atomic.Bool + opsCounter atomic.Uint64 + startWg sync.WaitGroup + endWg sync.WaitGroup + startGlobal time.Time + ) + + running.Store(true) + recording.Store(false) + workerResults := make([]workerStats, cfg.Workers) + + // Rate Limiter Calculation + var intervalPerOp time.Duration + if cfg.RateLimit > 0 { + ratePerWorker := cfg.RateLimit / float64(cfg.Workers) + if ratePerWorker > 0 { + intervalPerOp = time.Duration(float64(time.Second) / ratePerWorker) + } + } + + startWg.Add(cfg.Workers) + endWg.Add(cfg.Workers) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + for i := 0; i < cfg.Workers; i++ { + workerID := i + go func() { + defer endWg.Done() + + currentChunk := &chunk{} + headChunk := currentChunk + var localErrors uint64 + var nextTick time.Time + + if intervalPerOp > 0 { + nextTick = time.Now() + } + + // This acts as a "starting gun" (Barrier pattern). + // It ensures that all worker goroutines are spawned, initialized, and ready to go before any of them begin execution. + startWg.Done() + startWg.Wait() + + d := setup() + + for running.Load() { + // Open-Loop Throttling + if intervalPerOp > 0 { + now := time.Now() + if now.Before(nextTick) { + time.Sleep(nextTick.Sub(now)) + } + nextTick = nextTick.Add(intervalPerOp) + if time.Since(nextTick) > intervalPerOp*10 { + nextTick = time.Now() + } + } + + tStart := time.Now() + err := work(d) + dur := time.Since(tStart) + + if recording.Load() { + // STRICT CHECK: Ensure op started AFTER recording began + if tStart.After(startGlobal) { + opsCounter.Add(1) + if err != nil { + localErrors++ + } + + if currentChunk.idx >= chunkSize { + newC := &chunk{} + currentChunk.next = newC + currentChunk = newC + } + currentChunk.data[currentChunk.idx] = dur + currentChunk.idx++ + } + } + } + workerResults[workerID] = workerStats{head: headChunk, errors: localErrors} + }() + } + + startWg.Wait() + + if cfg.WarmupDuration > 0 { + time.Sleep(cfg.WarmupDuration) + } + + var memBefore, memAfter runtime.MemStats + runtime.ReadMemStats(&memBefore) + + startGlobal = time.Now() + recording.Store(true) + + // Timeline Monitor + timeline := make([]TimePoint, 0, int(cfg.Duration.Seconds())+1) + + var monitorWg sync.WaitGroup + monitorWg.Add(1) + + go func() { + defer monitorWg.Done() + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + var prevOps uint64 + startTime := time.Now() + for { + select { + case <-ctx.Done(): + return + case t := <-ticker.C: + if !running.Load() { + return + } + currOps := opsCounter.Load() + delta := currOps - prevOps + prevOps = currOps + pt := TimePoint{Timestamp: t.Sub(startTime), OpsSec: float64(delta)} + timeline = append(timeline, pt) + } + } + }() + + time.Sleep(cfg.Duration) + + running.Store(false) + endWg.Wait() + cancel() + + globalDuration := time.Since(startGlobal) + 
monitorWg.Wait() // BLOCK here until monitor goroutine returns + + runtime.ReadMemStats(&memAfter) + + return analyzeResults(cfg, workerResults, memBytes, memAllocs, memBefore, memAfter, globalDuration, timeline) +} + +func measureMemory[T any](setup func() T, work func(T) error) (bytes, allocs uint64) { + var totalAllocs, totalBytes uint64 + const samples = 5 + data := setup() + + for i := 0; i < samples; i++ { + runtime.GC() + time.Sleep(10 * time.Millisecond) + var m1, m2 runtime.MemStats + runtime.ReadMemStats(&m1) + _ = work(data) + runtime.ReadMemStats(&m2) + totalAllocs += m2.Mallocs - m1.Mallocs + totalBytes += m2.TotalAlloc - m1.TotalAlloc + } + return totalBytes / samples, totalAllocs / samples +} + +func analyzeResults( + cfg Config, + workers []workerStats, + memBytes, memAllocs uint64, + mStart, mEnd runtime.MemStats, + duration time.Duration, + timeline []TimePoint, +) Result { + var totalOps uint64 + var totalErrors uint64 + var totalTimeNs int64 + + estimatedOps := uint64(len(workers)) * uint64(chunkSize) * 2 + allLatencies := make([]time.Duration, 0, estimatedOps) + + var totalJitter float64 + var jitterSamples uint64 + + for _, w := range workers { + totalErrors += w.errors + curr := w.head + var prevLat time.Duration + first := true + + for curr != nil { + limit := curr.idx + totalOps += uint64(limit) + for k := 0; k < limit; k++ { + lat := curr.data[k] + if lat == 0 { + continue + } + + totalTimeNs += int64(lat) + allLatencies = append(allLatencies, lat) + + if !first { + diff := float64(lat - prevLat) + if diff < 0 { + diff = -diff + } + totalJitter += diff + jitterSamples++ + } + prevLat = lat + first = false + } + curr = curr.next + } + } + + if totalOps == 0 { + return Result{Config: cfg, ErrorRate: 100.0} + } + + opsPerSecReal := float64(totalOps) / duration.Seconds() + avgLatency := time.Duration(totalTimeNs / int64(totalOps)) + opsPerSecPure := 0.0 + if avgLatency > 0 { + opsPerSecPure = float64(cfg.Workers) / avgLatency.Seconds() + } + + // Optimization: Use slices.Sort + slices.Sort(allLatencies) + + // Percentiles + p25 := percentile(allLatencies, 0.25) + p50 := percentile(allLatencies, 0.50) + p75 := percentile(allLatencies, 0.75) + p95 := percentile(allLatencies, 0.95) + p99 := percentile(allLatencies, 0.99) + p999 := percentile(allLatencies, 0.999) + p9999 := percentile(allLatencies, 0.9999) + minLat := allLatencies[0] + maxLat := allLatencies[len(allLatencies)-1] + + // Stats + iqr := p75 - p25 + + jitter := time.Duration(0) + if jitterSamples > 0 { + jitter = time.Duration(totalJitter / float64(jitterSamples)) + } + + meanNs := float64(avgLatency.Nanoseconds()) + var sumSqDiff float64 + for _, lat := range allLatencies { + diff := float64(lat.Nanoseconds()) - meanNs + sumSqDiff += diff * diff + } + variance := sumSqDiff / float64(len(allLatencies)) + stdDev := time.Duration(math.Sqrt(variance)) + + coeffVar := 0.0 + if avgLatency > 0 { + coeffVar = float64(stdDev) / float64(avgLatency) + } + + // GC Stats + numGC := mEnd.NumGC - mStart.NumGC + pauseNs := mEnd.PauseTotalNs - mStart.PauseTotalNs + gcOverhead := (float64(pauseNs) / float64(duration.Nanoseconds())) * 100 + allocRate := (float64(mEnd.TotalAlloc-mStart.TotalAlloc) / 1024 / 1024) / duration.Seconds() + + return Result{ + Config: cfg, + GoRoutines: cfg.Workers, + OpsTotal: totalOps, + Duration: duration, + OpsPerSecReal: opsPerSecReal, + OpsPerSecPure: opsPerSecPure, + AvgLatency: avgLatency, + StdDevLatency: stdDev, + Variance: variance, + P50Latency: p50, + P75Latency: p75, + P95Latency: p95, + 
P99Latency: p99, + P999Latency: p999, + P9999Latency: p9999, + MinLatency: minLat, + MaxLatency: maxLat, + IQR: iqr, + Jitter: jitter, + CoeffVar: coeffVar, + BytesPerOp: memBytes, + AllocsPerOp: memAllocs, + AllocRateMBPS: allocRate, + NumGC: numGC, + GCPauseTotal: time.Duration(pauseNs), + GCOverhead: gcOverhead, + ErrorCount: totalErrors, + ErrorRate: (float64(totalErrors) / float64(totalOps)) * 100, + Histogram: calcHistogramImproved(allLatencies, minLat, maxLat, 20), + Timeline: timeline, + } +} + +// ----------------------------------------------------------------------------- +// OUTPUT FORMATTING (Restored Original Logic) +// ----------------------------------------------------------------------------- + +func (r Result) Print() { + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + + // Print Header info + if r.Config.RateLimit > 0 { + writef(w, "%s[Running in Open-Loop Mode (Limit: %.0f/s)]%s\n", ColorCyan, r.Config.RateLimit, ColorReset) + } + + cvPct, tailRatio := r.printMainMetrics(w) + r.printSystemHealth(w) + r.printHeatmap(w) + r.printAnalysis(w, cvPct, tailRatio) + + // Append the new Sparkline (Timeline) at the very bottom + if len(r.Timeline) > 1 { + writeLine(w, "") + writeLine(w, ColorBlue+"--- Throughput Timeline ---"+ColorReset) + printSparkline(w, r.Timeline) + writeLine(w, "") + } + + if err := w.Flush(); err != nil { + _, _ = fmt.Fprintln(os.Stderr, "benchmark: flush error:", err) + } +} + +// printMainMetrics prints the main metrics, latency distribution and stability +func (r Result) printMainMetrics(w *tabwriter.Writer) (cvPct float64, tailRatio float64) { + // Helper for coloring status. + status := func(condition bool, goodMsg, badMsg string) string { + if condition { + return ColorGreen + goodMsg + ColorReset + } + return ColorRed + badMsg + ColorReset + } + + writeLine(w, "Metric\tValue\tDescription") + writeLine(w, "------\t-----\t-----------") + writef(w, "Workers\t%d\t\n", r.GoRoutines) + writef(w, "Total Ops\t%d\t%s\n", + r.OpsTotal, + status(r.OpsTotal > 5000, "(Robust Sample)", "(Low Sample Size)"), + ) + writef(w, "Duration\t%v\t%s\n", + r.Duration.Round(time.Millisecond), + status(r.Duration > 1*time.Second, "(Good Duration)", "(Too Short < 1s)"), + ) + writef(w, "Real Throughput\t%.2f/s\tObserved Ops/sec (Wall Clock)\n", r.OpsPerSecReal) + + // Overhead Check. + overheadPct := 0.0 + if r.OpsPerSecPure > 0 && r.OpsPerSecReal > 0 { + overheadPct = (1.0 - (r.OpsPerSecReal / r.OpsPerSecPure)) * 100 + } + overheadStatus := "(Low Overhead)" + if overheadPct > 15.0 { + overheadStatus = ColorYellow + fmt.Sprintf("(High Setup Cost: %.1f%%)", overheadPct) + ColorReset + } + writef(w, "Pure Throughput\t%.2f/s\tTheoretical Max %s\n", r.OpsPerSecPure, overheadStatus) + + writeLine(w, "") + writeLine(w, "Latency Distribution:") + writef(w, " Min\t%v\t\n", r.MinLatency) + writef(w, " P50 (Median)\t%v\t\n", r.P50Latency) + writef(w, " Average\t%v\t\n", r.AvgLatency) + writef(w, " P95\t%v\t\n", r.P95Latency) + writef(w, " P99\t%v\t\n", r.P99Latency) + + // Add new high-precision metrics seamlessly + writef(w, " P99.9\t%v\t\n", r.P999Latency) + + // Tail Latency Check. 
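+	// The Max/P99 ratio below serves as an outlier indicator: above 10 the Max
+	// value is flagged as "Extreme Outliers" instead of "Stable Tail", and
+	// printAnalysis escalates to CRITICAL above 20.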
+ tailRatio = 0.0 + if r.P99Latency > 0 { + tailRatio = float64(r.MaxLatency) / float64(r.P99Latency) + } + maxStatus := ColorGreen + "(Stable Tail)" + ColorReset + if tailRatio > 10.0 { + maxStatus = ColorRed + fmt.Sprintf("(Extreme Outliers: Max is %.1fx P99)", tailRatio) + ColorReset + } + writef(w, " Max\t%v\t%s\n", r.MaxLatency, maxStatus) + + writeLine(w, "") + writeLine(w, "Stability Metrics:") + writef(w, " Std Dev\t%v\t\n", r.StdDevLatency) + writef(w, " IQR\t%v\tInterquartile Range\n", r.IQR) + writef(w, " Jitter\t%v\tAvg delta per worker\n", r.Jitter) + + // CV Check. + cvPct = r.CoeffVar * 100 + cvStatus := ColorGreen + "Excellent Stability (<5%)" + ColorReset + if cvPct > 20.0 { + cvStatus = ColorRed + "Unstable (>20%) - Result is Noisy" + ColorReset + } else if cvPct > 10.0 { + cvStatus = ColorYellow + "Moderate Variance (10-20%)" + ColorReset + } else if cvPct > 5.0 { + cvStatus = "(Acceptable 5-10%)" + } + writef(w, " CV\t%.2f%%\t%s\n", cvPct, cvStatus) + writeLine(w, "") + + return cvPct, tailRatio +} + +// printSystemHealth renders GC, Memory and Error statistics +func (r Result) printSystemHealth(w *tabwriter.Writer) { + writeLine(w, "System Health & Reliability:") + + // 1. Error Rate + errStatus := ColorGreen + "(100% Success)" + ColorReset + if r.ErrorRate > 0 { + errStatus = ColorRed + fmt.Sprintf("(%.2f%% Failures)", r.ErrorRate) + ColorReset + } + writef(w, " Error Rate\t%.4f%%\t%s (%d errors)\n", r.ErrorRate, errStatus, r.ErrorCount) + + // 2. Memory Allocations + writef(w, " Memory\t%d B/op\tAllocated bytes per operation\n", r.BytesPerOp) + writef(w, " Allocs\t%d allocs/op\tAllocations per operation\n", r.AllocsPerOp) + writef(w, " Alloc Rate\t%.2f MB/s\tMemory pressure on system\n", r.AllocRateMBPS) + + // 3. GC Analysis + gcStatus := ColorGreen + "(Healthy)" + ColorReset + if r.GCOverhead > 5.0 { + gcStatus = ColorRed + "(Severe GC Thrashing)" + ColorReset + } else if r.GCOverhead > 1.0 { + gcStatus = ColorYellow + "(High GC Pressure)" + ColorReset + } + writef(w, " GC Overhead\t%.2f%%\t%s\n", r.GCOverhead, gcStatus) + writef(w, " GC Pause\t%v\tTotal Stop-The-World time\n", r.GCPauseTotal) + writef(w, " GC Cycles\t%d\tFull garbage collection cycles\n", r.NumGC) + writeLine(w, "") +} + +// printHeatmap renders the histogram heatmap. +func (r Result) printHeatmap(w *tabwriter.Writer) { + writeLine(w, "Latency Heatmap (Dynamic Range):") + writeLine(w, "Range\tFreq\tDistribution Graph") + + maxCount := 0 + for _, b := range r.Histogram { + if b.Count > maxCount { + maxCount = b.Count + } + } + + for _, b := range r.Histogram { + if b.Count == 0 { + continue + } + + // 1. Draw Bar + barLen := 0 + if maxCount > 0 { + barLen = (b.Count * 40) / maxCount + } + + ratio := 0.0 + if maxCount > 0 { + ratio = float64(b.Count) / float64(maxCount) + } + + // Heat Color Logic (RESTORED) + color := ColorBlue + if ratio > 0.75 { + color = ColorRed + } else if ratio > 0.3 { + color = ColorYellow + } else if ratio > 0.1 { + color = ColorGreen + } + + bar := "" + for i := 0; i < barLen; i++ { + bar += "█" + } + + // 2. 
Format Label + label := fmt.Sprintf("%v-%v", b.LowBound, b.HighBound) + if b.HighBound-b.LowBound < time.Microsecond { + label = fmt.Sprintf("%dns-%dns", b.LowBound.Nanoseconds(), b.HighBound.Nanoseconds()) + } + + percentage := 0.0 + if r.OpsTotal > 0 { + percentage = (float64(b.Count) / float64(r.OpsTotal)) * 100 + } + + writef(w, " %s\t%d\t%s%s %s(%.1f%%)\n", + label, b.Count, color, bar, ColorReset, percentage, + ) + } +} + +// printAnalysis prints the analysis and recommendations section. +func (r Result) printAnalysis(w *tabwriter.Writer, cvPct float64, tailRatio float64) { + writeLine(w, "") + writeLine(w, ColorBlue+"--- Analysis & Recommendations ---"+ColorReset) + + // 1. Sample Size Check + if r.OpsTotal < 5000 { + writef(w, "%s[WARN] Low sample size (%d). Results may not be statistically significant. Run for longer.%s\n", + ColorRed, r.OpsTotal, ColorReset) + } + + // 2. Duration Check + if r.Duration < 1*time.Second { + writef(w, "%s[WARN] Test ran for less than 1s. Go runtime/scheduler might not have stabilized.%s\n", + ColorYellow, ColorReset) + } + + // 3. Variance Check + if cvPct > 20.0 { + writef(w, "%s[FAIL] High Variance (CV %.2f%%). System noise is affecting results. Isolate the machine or increase duration.%s\n", + ColorRed, cvPct, ColorReset) + } + + // 4. Memory Check + if r.AllocsPerOp > 100 { + writef(w, "%s[INFO] High Allocations (%d/op). This will trigger frequent GC cycles and increase Max Latency.%s\n", + ColorYellow, r.AllocsPerOp, ColorReset) + } + + // 5. Outlier Check + if tailRatio > 20.0 { + writef(w, "%s[CRITICAL] Massive Latency Spikes Detected. Max is %.0fx higher than P99. Check for Stop-The-World GC or Lock Contention.%s\n", + ColorRed, tailRatio, ColorReset) + } + + // 6. Error Check + if r.ErrorRate > 1.0 { + writef(w, "%s[FAIL] High Error Rate (%.2f%%). System is failing under load.%s\n", ColorRed, r.ErrorRate, ColorReset) + } + + if cvPct < 10.0 && r.OpsTotal > 10000 && tailRatio < 10.0 && r.ErrorRate == 0 { + writef(w, "%s[PASS] RunBenchmark looks healthy and statistically sound.%s\n", ColorGreen, ColorReset) + } + writeLine(w, "----------------------------------") +} + +// --- HELPER FUNCTIONS --- + +func writef(w *tabwriter.Writer, format string, a ...interface{}) { + _, _ = fmt.Fprintf(w, format, a...) 
+} + +func writeLine(w *tabwriter.Writer, s string) { + _, _ = fmt.Fprintln(w, s) +} + +func percentile(sorted []time.Duration, p float64) time.Duration { + if len(sorted) == 0 { + return 0 + } + if p <= 0 { + return sorted[0] + } + if p >= 1 { + return sorted[len(sorted)-1] + } + + pos := p * float64(len(sorted)-1) + lower := int(math.Floor(pos)) + upper := int(math.Ceil(pos)) + + if lower == upper { + return sorted[lower] + } + + fraction := pos - float64(lower) + valLower := float64(sorted[lower]) + valUpper := float64(sorted[upper]) + return time.Duration(valLower + fraction*(valUpper-valLower)) +} + +func calcHistogramImproved(latencies []time.Duration, min, max time.Duration, buckets int) []Bucket { + if len(latencies) == 0 { + return nil + } + if min <= 0 { + min = 1 + } + if max < min { + max = min + } + + res := make([]Bucket, buckets) + + // Geometric series: min * factor^N = max + factor := math.Pow(float64(max)/float64(min), 1.0/float64(buckets)) + + // Avoid degenerate case + if factor <= 1.0 { + factor = 1.00001 + } + + currLower := float64(min) + for i := 0; i < buckets; i++ { + currUpper := currLower * factor + if i == buckets-1 { + currUpper = float64(max) + } + res[i] = Bucket{ + LowBound: time.Duration(currLower), + HighBound: time.Duration(currUpper), + } + currLower = currUpper + } + + logMin := math.Log(float64(min)) + logFactor := math.Log(factor) + + for _, lat := range latencies { + val := float64(lat) + if val < float64(min) { + res[0].Count++ + continue + } + + idx := int((math.Log(val) - logMin) / logFactor) + if idx < 0 { + idx = 0 + } + if idx >= buckets { + idx = buckets - 1 + } + res[idx].Count++ + } + return res +} + +func printSparkline(w *tabwriter.Writer, timeline []TimePoint) { + if len(timeline) == 0 { + return + } + + maxOps := 0.0 + for _, p := range timeline { + if p.OpsSec > maxOps { + maxOps = p.OpsSec + } + } + + blocks := []string{" ", "▂", "▃", "▄", "▅", "▆", "▇", "█"} + fmt.Print("Timeline: [") + for _, p := range timeline { + if maxOps == 0 { + writef(w, " ") + continue + } + ratio := p.OpsSec / maxOps + idx := int(ratio * float64(len(blocks)-1)) + if idx < 0 { + idx = 0 + } + if idx >= len(blocks) { + idx = len(blocks) - 1 + } + + color := ColorGreen + if ratio < 0.5 { + color = ColorYellow + } + if ratio < 0.2 { + color = ColorRed + } + writef(w, "%s%s%s", color, blocks[idx], ColorReset) + } + writef(w, "] (Max: %.0f ops/s)\n", maxOps) +} diff --git a/token/core/common/benchmark/runner.md b/token/services/benchmark/runner.md similarity index 67% rename from token/core/common/benchmark/runner.md rename to token/services/benchmark/runner.md index 23f2aeb18f..d3edca3212 100644 --- a/token/core/common/benchmark/runner.md +++ b/token/services/benchmark/runner.md @@ -11,50 +11,75 @@ It orchestrates the execution of this work across multiple goroutines, captures ## Key Features * **Concurrent Execution**: Runs the benchmark across a user-defined number of workers (goroutines). +* **Open-Loop & Closed-Loop Modes**: + * **Closed-Loop**: Workers run as fast as possible (standard benchmark). + * **Open-Loop**: Workers are rate-limited to simulate real-world arrival rates (fixes "Coordinated Omission" biases). +* **Time Series Visualization**: Generates a "Sparkline" graph of throughput over time to detect performance degradation or cold starts. * **Two-Phase Measurement**: * **Phase 1 (Memory)**: Runs serially in isolation to accurately measure heap allocations per operation without concurrency noise. 
* **Phase 2 (Throughput/Latency)**: Runs concurrently to measure real-world throughput and latency distribution. * **Low-Overhead Recording**: Uses pre-allocated memory chunks to record latency data, ensuring the measuring process itself doesn't trigger Garbage Collection (GC) or skew results. -* **Statistical Rigor**: Calculates advanced metrics like Interquartile Range (IQR), Jitter, Coefficient of Variation (CV), and interpolated percentiles. +* **High-Precision Statistics**: Calculates **P99.9**, **P99.99**, IQR, Jitter, and Coefficient of Variation (CV) using exact data (no sampling approximations). * **Visual Output**: Generates an ASCII-based latency heatmap and color-coded status indicators directly in the terminal. * **Automated Analysis**: Provides "Analysis & Recommendations" at the end of the run, flagging issues like high variance, GC pressure, or unstable tail latencies. ## Usage -The core entry point is the generic `RunBenchmark` function. +The core entry point is the generic `RunBenchmark` function, which accepts a `Config` object to control execution parameters. ### Function Signature -```go +``` +// Config controls the benchmark execution. +type Config struct { +Workers int // Number of concurrent goroutines +Duration time.Duration // Time to record execution +WarmupDuration time.Duration // Time to run before recording +RateLimit float64 // Total Ops/Sec limit (0 = Unlimited/Closed-Loop) +} + +// RunBenchmark executes the benchmark. +// T is the type of data created by setup() and passed to work(). func RunBenchmark[T any]( - workers int, // Number of concurrent goroutines - benchDuration time.Duration, // How long to run the test - setup func() T, // Function to prepare data for each op - work func(T), // The function to benchmark +cfg Config, // Configuration object +setup func() T, // Function to prepare data for each op +work func(T) error, // The function to benchmark ) Result ``` ### Example -```go +``` package main import ( - "time" - "your/package/benchmark" // Import the runner +"fmt" +"time" +"your/package/benchmark" // Import the runner ) func main() { - // Define the benchmark +// Define the configuration +cfg := benchmark.Config{ +Workers: 10, // 10 concurrent workers +Duration: 5*time.Second, // Run for 5 seconds +WarmupDuration: 1*time.Second, // Warmup to stabilize pools/JIT +RateLimit: 0, // 0 = Full Speed (Closed-Loop) +} + + // Run the benchmark result := benchmark.RunBenchmark( - 10, // 10 concurrent workers - 5*time.Second, // Run for 5 seconds + cfg, func() int { // Setup: Prepare data (not timed) + // Example: Create a payload or connection return 42 }, - func(input int) { // Work: The operation to measure (timed) + func(input int) error { // Work: The operation to measure (timed) // Simulate work - process(input) + if process(input) != nil { + return fmt.Errorf("failed") + } + return nil }, ) @@ -77,6 +102,7 @@ Basic throughput and volume statistics. ### 2. Latency Distribution A detailed look at how long operations took. * **P50, P95, P99**: Standard percentiles. +* **P99.9, P99.99**: Critical tail latency metrics for SLA verification. * **Tail Latency Check**: Compares Max Latency to P99 to identify extreme outliers (e.g., "Max is 12x P99"). ### 3. Stability Metrics @@ -86,21 +112,29 @@ Measures how consistent the system is. * **Jitter**: The average change in latency between consecutive operations on the same worker. * **CV (Coefficient of Variation)**: `StdDev / Mean`. Used to grade stability (e.g., <5% is "Excellent"). -### 4. 
Memory -* **Allocated**: Bytes allocated per operation. -* **Allocs**: Number of heap allocations per operation. +### 4. Memory & GC +* **Memory**: Bytes allocated per operation (Phase 1). +* **Allocs**: Number of heap allocations per operation (Phase 1). +* **Alloc Rate**: Total memory pressure on the system in MB/s. +* **GC Overhead**: Percentage of wall-clock time lost to Stop-The-World GC pauses. ### 5. Latency Heatmap An ASCII bar chart visualizing the distribution of latencies. It uses color coding (Green/Yellow/Red) to indicate frequency density. -```text +``` Range Freq Distribution Graph 100µs-200µs 500 ██████ (5.0%) 200µs-400µs 8000 ██████████████████████ (80.0%) ... ``` -### 6. Analysis & Recommendations +### 6. Throughput Timeline (Sparkline) +A condensed graph showing performance over time (1-second buckets). Helps identify degradation or cold starts. +``` +Timeline: [ ▅▇██▆▄ ] (Max: 5000 ops/s) +``` + +### 7. Analysis & Recommendations The runner automatically evaluates the results and prints warnings or pass/fail statuses: * **[WARN] Low sample size**: If total operations < 5000. * **[FAIL] High Variance**: If the Coefficient of Variation > 20%. @@ -131,16 +165,4 @@ The visualization is hardcoded to 20 buckets using an exponential scale. ### 5. Stop-Time Latency The runner signals workers to stop using an atomic flag, but it waits for the current operation to finish. -* **Constraint**: If a single operation hangs or takes minutes, the benchmark cannot stop immediately when the duration expires. It must wait for stragglers to complete. - -## Implementation Details - -### Zero-Allocation Recording -To prevent the benchmark from measuring itself, the runner uses a linked-list of fixed-size arrays (`chunk`). -* **Structure**: `type chunk struct { data [10000]time.Duration; ... }` -* **Benefit**: Recording a latency requires only a simple array index increment. No `append()` or slice growth occurs during the "hot path" of the benchmark. - -### Histogram Calculation -It uses an **Exponential Histogram** (`calcExponentialHistogramImproved`) logic. -* Buckets grow exponentially, allowing the runner to capture both very small (nanosecond) and very large (second) latencies in the same graph with high fidelity. -* It handles boundary conditions precisely to avoid floating-point drift. \ No newline at end of file +* **Constraint**: If a single operation hangs or takes minutes, the benchmark cannot stop immediately when the duration expires. It must wait for stragglers to complete. \ No newline at end of file diff --git a/token/core/common/benchmark/runner_test.go b/token/services/benchmark/runner_test.go similarity index 64% rename from token/core/common/benchmark/runner_test.go rename to token/services/benchmark/runner_test.go index abd41b7b06..2fa11e50d5 100644 --- a/token/core/common/benchmark/runner_test.go +++ b/token/services/benchmark/runner_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "github.com/hyperledger-labs/fabric-token-sdk/token/core/common/benchmark" + "github.com/hyperledger-labs/fabric-token-sdk/token/services/benchmark" ) func TestRunBenchmark(t *testing.T) { @@ -23,13 +23,14 @@ func TestRunBenchmark(t *testing.T) { } // 2. 
Define Work (The target of measurement) - work := func(data []byte) { + work := func(data []byte) error { // Simulate processing time.Sleep(500 * time.Microsecond) _ = len(data) + return nil } - fmt.Println("Running Benchmark...") - res := benchmark.RunBenchmark(8, 2*time.Second, setup, work) // 8 workers, 2 seconds + fmt.Println("Running RunBenchmark...") + res := benchmark.RunBenchmark(benchmark.NewConfig(8, 2*time.Second, 5*time.Second), setup, work) // 8 workers, 2 seconds res.Print() } diff --git a/token/services/benchmark/test.go b/token/services/benchmark/test.go new file mode 100644 index 0000000000..15b5a41dd9 --- /dev/null +++ b/token/services/benchmark/test.go @@ -0,0 +1,153 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package benchmark + +import ( + "math/rand" + "runtime" + "testing" + "time" + + math "github.com/IBM/mathlib" + fscprofile "github.com/hyperledger-labs/fabric-smart-client/node/start/profile" + "github.com/stretchr/testify/require" +) + +// GenerateCasesWithDefaults returns all combinations of Case created from the value of the flags: +// bits, curves, num_inputs, num_outputs, workers. +// It uses the following predefined values: +// - bits: 32 +// - curves: BN254 +// - num_inputs: 2 +// - num_outputs: 2 +// - workers: NumCPU +func GenerateCasesWithDefaults(tb testing.TB) ([]uint64, []math.CurveID, []TestCase) { + tb.Helper() + bits, err := Bits(32) + require.NoError(tb, err) + curves := Curves(math.BN254) + inputs, err := NumInputs(2) + require.NoError(tb, err) + outputs, err := NumOutputs(2) + require.NoError(tb, err) + workers, err := Workers(runtime.NumCPU()) + require.NoError(tb, err) + return bits, curves, GenerateCases(bits, curves, inputs, outputs, workers) +} + +type Test[T any] struct { + TestCases []TestCase +} + +func NewTest[T any](testCases []TestCase) *Test[T] { + return &Test[T]{TestCases: testCases} +} + +func (test *Test[T]) GoBenchmark(b *testing.B, newEnv func(*Case) (T, error), work func(env T) error) { + b.Helper() + if ProfileEnabled() { + p, err := fscprofile.New(fscprofile.WithAll(), fscprofile.WithPath("./profile")) + require.NoError(b, err) + require.NoError(b, p.Start()) + defer p.Stop() + } + + for _, tc := range test.TestCases { + b.Run(tc.Name, func(b *testing.B) { + n := SetupSamples() + envs := make([]T, 0, n) + if n == 0 { + n = uint(b.N) + } + if n > 0 { + for range n { + e, err := newEnv(tc.BenchmarkCase) + require.NoError(b, err) + envs = append(envs, e) + } + } + + for b.Loop() { + require.NoError(b, work(envs[rand.Intn(int(n))])) + } + }) + } +} + +func (test *Test[T]) GoBenchmarkParallel(b *testing.B, newEnv func(*Case) (T, error), work func(env T) error) { + b.Helper() + if ProfileEnabled() { + p, err := fscprofile.New(fscprofile.WithAll(), fscprofile.WithPath("./profile")) + require.NoError(b, err) + require.NoError(b, p.Start()) + defer p.Stop() + } + + for _, tc := range test.TestCases { + n := SetupSamples() + envs := make([]T, 0, n) + if n == 0 { + n = uint(b.N) + } + if n > 0 { + for range n { + e, err := newEnv(tc.BenchmarkCase) + require.NoError(b, err) + envs = append(envs, e) + } + } + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + require.NoError(b, work(envs[rand.Intn(int(n))])) + } + }) + } +} + +func (test *Test[T]) RunBenchmark(t *testing.T, newEnv func(*Case) (T, error), work func(env T) error) { + t.Helper() + if ProfileEnabled() { + p, err := fscprofile.New(fscprofile.WithAll(), fscprofile.WithPath("./profile")) + require.NoError(t, err) + 
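// Profiling, when enabled, covers the entire run: it is started here and stopped by the deferred call once all test cases have finished. +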
require.NoError(t, p.Start()) + defer p.Stop() + } + + for _, tc := range test.TestCases { + t.Run(tc.Name, func(t *testing.T) { + n := SetupSamples() + envs := make([]T, 0, n) + if n > 0 { + for range n { + e, err := newEnv(tc.BenchmarkCase) + require.NoError(t, err) + envs = append(envs, e) + } + } + + r := RunBenchmark( + NewConfig( + tc.BenchmarkCase.Workers, + Duration(), + 3*time.Second), + func() T { + if n > 0 { + return envs[rand.Intn(int(n))] + } + e, err := newEnv(tc.BenchmarkCase) + require.NoError(t, err) + return e + }, + func(env T) error { + return work(env) + }, + ) + r.Print() + }) + } +} diff --git a/token/core/common/benchmark/utils.go b/token/services/benchmark/utils.go similarity index 100% rename from token/core/common/benchmark/utils.go rename to token/services/benchmark/utils.go diff --git a/token/services/identity/idemix/km_bench_test.go b/token/services/identity/idemix/km_bench_test.go index faa5ea3322..1344e5ed88 100644 --- a/token/services/identity/idemix/km_bench_test.go +++ b/token/services/identity/idemix/km_bench_test.go @@ -7,11 +7,14 @@ SPDX-License-Identifier: Apache-2.0 package idemix import ( + "math/rand" "runtime" "testing" + "time" math "github.com/IBM/mathlib" - "github.com/hyperledger-labs/fabric-token-sdk/token/core/common/benchmark" + "github.com/hyperledger-labs/fabric-token-sdk/token/driver" + benchmark2 "github.com/hyperledger-labs/fabric-token-sdk/token/services/benchmark" "github.com/stretchr/testify/require" ) @@ -52,17 +55,116 @@ func TestParallelBenchmarkIdemixKMIdentity(t *testing.T) { keyManager, cleanup := setupKeyManager(t, "./testdata/bls12_381_bbs_gurvy/idemix", math.BLS12_381_BBS_GURVY) defer cleanup() - workers, err := benchmark.Workers(runtime.NumCPU()) + workers, err := benchmark2.Workers(runtime.NumCPU()) require.NoError(t, err) - r := benchmark.RunBenchmark( - workers[0], - benchmark.Duration(), + r := benchmark2.RunBenchmark( + benchmark2.NewConfig(workers[0], + benchmark2.Duration(), + 3*time.Second), func() *KeyManager { return keyManager }, - func(km *KeyManager) { - _, _ = keyManager.Identity(t.Context(), nil) + func(km *KeyManager) error { + _, err := keyManager.Identity(t.Context(), nil) + return err + }, + ) + r.Print() +} + +func TestParallelBenchmarkIdemixSign(t *testing.T) { + keyManager, cleanup := setupKeyManager(t, "./testdata/bls12_381_bbs_gurvy/idemix", math.BLS12_381_BBS_GURVY) + defer cleanup() + id, err := keyManager.Identity(t.Context(), nil) + require.NoError(t, err) + + workers, err := benchmark2.Workers(runtime.NumCPU()) + require.NoError(t, err) + + r := benchmark2.RunBenchmark( + benchmark2.NewConfig( + workers[0], + benchmark2.Duration(), + 3*time.Second, + ), + func() driver.Signer { + return id.Signer + }, + func(s driver.Signer) error { + _, err := s.Sign([]byte("hello world")) + return err + }, + ) + r.Print() +} + +func TestParallelBenchmarkIdemixVerify(t *testing.T) { + keyManager, cleanup := setupKeyManager(t, "./testdata/bls12_381_bbs_gurvy/idemix", math.BLS12_381_BBS_GURVY) + defer cleanup() + id, err := keyManager.Identity(t.Context(), nil) + require.NoError(t, err) + + workers, err := benchmark2.Workers(runtime.NumCPU()) + require.NoError(t, err) + + n := benchmark2.SetupSamples() + if n == 0 { + n = 128 + } + signatures := make([][]byte, 0, n) + for range n { + sigma, err := id.Signer.Sign([]byte("hello world")) + require.NoError(t, err) + signatures = append(signatures, sigma) + } + + r := benchmark2.RunBenchmark( + benchmark2.NewConfig( + workers[0], + benchmark2.Duration(), + 
3*time.Second, + ), + func() []byte { + return signatures[rand.Intn(len(signatures))] + }, + func(s []byte) error { + return id.Verifier.Verify([]byte("hello world"), s) + }, + ) + r.Print() +} + +func TestParallelBenchmarkIdemixDeserializeSigner(t *testing.T) { + keyManager, cleanup := setupKeyManager(t, "./testdata/bls12_381_bbs_gurvy/idemix", math.BLS12_381_BBS_GURVY) + defer cleanup() + + workers, err := benchmark2.Workers(runtime.NumCPU()) + require.NoError(t, err) + + n := benchmark2.SetupSamples() + if n == 0 { + n = 128 + } + ids := make([][]byte, 0, n) + for range n { + id, err := keyManager.Identity(t.Context(), nil) + require.NoError(t, err) + ids = append(ids, id.Identity) + } + + r := benchmark2.RunBenchmark( + benchmark2.NewConfig( + workers[0], + benchmark2.Duration(), + 3*time.Second, + ), + func() []byte { + return ids[rand.Intn(len(ids))] + }, + func(s []byte) error { + _, err := keyManager.DeserializeSigner(t.Context(), s) + return err }, ) r.Print() diff --git a/token/services/identity/multisig/identity_test.go b/token/services/identity/multisig/identity_test.go index 1f8875fc0e..9d70a4de88 100644 --- a/token/services/identity/multisig/identity_test.go +++ b/token/services/identity/multisig/identity_test.go @@ -7,6 +7,7 @@ SPDX-License-Identifier: Apache-2.0 package multisig import ( + "bytes" "context" "testing" @@ -160,7 +161,7 @@ type mockMatcher struct { } func (m *mockMatcher) Match(ctx context.Context, raw []byte) error { - if string(raw) != string(m.expected) { + if !bytes.Equal(raw, m.expected) { return errors.New("mismatch") } return nil diff --git a/token/services/identity/storage/kvs/hashicorp/go.mod b/token/services/identity/storage/kvs/hashicorp/go.mod index c5f13eb0e5..f2abdae576 100644 --- a/token/services/identity/storage/kvs/hashicorp/go.mod +++ b/token/services/identity/storage/kvs/hashicorp/go.mod @@ -114,7 +114,7 @@ require ( go.uber.org/zap v1.27.0 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/crypto v0.45.0 // indirect - golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect + golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 // indirect golang.org/x/net v0.47.0 // indirect golang.org/x/sync v0.18.0 // indirect golang.org/x/sys v0.38.0 // indirect diff --git a/token/services/identity/storage/kvs/hashicorp/go.sum b/token/services/identity/storage/kvs/hashicorp/go.sum index b091058458..fd19d1853e 100644 --- a/token/services/identity/storage/kvs/hashicorp/go.sum +++ b/token/services/identity/storage/kvs/hashicorp/go.sum @@ -299,14 +299,14 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 h1:DHNhtq3sNNzrvduZZIiFyXWOL9IWaDPHqTnLJp+rCBY= +golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod 
v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= -golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -345,8 +345,8 @@ golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= -golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/token/services/identity/x509/crypto/ecdsa.go b/token/services/identity/x509/crypto/ecdsa.go index c090820661..141666ed63 100644 --- a/token/services/identity/x509/crypto/ecdsa.go +++ b/token/services/identity/x509/crypto/ecdsa.go @@ -9,6 +9,7 @@ package crypto import ( "crypto/ecdsa" "crypto/elliptic" + "crypto/rand" "crypto/sha256" "crypto/x509" "encoding/asn1" @@ -76,6 +77,30 @@ func (d *ecdsaVerifier) Verify(message, sigma []byte) error { return nil } +type ecdsaSigner struct { + sk *ecdsa.PrivateKey +} + +func NewEcdsaSigner(sk *ecdsa.PrivateKey) *ecdsaSigner { + return &ecdsaSigner{sk: sk} +} + +func (d *ecdsaSigner) Sign(message []byte) ([]byte, error) { + dgst := sha256.Sum256(message) + + r, s, err := ecdsa.Sign(rand.Reader, d.sk, dgst[:]) + if err != nil { + return nil, err + } + + s, _, err = ToLowS(&d.sk.PublicKey, s) + if err != nil { + return nil, err + } + + return asn1.Marshal(ECDSASignature{R: r, S: s}) +} + // IsLowS checks that s is a low-S func IsLowS(k *ecdsa.PublicKey, s *big.Int) (bool, error) { halfOrder, ok := curveHalfOrders[k.Curve] @@ -86,6 +111,23 @@ func IsLowS(k *ecdsa.PublicKey, s *big.Int) (bool, error) { return s.Cmp(halfOrder) != 1, nil } +func ToLowS(k *ecdsa.PublicKey, s *big.Int) (*big.Int, bool, error) { + lowS, err := IsLowS(k, s) + if err != nil { + return nil, false, err + } + + if !lowS { + // Set s to N - s that will be then in the lower part of signature space + // less or equal to half order + s.Sub(k.Params().N, s) + + return s, true, nil + } + + return s, false, nil +} + // PemDecodeKey takes bytes and returns a Go key func PemDecodeKey(keyBytes []byte) (interface{}, error) { block, _ := pem.Decode(keyBytes) diff 
--git a/token/services/utils/json/decoder_test.go b/token/services/utils/json/decoder_test.go index 36808abf05..50233e811c 100644 --- a/token/services/utils/json/decoder_test.go +++ b/token/services/utils/json/decoder_test.go @@ -51,7 +51,7 @@ func TestUnmarshalWithDisallowUnknownFields(t *testing.T) { } } -// Benchmark comparing UnmarshalWithDisallowUnknownFields to json.Unmarshal +// BenchmarkUnmarshal compares UnmarshalWithDisallowUnknownFields to json.Unmarshal func BenchmarkUnmarshal(b *testing.B) { type TestStruct struct { Name string `json:"name"`