diff --git a/apps/submitter/bin/stressTestCreateAccount.ts b/apps/submitter/bin/stressTestCreateAccount.ts new file mode 100644 index 0000000000..5f34931667 --- /dev/null +++ b/apps/submitter/bin/stressTestCreateAccount.ts @@ -0,0 +1,227 @@ +/** + * Stress test for the createAccount endpoint. + * Sends multiple concurrent requests to create accounts and measures performance. + */ + +import { BoopClient, CreateAccount } from "@happy.tech/boop-sdk" +import { type Hex, stringify } from "@happy.tech/common" +import { generatePrivateKey, privateKeyToAccount } from "viem/accounts" +import { endNonceMonitoring, startNonceMonitoring, trackCreatedAccount } from "./utils/nonceMonitor" + +// Configuration +const DEFAULT_CONFIG = { + concurrentRequests: 100, + batchSize: 10, + delayBetweenBatchesMs: 100, + submitterUrl: "http://localhost:3001", +} + +interface StressTestConfig { + concurrentRequests: number + batchSize: number + delayBetweenBatchesMs: number + submitterUrl: string +} + +interface TestResult { + success: boolean + latencyMs: number + status: string + address?: string + error?: string +} + +/** + * Run the stress test for createAccount endpoint + */ +async function runStressTest(config: StressTestConfig = DEFAULT_CONFIG): Promise { + console.log(` +=== CREATE ACCOUNT STRESS TEST === +Submitter URL: ${config.submitterUrl} +Concurrent Requests: ${config.concurrentRequests} +Batch Size: ${config.batchSize} +Delay Between Batches: ${config.delayBetweenBatchesMs}ms +=============================== +`) + + // Start monitoring executor nonces + await startNonceMonitoring() + + const boopClient = new BoopClient({ + submitterUrl: config.submitterUrl, + }) + + const results: TestResult[] = [] + const batches = Math.ceil(config.concurrentRequests / config.batchSize) + + console.log(`Executing ${batches} batches of ${config.batchSize} requests...`) + + for (let batchIndex = 0; batchIndex < batches; batchIndex++) { + const batchStart = performance.now() + const 
batchPromises: Promise[] = [] + + const remainingRequests = Math.min(config.batchSize, config.concurrentRequests - batchIndex * config.batchSize) + + console.log(`Batch ${batchIndex + 1}/${batches}: Sending ${remainingRequests} requests...`) + + for (let i = 0; i < remainingRequests; i++) { + batchPromises.push( + (async () => { + const eoa = privateKeyToAccount(generatePrivateKey()) + const salt = `0x${(batchIndex * config.batchSize + i).toString(16).padStart(64, "0")}` as Hex + + const start = performance.now() + try { + const result = await boopClient.createAccount({ + owner: eoa.address, + salt, + }) + + const success = + result.status === CreateAccount.Success || result.status === "createAccountAlreadyCreated" + + // Track the created account if successful + if (success && result.address) { + trackCreatedAccount(result.address) + } + + results.push({ + success, + latencyMs: Math.round(performance.now() - start), + status: result.status, + address: result.address, + }) + } catch (error) { + results.push({ + success: false, + latencyMs: Math.round(performance.now() - start), + status: "error", + error: stringify(error), + }) + } + })(), + ) + } + + await Promise.all(batchPromises) + + const batchDuration = performance.now() - batchStart + console.log(`Batch ${batchIndex + 1} completed in ${Math.round(batchDuration)}ms`) + + // Add delay between batches if not the last batch + if (batchIndex < batches - 1 && config.delayBetweenBatchesMs > 0) { + console.log(`Waiting ${config.delayBetweenBatchesMs}ms before next batch...`) + await new Promise((resolve) => setTimeout(resolve, config.delayBetweenBatchesMs)) + } + } + + // Calculate and display statistics + const successCount = results.filter((r) => r.success).length + const failureCount = results.length - successCount + const successRate = (successCount / results.length) * 100 + + const latencies = results.map((r) => r.latencyMs) + const avgLatency = latencies.reduce((sum, val) => sum + val, 0) / latencies.length + 
const minLatency = Math.min(...latencies) + const maxLatency = Math.max(...latencies) + + // Calculate percentiles + latencies.sort((a, b) => a - b) + const p50 = latencies[Math.floor(latencies.length * 0.5)] + const p90 = latencies[Math.floor(latencies.length * 0.9)] + const p95 = latencies[Math.floor(latencies.length * 0.95)] + const p99 = latencies[Math.floor(latencies.length * 0.99)] + + console.log("\n=== RESULTS ===") + console.log(`Total Requests: ${results.length}`) + console.log(`Success: ${successCount} (${successRate.toFixed(2)}%)`) + console.log(results.forEach((r) => console.log(r.success ? r.address : r.error))) + console.log(`Failures: ${failureCount}`) + console.log("\n=== LATENCY (ms) ===") + console.log(`Average: ${avgLatency.toFixed(2)}`) + console.log(`Min: ${minLatency}`) + console.log(`Max: ${maxLatency}`) + console.log(`P50: ${p50}`) + console.log(`P90: ${p90}`) + console.log(`P95: ${p95}`) + console.log(`P99: ${p99}`) + + // Display error distribution if there are failures + if (failureCount > 0) { + console.log("\n=== ERROR DISTRIBUTION ===") + const errorGroups = results + .filter((r) => !r.success) + .reduce( + (acc, curr) => { + const key = curr.status + if (!acc[key]) acc[key] = 0 + acc[key]++ + return acc + }, + {} as Record, + ) + + Object.entries(errorGroups).forEach(([status, count]) => { + console.log(`${status}: ${count} (${((count / failureCount) * 100).toFixed(2)}%)`) + }) + + // Show a sample of errors + console.log("\n=== SAMPLE ERRORS ===") + results + .filter((r) => !r.success) + .slice(0, 3) + .forEach((result, i) => { + console.log(`Error ${i + 1}: ${result.error || result.status}`) + }) + } + + // End nonce monitoring and show report + await endNonceMonitoring() +} + +// Parse command line arguments +function parseArgs(): StressTestConfig { + const args = process.argv.slice(2) + const config = { ...DEFAULT_CONFIG } + + for (let i = 0; i < args.length; i += 2) { + const key = args[i] + const value = args[i + 1] + + switch 
(key) { + case "--requests": + case "-r": + config.concurrentRequests = Number.parseInt(value, 10) + break + case "--batch-size": + case "-b": + config.batchSize = Number.parseInt(value, 10) + break + case "--delay": + case "-d": + config.delayBetweenBatchesMs = Number.parseInt(value, 10) + break + case "--url": + case "-u": + config.submitterUrl = value + break + } + } + + return config +} + +// Main execution +async function main() { + try { + const config = parseArgs() + await runStressTest(config) + } catch (error) { + console.error("Stress test failed:", error) + process.exit(1) + } + + process.exit(0) +} + +main() diff --git a/apps/submitter/bin/stressTestExecute.ts b/apps/submitter/bin/stressTestExecute.ts new file mode 100644 index 0000000000..bfda701033 --- /dev/null +++ b/apps/submitter/bin/stressTestExecute.ts @@ -0,0 +1,267 @@ +/** + * Stress test for the execute endpoint. + * Sends multiple concurrent requests to execute boops and measures performance. + */ + +import { BoopClient, CreateAccount, computeBoopHash } from "@happy.tech/boop-sdk" +import { stringify } from "@happy.tech/common" +import { generatePrivateKey, privateKeyToAccount } from "viem/accounts" +import { createAndSignMintBoop } from "#lib/utils/test/helpers" // no barrel import: don't start services +import { endNonceMonitoring, startNonceMonitoring, trackCreatedAccount } from "./utils/nonceMonitor" + +// Configuration +const DEFAULT_CONFIG = { + concurrentRequests: 500, // Lower default for execute since it's more resource-intensive + batchSize: 50, + delayBetweenBatchesMs: 200, + submitterUrl: "http://localhost:3001", +} + +interface StressTestConfig { + concurrentRequests: number + batchSize: number + delayBetweenBatchesMs: number + submitterUrl: string +} + +interface TestResult { + success: boolean + latencyMs: number + status: string + boopHash?: string + evmTxHash?: string + error?: string +} + +/** + * Run the stress test for execute endpoint + */ +async function 
runStressTest(config: StressTestConfig = DEFAULT_CONFIG): Promise { + console.log(` +=== EXECUTE ENDPOINT STRESS TEST === +Submitter URL: ${config.submitterUrl} +Concurrent Requests: ${config.concurrentRequests} +Batch Size: ${config.batchSize} +Delay Between Batches: ${config.delayBetweenBatchesMs}ms +================================== +`) + + // Start monitoring executor nonces + await startNonceMonitoring() + + const boopClient = new BoopClient({ + submitterUrl: config.submitterUrl, + }) + + // First create an account to use for all execute requests + console.log("Creating test account...") + const eoa = privateKeyToAccount(generatePrivateKey()) + const createAccountResult = await boopClient.createAccount({ + owner: eoa.address, + salt: "0x0000000000000000000000000000000000000000000000000000000000000001", + }) + + if ( + createAccountResult.status !== CreateAccount.Success && + createAccountResult.status !== "createAccountAlreadyCreated" + ) { + throw new Error("Account creation failed: " + stringify(createAccountResult)) + } + + const account = createAccountResult.address + console.log(`Test account created: ${account}`) + + // Track the created account + trackCreatedAccount(account) + + const results: TestResult[] = [] + const batches = Math.ceil(config.concurrentRequests / config.batchSize) + + console.log(`Executing ${batches} batches of ${config.batchSize} requests...`) + + for (let batchIndex = 0; batchIndex < batches; batchIndex++) { + const batchStart = performance.now() + const batchPromises: Promise[] = [] + + const remainingRequests = Math.min(config.batchSize, config.concurrentRequests - batchIndex * config.batchSize) + + console.log(`Batch ${batchIndex + 1}/${batches}: Sending ${remainingRequests} requests...`) + + for (let i = 0; i < remainingRequests; i++) { + batchPromises.push( + (async () => { + // Create a unique nonce for each request + // Using nonceTrack to distribute requests across different tracks + // This helps avoid nonce conflicts 
when executing many transactions + const nonceValue = BigInt(i % 50) + const nonceTrack = BigInt(Math.floor(i / 50) + batchIndex * 10) + + try { + // Create and sign a boop for execution + const boop = await createAndSignMintBoop(eoa, { account, nonceTrack, nonceValue }) + + // Calculate boopHash for logging + const chainId = 216n // Using testnet chain ID, adjust if needed + const boopHash = computeBoopHash(chainId, boop) + + const start = performance.now() + const result = await boopClient.execute({ boop }) + + results.push({ + success: result.status === "onchainSuccess", + latencyMs: Math.round(performance.now() - start), + status: result.status, + boopHash: result.receipt?.boopHash || boopHash, + evmTxHash: result.receipt?.evmTxHash, + }) + } catch (error) { + results.push({ + success: false, + latencyMs: 0, // Can't measure latency for failed requests + status: "error", + error: stringify(error), + }) + } + })(), + ) + } + + await Promise.all(batchPromises) + + const batchDuration = performance.now() - batchStart + console.log(`Batch ${batchIndex + 1} completed in ${Math.round(batchDuration)}ms`) + + // Add delay between batches if not the last batch + if (batchIndex < batches - 1 && config.delayBetweenBatchesMs > 0) { + console.log(`Waiting ${config.delayBetweenBatchesMs}ms before next batch...`) + await new Promise((resolve) => setTimeout(resolve, config.delayBetweenBatchesMs)) + } + } + + // Calculate and display statistics + const successCount = results.filter((r) => r.success).length + const failureCount = results.length - successCount + const successRate = (successCount / results.length) * 100 + + const latencies = results.filter((r) => r.latencyMs > 0).map((r) => r.latencyMs) + + if (latencies.length === 0) { + console.log("\n=== RESULTS ===") + console.log("No successful requests to measure latency") + return + } + + const avgLatency = latencies.reduce((sum, val) => sum + val, 0) / latencies.length + const minLatency = Math.min(...latencies) + const 
maxLatency = Math.max(...latencies) + + // Calculate percentiles + latencies.sort((a, b) => a - b) + const p50 = latencies[Math.floor(latencies.length * 0.5)] + const p90 = latencies[Math.floor(latencies.length * 0.9)] + const p95 = latencies[Math.floor(latencies.length * 0.95)] + const p99 = latencies[Math.floor(latencies.length * 0.99)] + + console.log("\n=== RESULTS ===") + console.log(`Total Requests: ${results.length}`) + console.log(`Success: ${successCount} (${successRate.toFixed(2)}%)`) + console.log(`Failures: ${failureCount}`) + console.log("\n=== LATENCY (ms) ===") + console.log(`Average: ${avgLatency.toFixed(2)}`) + console.log(`Min: ${minLatency}`) + console.log(`Max: ${maxLatency}`) + console.log(`P50: ${p50}`) + console.log(`P90: ${p90}`) + console.log(`P95: ${p95}`) + console.log(`P99: ${p99}`) + + // Display error distribution if there are failures + if (failureCount > 0) { + console.log("\n=== ERROR DISTRIBUTION ===") + const errorGroups = results + .filter((r) => !r.success) + .reduce( + (acc, curr) => { + const key = curr.status + if (!acc[key]) acc[key] = 0 + acc[key]++ + return acc + }, + {} as Record, + ) + + Object.entries(errorGroups).forEach(([status, count]) => { + console.log(`${status}: ${count} (${((count / failureCount) * 100).toFixed(2)}%)`) + }) + + // Show a sample of errors + console.log("\n=== SAMPLE ERRORS ===") + results + .filter((r) => !r.success) + .slice(0, 3) + .forEach((result, i) => { + console.log(`Error ${i + 1}: ${result.error || result.status}`) + }) + } + + // Show successful transaction hashes + if (successCount > 0) { + console.log("\n=== SAMPLE SUCCESSFUL TRANSACTIONS ===") + results + .filter((r) => r.success && r.evmTxHash) + // .slice(0, 5) + .forEach((result, i) => { + console.log(`Transaction ${i + 1}: ${result.evmTxHash}`) + }) + } + + // End nonce monitoring and show report + await endNonceMonitoring() +} + +// Parse command line arguments +function parseArgs(): StressTestConfig { + const args = 
process.argv.slice(2) + const config = { ...DEFAULT_CONFIG } + + for (let i = 0; i < args.length; i += 2) { + const key = args[i] + const value = args[i + 1] + + switch (key) { + case "--requests": + case "-r": + config.concurrentRequests = Number.parseInt(value, 10) + break + case "--batch-size": + case "-b": + config.batchSize = Number.parseInt(value, 10) + break + case "--delay": + case "-d": + config.delayBetweenBatchesMs = Number.parseInt(value, 10) + break + case "--url": + case "-u": + config.submitterUrl = value + break + } + } + + return config +} + +// Main execution +async function main() { + try { + const config = parseArgs() + await runStressTest(config) + } catch (error) { + console.error("Stress test failed:", error) + process.exit(1) + } + + process.exit(0) +} + +main() diff --git a/apps/submitter/bin/stressTestSimulate.ts b/apps/submitter/bin/stressTestSimulate.ts new file mode 100644 index 0000000000..4bc4da6684 --- /dev/null +++ b/apps/submitter/bin/stressTestSimulate.ts @@ -0,0 +1,246 @@ +/** + * Stress test for the simulate endpoint. + * Sends multiple concurrent requests to simulate boops and measures performance. 
+ */ + +import { BoopClient, CreateAccount } from "@happy.tech/boop-sdk" +import { stringify } from "@happy.tech/common" +import { generatePrivateKey, privateKeyToAccount } from "viem/accounts" +import { createAndSignMintBoop } from "#lib/utils/test/helpers" // no barrel import: don't start services +import { endNonceMonitoring, startNonceMonitoring, trackCreatedAccount } from "./utils/nonceMonitor" + +// Configuration +const DEFAULT_CONFIG = { + concurrentRequests: 1000, + batchSize: 100, + delayBetweenBatchesMs: 100, + submitterUrl: "http://localhost:3001", +} + +interface StressTestConfig { + concurrentRequests: number + batchSize: number + delayBetweenBatchesMs: number + submitterUrl: string +} + +interface TestResult { + success: boolean + latencyMs: number + status: string + error?: string +} + +/** + * Run the stress test for simulate endpoint + */ +async function runStressTest(config: StressTestConfig = DEFAULT_CONFIG): Promise { + console.log(` +=== SIMULATE ENDPOINT STRESS TEST === +Submitter URL: ${config.submitterUrl} +Concurrent Requests: ${config.concurrentRequests} +Batch Size: ${config.batchSize} +Delay Between Batches: ${config.delayBetweenBatchesMs}ms +=================================== +`) + + // Start monitoring executor nonces + await startNonceMonitoring() + + const boopClient = new BoopClient({ + submitterUrl: config.submitterUrl, + }) + + // First create an account to use for all simulation requests + console.log("Creating test account...") + const eoa = privateKeyToAccount(generatePrivateKey()) + const createAccountResult = await boopClient.createAccount({ + owner: eoa.address, + salt: "0x0000000000000000000000000000000000000000000000000000000000000001", + }) + + if ( + createAccountResult.status !== CreateAccount.Success && + createAccountResult.status !== "createAccountAlreadyCreated" + ) { + throw new Error("Account creation failed: " + stringify(createAccountResult)) + } + + const account = createAccountResult.address + 
console.log(`Test account created: ${account}`) + + // Track the created account + trackCreatedAccount(account) + + const results: TestResult[] = [] + const batches = Math.ceil(config.concurrentRequests / config.batchSize) + + console.log(`Executing ${batches} batches of ${config.batchSize} requests...`) + + for (let batchIndex = 0; batchIndex < batches; batchIndex++) { + const batchStart = performance.now() + const batchPromises: Promise[] = [] + + const remainingRequests = Math.min(config.batchSize, config.concurrentRequests - batchIndex * config.batchSize) + + console.log(`Batch ${batchIndex + 1}/${batches}: Sending ${remainingRequests} requests...`) + + for (let i = 0; i < remainingRequests; i++) { + batchPromises.push( + (async () => { + // Create a unique nonce for each request + const nonceValue = BigInt(batchIndex * config.batchSize + i) + const nonceTrack = 0n + + try { + // Create and sign a boop for simulation + const boop = await createAndSignMintBoop(eoa, { account, nonceTrack, nonceValue }) + + const start = performance.now() + const result = await boopClient.simulate({ boop }) + + results.push({ + success: result.status === "onchainSuccess", + latencyMs: Math.round(performance.now() - start), + status: result.status, + }) + } catch (error) { + results.push({ + success: false, + latencyMs: 0, // Can't measure latency for failed requests + status: "error", + error: stringify(error), + }) + } + })(), + ) + } + + await Promise.all(batchPromises) + + const batchDuration = performance.now() - batchStart + console.log(`Batch ${batchIndex + 1} completed in ${Math.round(batchDuration)}ms`) + + // Add delay between batches if not the last batch + if (batchIndex < batches - 1 && config.delayBetweenBatchesMs > 0) { + console.log(`Waiting ${config.delayBetweenBatchesMs}ms before next batch...`) + await new Promise((resolve) => setTimeout(resolve, config.delayBetweenBatchesMs)) + } + } + + // Calculate and display statistics + const successCount = 
results.filter((r) => r.success).length + const failureCount = results.length - successCount + const successRate = (successCount / results.length) * 100 + + const latencies = results.filter((r) => r.latencyMs > 0).map((r) => r.latencyMs) + + if (latencies.length === 0) { + console.log("\n=== RESULTS ===") + console.log("No successful requests to measure latency") + return + } + + const avgLatency = latencies.reduce((sum, val) => sum + val, 0) / latencies.length + const minLatency = Math.min(...latencies) + const maxLatency = Math.max(...latencies) + + // Calculate percentiles + latencies.sort((a, b) => a - b) + const p50 = latencies[Math.floor(latencies.length * 0.5)] + const p90 = latencies[Math.floor(latencies.length * 0.9)] + const p95 = latencies[Math.floor(latencies.length * 0.95)] + const p99 = latencies[Math.floor(latencies.length * 0.99)] + + console.log("\n=== RESULTS ===") + console.log(`Total Requests: ${results.length}`) + console.log(`Success: ${successCount} (${successRate.toFixed(2)}%)`) + console.log(`Failures: ${failureCount}`) + console.log("\n=== LATENCY (ms) ===") + console.log(`Average: ${avgLatency.toFixed(2)}`) + console.log(`Min: ${minLatency}`) + console.log(`Max: ${maxLatency}`) + console.log(`P50: ${p50}`) + console.log(`P90: ${p90}`) + console.log(`P95: ${p95}`) + console.log(`P99: ${p99}`) + + // Display error distribution if there are failures + if (failureCount > 0) { + console.log("\n=== ERROR DISTRIBUTION ===") + const errorGroups = results + .filter((r) => !r.success) + .reduce( + (acc, curr) => { + const key = curr.status + if (!acc[key]) acc[key] = 0 + acc[key]++ + return acc + }, + {} as Record, + ) + + Object.entries(errorGroups).forEach(([status, count]) => { + console.log(`${status}: ${count} (${((count / failureCount) * 100).toFixed(2)}%)`) + }) + + // Show a sample of errors + console.log("\n=== SAMPLE ERRORS ===") + results + .filter((r) => !r.success) + .slice(0, 3) + .forEach((result, i) => { + console.log(`Error ${i + 
1}: ${result.error || result.status}`) + }) + } + + // End nonce monitoring and show report + await endNonceMonitoring() +} + +// Parse command line arguments +function parseArgs(): StressTestConfig { + const args = process.argv.slice(2) + const config = { ...DEFAULT_CONFIG } + + for (let i = 0; i < args.length; i += 2) { + const key = args[i] + const value = args[i + 1] + + switch (key) { + case "--requests": + case "-r": + config.concurrentRequests = Number.parseInt(value, 10) + break + case "--batch-size": + case "-b": + config.batchSize = Number.parseInt(value, 10) + break + case "--delay": + case "-d": + config.delayBetweenBatchesMs = Number.parseInt(value, 10) + break + case "--url": + case "-u": + config.submitterUrl = value + break + } + } + + return config +} + +// Main execution +async function main() { + try { + const config = parseArgs() + await runStressTest(config) + } catch (error) { + console.error("Stress test failed:", error) + process.exit(1) + } + + process.exit(0) +} + +main() diff --git a/apps/submitter/bin/stressTestSubmit.ts b/apps/submitter/bin/stressTestSubmit.ts new file mode 100644 index 0000000000..a5c5811396 --- /dev/null +++ b/apps/submitter/bin/stressTestSubmit.ts @@ -0,0 +1,576 @@ +/** + * Stress test for the submit endpoint. + * Sends multiple concurrent requests to submit boops and measures performance. 
+ */ + +import { type Boop, BoopClient, CreateAccount, computeBoopHash } from "@happy.tech/boop-sdk" +import { type Hash, stringify } from "@happy.tech/common" +import { generatePrivateKey, privateKeyToAccount } from "viem/accounts" +import { createAndSignMintBoop } from "#lib/utils/test/helpers" // no barrel import: don't start services +import { endNonceMonitoring, startNonceMonitoring, trackCreatedAccount } from "./utils/nonceMonitor" + +// Configuration +const DEFAULT_CONFIG = { + concurrentRequests: 100, + batchSize: 10, + delayBetweenBatchesMs: 100, + submitterUrl: "http://localhost:3001", +} + +interface StressTestConfig { + concurrentRequests: number + batchSize: number + delayBetweenBatchesMs: number + submitterUrl: string +} + +// Define types for test results and responses +interface BoopReceipt { + boopHash: Hash + evmTxHash: Hash + status: string + blockHash: Hash + blockNumber: bigint + gasPrice: bigint + boop: Boop +} + +interface GetStateOutput { + status: string + receipt?: BoopReceipt + simulation?: { + status: string + } +} + +interface TestResult { + success: boolean + latencyMs: number + status: string + boopHash?: Hash + error?: string + receipt?: BoopReceipt + state?: GetStateOutput +} + +/** + * Run the stress test for submit endpoint + */ +async function runStressTest( + config: StressTestConfig = DEFAULT_CONFIG, +): Promise<{ results: TestResult[]; boopClient: BoopClient }> { + console.log(` +=== SUBMIT ENDPOINT STRESS TEST === +Submitter URL: ${config.submitterUrl} +Concurrent Requests: ${config.concurrentRequests} +Batch Size: ${config.batchSize} +Delay Between Batches: ${config.delayBetweenBatchesMs}ms +================================= +`) + + // Create a BoopClient instance + const boopClient = new BoopClient({ submitterUrl: config.submitterUrl }) + + // Initialize nonce monitoring + console.log("=== Starting Nonce Monitoring ===") + await startNonceMonitoring() + + // Create a test account for this run + console.log("Creating test 
account...") + const eoa = privateKeyToAccount(generatePrivateKey()) + const createAccountResult = await boopClient.createAccount({ + owner: eoa.address, + salt: "0x0000000000000000000000000000000000000000000000000000000000000001", + }) + + if ( + createAccountResult.status !== CreateAccount.Success && + createAccountResult.status !== "createAccountAlreadyCreated" + ) { + throw new Error("Account creation failed: " + stringify(createAccountResult)) + } + + const account = createAccountResult.address + console.log(`Test account created: ${account}`) + + // Track the created account + trackCreatedAccount(account) + + // Calculate number of batches + const batches = Math.ceil(config.concurrentRequests / config.batchSize) + console.log(`Executing ${batches} batches of ${config.batchSize} requests...`) + + // Store results for reporting + const results: TestResult[] = [] + const latencies: number[] = [] + + // Initialize counters for this test run + + // Execute batches + for (let batchIndex = 0; batchIndex < batches; batchIndex++) { + const batchStart = performance.now() + const batchPromises: Promise[] = [] + + const remainingRequests = Math.min(config.batchSize, config.concurrentRequests - batchIndex * config.batchSize) + + console.log(`Batch ${batchIndex + 1}/${batches}: Sending ${remainingRequests} requests...`) + + for (let i = 0; i < remainingRequests; i++) { + batchPromises.push( + (async () => { + // Using nonceTrack to distribute requests across different tracks + // This helps avoid nonce conflicts when submitting many transactions + const nonceValue = BigInt(i % 50) + const nonceTrack = BigInt(Math.floor(i / 50) + batchIndex * 10) + + try { + // Create and sign a boop for submission + const boop = await createAndSignMintBoop(eoa, { account, nonceTrack, nonceValue }) + const boopHash = computeBoopHash(216n, boop) + + const start = performance.now() + const result = await boopClient.submit({ boop }) + + // Store the initial result + const testResult = { + 
success: true, + latencyMs: Math.round(performance.now() - start), + status: result.status, + boopHash: result.boopHash || boopHash, + } + + // Add to results immediately so we don't lose track of it + results.push(testResult) + + // Now poll for boop state and receipt asynchronously + // This won't block the main stress test flow + if (testResult.boopHash) { + void pollBoopStateAndReceipt(boopClient, testResult.boopHash, testResult) + } + } catch (error) { + results.push({ + success: false, + latencyMs: 0, // Can't measure latency for failed requests + status: "error", + error: stringify(error), + }) + } + })(), + ) + } + + // Wait for all requests in this batch to complete + await Promise.all(batchPromises) + + const batchEnd = performance.now() + const batchDuration = Math.round(batchEnd - batchStart) + console.log(`Batch ${batchIndex + 1} completed in ${batchDuration}ms`) + + // Store latencies for reporting + results.forEach((result) => { + if (result.success && result.latencyMs > 0) { + latencies.push(result.latencyMs) + } + }) + + // Wait between batches if not the last batch + if (batchIndex < batches - 1) { + console.log(`Waiting ${config.delayBetweenBatchesMs}ms before next batch...`) + await new Promise((resolve) => setTimeout(resolve, config.delayBetweenBatchesMs)) + } + } + + // Calculate success rate + const totalRequests = results.length + const successfulRequests = results.filter((r) => r.success).length + const successRate = (successfulRequests / totalRequests) * 100 + + // Calculate latency statistics + latencies.sort((a, b) => a - b) + const avgLatency = latencies.reduce((sum, val) => sum + val, 0) / latencies.length + const minLatency = latencies[0] || 0 + const maxLatency = latencies[latencies.length - 1] || 0 + const p50 = latencies[Math.floor(latencies.length * 0.5)] || 0 + const p90 = latencies[Math.floor(latencies.length * 0.9)] || 0 + const p95 = latencies[Math.floor(latencies.length * 0.95)] || 0 + const p99 = 
latencies[Math.floor(latencies.length * 0.99)] || 0 + + // Get successful transaction hashes for verification + const successfulHashes = results + .filter((r) => r.success && r.boopHash) + .map((r) => r.boopHash) + .filter(Boolean) as string[] + + // Print results + console.log("\n=== RESULTS ===") + console.log(`Total Requests: ${totalRequests}`) + console.log(`Success: ${successfulRequests} (${successRate.toFixed(2)}%)`) + + console.log("\n=== TRANSACTION HASHES (for manual verification) ===") + console.log(`Successful transaction hashes (${successfulHashes.length}):`) + console.log(JSON.stringify(successfulHashes, null, 2)) + console.log(`Receipt status summary: ${JSON.stringify(receiptStatusCounts, null, 2)}`) + + console.log("\nFailures: " + (totalRequests - successfulRequests)) + + console.log("\n=== LATENCY (ms) ===") + console.log(`Average: ${avgLatency.toFixed(2)}`) + console.log(`Min: ${minLatency}`) + console.log(`Max: ${maxLatency}`) + console.log(`P50: ${p50}`) + console.log(`P90: ${p90}`) + console.log(`P95: ${p95}`) + console.log(`P99: ${p99}`) + + // End nonce monitoring + console.log("\n=== Ending Nonce Monitoring ===") + await endNonceMonitoring() + + // Print accounts created during test + console.log("\n=== ACCOUNTS CREATED DURING TEST (1) ===") + console.log(account) + + // Return the results and boopClient for final polling + return { results, boopClient } +} + +// Parse command line arguments +function parseArgs(): StressTestConfig { + const args = process.argv.slice(2) + const config = { ...DEFAULT_CONFIG } + + for (let i = 0; i < args.length; i += 2) { + const key = args[i] + const value = args[i + 1] + + switch (key) { + case "--requests": + case "-r": + config.concurrentRequests = Number.parseInt(value, 10) + break + case "--batch-size": + case "-b": + config.batchSize = Number.parseInt(value, 10) + break + case "--delay": + case "-d": + config.delayBetweenBatchesMs = Number.parseInt(value, 10) + break + case "--url": + case "-u": + 
config.submitterUrl = value
                break
        }
    }

    return config
}

// Track receipt statuses for summary reporting.
// NOTE(review): type arguments were stripped in extraction (`Record = {}`);
// restored as Record<string, number> — values are counters keyed by receipt status.
const receiptStatusCounts: Record<string, number> = {}

// Track all transaction hashes for comprehensive polling
const allTransactionHashes: Hash[] = []

// Track unique transaction hashes to avoid duplicates (stores hash.toString())
const uniqueTransactionHashes = new Set<string>()

// Track polling attempts and timing for each transaction, keyed by boop hash
const pollingStats: Record<
    string,
    {
        attempts: number
        firstPollTime?: number
        lastPollTime?: number
        receiptFoundTime?: number
        totalPollingTimeMs?: number
        success: boolean
    }
> = {}

/**
 * Polls the boop state and receipt endpoints for a given boop hash.
 * Updates the test result object with the retrieved data.
 *
 * @param boopClient client used for the getState/waitForReceipt calls
 * @param hash       boop hash to poll for
 * @param testResult result record mutated in place with state/receipt
 */
async function pollBoopStateAndReceipt(boopClient: BoopClient, hash: Hash, testResult: TestResult) {
    // Add hash to global tracking array for final polling (with deduplication)
    const hashStr = hash.toString()
    if (!uniqueTransactionHashes.has(hashStr)) {
        allTransactionHashes.push(hash)
        uniqueTransactionHashes.add(hashStr)
        console.log(
            `[Tracking] Added new transaction ${hash.substring(0, 8)}... to tracking (total: ${allTransactionHashes.length})`,
        )
    }

    // Initialize polling stats for this hash if not already tracking
    if (!pollingStats[hash]) {
        pollingStats[hash] = {
            attempts: 0,
            firstPollTime: Date.now(),
            success: false,
        }
    }

    const maxTries = 20
    const delayMs = 1000

    for (let i = 0; i < maxTries; i++) {
        // Update polling stats
        pollingStats[hash].attempts++
        pollingStats[hash].lastPollTime = Date.now()

        try {
            // Poll for boop state
            const stateResponse = await boopClient.getState({ boopHash: hash })
            if (stateResponse) {
                testResult.state = stateResponse
            }

            // Poll for receipt
            const receiptResponse = await boopClient.waitForReceipt({ boopHash: hash, timeout: 2000 })
            if (receiptResponse?.receipt) {
                testResult.receipt = receiptResponse.receipt

                // Count receipt statuses for summary (with deduplication)
                if (receiptResponse.receipt.status && !pollingStats[hash].success) {
                    const status = receiptResponse.receipt.status
                    receiptStatusCounts[status] = (receiptStatusCounts[status] || 0) + 1

                    // Mark as successful and record timing
                    pollingStats[hash].success = true
                    pollingStats[hash].receiptFoundTime = Date.now()
                    pollingStats[hash].totalPollingTimeMs =
                        pollingStats[hash].receiptFoundTime - (pollingStats[hash].firstPollTime || 0)

                    console.log(
                        `[Poll] Receipt found for ${hash.substring(0, 8)}... after ${pollingStats[hash].attempts} attempts (${pollingStats[hash].totalPollingTimeMs}ms) at ${new Date().toISOString()}`,
                    )
                }
            }

            // If we have a receipt with a status, we can stop polling.
            // FIX: the original read `receiptResponse.receipt?.status` without guarding
            // `receiptResponse` itself, although it was guarded with `?.` above — a
            // null/undefined response would throw here, be silently swallowed by the
            // catch below, and waste a retry.
            if (receiptResponse?.receipt?.status) {
                return
            }
        } catch (_error) {
            // Continue polling even if there's an error
            // This could be because the boop is not yet processed
            await new Promise((resolve) => setTimeout(resolve, delayMs))
            continue
        }

        // Wait before the next polling attempt
        await new Promise((resolve) => setTimeout(resolve, delayMs))
    }

    // Log if we couldn't get a receipt after max tries
    if (!pollingStats[hash].success) {
        console.log(`[Poll] No receipt found for ${hash.substring(0, 8)}... after ${maxTries} attempts`)
    }
}

/**
 * Performs a final polling pass for all transactions to ensure we get as many
 * receipts as possible. This is run after all submissions are complete.
 */
async function finalPollingPass(boopClient: BoopClient, results: TestResult[]): Promise<void> {
    console.log("\n=== Performing final receipt polling pass... ===")
    console.log(`Polling ${allTransactionHashes.length} unique transactions with longer timeout...`)

    // Use a longer timeout and more attempts for the final pass
    const maxTries = 30
    const delayMs = 1000

    // Create a map of hash to test result for easy lookup
    const resultsByHash = new Map()
    for (const result of results) {
        if (result.boopHash) {
            resultsByHash.set(result.boopHash, result)
        }
    }

    // Track transactions that still need polling
    const transactionsNeedingPolling = allTransactionHashes.filter((hash) => {
        // Skip if we already have a successful receipt for this hash
        return !pollingStats[hash]?.success
    })

    console.log(`Found ${transactionsNeedingPolling.length} transactions that still need polling`)

    // Poll each transaction again with longer timeout
    const pollingPromises = transactionsNeedingPolling.map(async (hash) => {
        const testResult = resultsByHash.get(hash)
        if (!testResult) return

        // Initialize polling stats for this hash if not already tracking
        if (!pollingStats[hash]) {
            pollingStats[hash] = {
                attempts: 0,
                firstPollTime: Date.now(),
                success: false,
            }
        }

        let finalAttempt = 0

        for (let i = 0; i < maxTries; i++) {
            finalAttempt = i + 1
            pollingStats[hash].attempts++
            pollingStats[hash].lastPollTime = Date.now()

            try {
                // Poll for receipt with longer timeout
                const receiptResponse = await boopClient.waitForReceipt({ boopHash: hash, timeout: 3000 })
                if (receiptResponse?.receipt) {
                    testResult.receipt = receiptResponse.receipt

                    // Count receipt statuses for summary (with deduplication)
                    if (receiptResponse.receipt.status && !pollingStats[hash].success) {
                        const status = receiptResponse.receipt.status
                        receiptStatusCounts[status] = (receiptStatusCounts[status] || 0) + 1

                        // Mark as successful and record timing
                        pollingStats[hash].success = true
                        pollingStats[hash].receiptFoundTime = Date.now()
                        pollingStats[hash].totalPollingTimeMs =
pollingStats[hash].receiptFoundTime - (pollingStats[hash].firstPollTime || 0)

                        console.log(
                            `[Final Poll] Receipt found for ${hash.substring(0, 8)}... after ${finalAttempt} final attempts (${pollingStats[hash].totalPollingTimeMs}ms total) at ${new Date().toISOString()}`,
                        )
                        return // Stop polling this transaction once we have a status
                    }
                }
            } catch (_error) {
                // Continue polling even if there's an error
                // NOTE(review): unlike the per-submission poll loop, this catch does not
                // sleep itself — pacing relies solely on the delay below.
            }

            // Wait before the next polling attempt
            await new Promise((resolve) => setTimeout(resolve, delayMs))
        }

        // Log if we still couldn't get a receipt after max tries
        // (finalAttempt holds the number of the last attempt made above)
        if (!pollingStats[hash].success) {
            console.log(
                `[Final Poll] No receipt found for ${hash.substring(0, 8)}... after ${finalAttempt} final attempts`,
            )
        }
    })

    // Wait for all polling to complete (all transactions are polled concurrently)
    await Promise.all(pollingPromises)
    console.log("Final polling pass complete.")
}

/**
 * Calculate statistics for an array of numbers.
 * Returns all-zero stats for an empty input rather than NaN.
 */
function calculateStats(values: number[]): {
    min: number
    max: number
    avg: number
    median: number
    mode: number
    p90: number
    p95: number
} {
    if (values.length === 0) {
        return { min: 0, max: 0, avg: 0, median: 0, mode: 0, p90: 0, p95: 0 }
    }

    // Sort the values for percentile calculations
    // (spread copies the array, so the caller's input is not mutated)
    const sorted = [...values].sort((a, b) => a - b)

    // Calculate min, max, avg
    const min = sorted[0]
    const max = sorted[sorted.length - 1]
    const avg = sorted.reduce((sum, val) => sum + val, 0) / sorted.length

    // Calculate median
    const midIndex = Math.floor(sorted.length / 2)
    const median = sorted.length % 2 === 0 ?
(sorted[midIndex - 1] + sorted[midIndex]) / 2 : sorted[midIndex] + + // Calculate mode + const counts: Record = {} + let mode = sorted[0] + let maxCount = 0 + + for (const value of sorted) { + counts[value] = (counts[value] || 0) + 1 + if (counts[value] > maxCount) { + maxCount = counts[value] + mode = value + } + } + + // Calculate percentiles + const p90 = sorted[Math.floor(sorted.length * 0.9)] + const p95 = sorted[Math.floor(sorted.length * 0.95)] + + return { min, max, avg, median, mode, p90, p95 } +} + +async function main() { + try { + const config = parseArgs() + // Run the stress test and get the results and boopClient + const { results, boopClient } = await runStressTest(config) + + // Perform a final polling pass to ensure we get as many receipts as possible + console.log("\nStarting final polling pass to collect all receipts...") + await finalPollingPass(boopClient, results) + + // Print the final receipt status counts after all polling is complete + console.log("\n=== FINAL RECEIPT STATUS SUMMARY ===") + console.log(JSON.stringify(receiptStatusCounts, null, 2)) + + // Calculate and display polling statistics + const successfulPolls = Object.entries(pollingStats).filter(([_, stats]) => stats.success) + + if (successfulPolls.length > 0) { + const attemptCounts = successfulPolls.map(([_, stats]) => stats.attempts) + const pollingTimes = successfulPolls + .map(([_, stats]) => stats.totalPollingTimeMs || 0) + .filter((time) => time > 0) + + const attemptStats = calculateStats(attemptCounts) + const timeStats = calculateStats(pollingTimes) + + console.log("\n=== POLLING STATISTICS ===") + console.log(`Total transactions tracked: ${Object.keys(pollingStats).length}`) + console.log(`Successful receipts found: ${successfulPolls.length}`) + console.log( + `Success rate: ${((successfulPolls.length / Object.keys(pollingStats).length) * 100).toFixed(2)}%`, + ) + + console.log("\n=== POLLING ATTEMPTS ===") + console.log(`Min attempts: ${attemptStats.min}`) + 
console.log(`Max attempts: ${attemptStats.max}`) + console.log(`Average attempts: ${attemptStats.avg.toFixed(2)}`) + console.log(`Median attempts: ${attemptStats.median}`) + console.log(`Mode attempts: ${attemptStats.mode}`) + console.log(`90th percentile: ${attemptStats.p90}`) + console.log(`95th percentile: ${attemptStats.p95}`) + + console.log("\n=== POLLING TIME (ms) ===") + console.log(`Min time: ${timeStats.min}`) + console.log(`Max time: ${timeStats.max}`) + console.log(`Average time: ${timeStats.avg.toFixed(2)}`) + console.log(`Median time: ${timeStats.median}`) + console.log(`Mode time: ${timeStats.mode}`) + console.log(`90th percentile: ${timeStats.p90}`) + console.log(`95th percentile: ${timeStats.p95}`) + } else { + console.log("\n=== POLLING STATISTICS ===") + console.log("No successful polls to analyze") + } + } catch (error) { + console.error("Stress test failed:", error) + process.exit(1) + } + + process.exit(0) +} + +main() diff --git a/apps/submitter/bin/testResyncService.ts b/apps/submitter/bin/testResyncService.ts index 207701b399..17b45a65db 100755 --- a/apps/submitter/bin/testResyncService.ts +++ b/apps/submitter/bin/testResyncService.ts @@ -1,4 +1,5 @@ #!/usr/bin/env bun +// TODO — this script is a WIP! import { type Hex, sleep } from "@happy.tech/common" import { abis, deployment } from "@happy.tech/contracts/mocks/sepolia" import { @@ -8,15 +9,13 @@ import { createPublicClient, createWalletClient, encodeFunctionData, - formatEther, + // formatEther, parseGwei, } from "viem" import { privateKeyToAccount } from "viem/accounts" import { happychainTestnet } from "viem/chains" import { resyncAccount } from "#lib/services/resync" -// TODO — this script is a WIP! - /** * This script tests the {@link resyncAccount} function by: * 1. 
Connecting to the testnet.happy.tech RPC endpoint @@ -28,33 +27,49 @@ import { resyncAccount } from "#lib/services/resync" const BLOCK_GAS_LIMIT = 30_000_000 const NUM_STUCK_TXS = 10 -const TX_GAS_AMOUNT = 1_200_000n -const NUM_BLOCKS_TO_FILL = 20 +const TX_GAS_AMOUNT = 1_500_000n +const NUM_BLOCKS_TO_FILL = 10 const MONITOR_TIMEOUT = 60_000 +const NUM_FILLER_ACCOUNTS = 10 +// const MIN_ACCOUNT_BALANCE = parseGwei("10000") -const TXS_PER_BLOCK = Math.floor(BLOCK_GAS_LIMIT / Number(TX_GAS_AMOUNT)) -const NUM_BLOCK_FILLING_TXS = TXS_PER_BLOCK * NUM_BLOCKS_TO_FILL +const TXS_PER_BLOCK = Math.floor(BLOCK_GAS_LIMIT / Number(TX_GAS_AMOUNT)) // 5 +const TXNS_PER_FILLER = Math.floor((TXS_PER_BLOCK * NUM_BLOCKS_TO_FILL) / NUM_FILLER_ACCOUNTS) // Number of transactions per filler account + +type TxInfo = { + txHash: Hex + address: `0x${string}` + nonce: number +} const RPC_HTTP_URL = process.env.RPC_HTTP_URLS?.split(",")[0] ?? "https://rpc.testnet.happy.tech/http" -const EXECUTOR_KEY = process.env.EXECUTOR_KEYS?.split(",")[0] -const BLOCK_FILLER_KEY = process.env.EXECUTOR_KEYS?.split(",")[1] -if (!EXECUTOR_KEY || !BLOCK_FILLER_KEY) { - throw new Error("Executor keys not found in environment variables") +// Get executor key from PRIVATE_KEY_ACCOUNT_DEPLOYER +const EXECUTOR_KEY = process.env.PRIVATE_KEY_ACCOUNT_DEPLOYER +if (!EXECUTOR_KEY) { + throw new Error("Executor key not found in environment variables (PRIVATE_KEY_ACCOUNT_DEPLOYER)") +} + +// Get block filler keys from EXECUTOR_KEYS (first 10 keys) +const BLOCK_FILLER_KEYS = process.env.EXECUTOR_KEYS?.split(",").slice(0, NUM_FILLER_ACCOUNTS) +if (!BLOCK_FILLER_KEYS || BLOCK_FILLER_KEYS.length < NUM_FILLER_ACCOUNTS) { + throw new Error(`Need at least ${NUM_FILLER_ACCOUNTS} block filler keys in EXECUTOR_KEYS`) } const executorAccount = privateKeyToAccount(EXECUTOR_KEY as Hex) -const blockFillerAccount = privateKeyToAccount(BLOCK_FILLER_KEY as Hex) +const blockFillerAccounts = BLOCK_FILLER_KEYS.map((key) => 
privateKeyToAccount(key as Hex)) const executorWalletClient = createWalletClient({ account: executorAccount, transport: http(RPC_HTTP_URL), }) -const blockFillerWalletClient = createWalletClient({ - account: blockFillerAccount, - transport: http(RPC_HTTP_URL), -}) +const blockFillerWalletClients = blockFillerAccounts.map((account) => + createWalletClient({ + account, + transport: http(RPC_HTTP_URL), + }), +) const publicClient = createPublicClient({ transport: http(RPC_HTTP_URL), @@ -76,8 +91,8 @@ async function prepareBurnGasTx({ maxFeePerGas: bigint maxPriorityFeePerGas: bigint nonce: number -}): Promise { - return walletClient.signTransaction( +}): Promise { + const txHash = await walletClient.signTransaction( await walletClient.prepareTransactionRequest({ account, chain: happychainTestnet, @@ -93,89 +108,158 @@ async function prepareBurnGasTx({ maxPriorityFeePerGas, }), ) + + return { txHash, address: account.address, nonce } } async function run(): Promise { try { console.log("\n\x1b[1m\x1b[34m═════════ RESYNC SERVICE TEST ═════════\x1b[0m") - const [executorBalance, fillerBalance] = await Promise.all([ - publicClient.getBalance({ address: executorAccount.address }), - publicClient.getBalance({ address: blockFillerAccount.address }), - ]) - - console.log(`\nExecutor account: ${executorAccount.address}, balance: ${formatEther(executorBalance)} ETH`) - console.log(`Block filler account: ${blockFillerAccount.address}, balance: ${formatEther(fillerBalance)} ETH`) - - if (executorBalance < parseGwei("5000") || fillerBalance < parseGwei("5000")) { - console.error("\n\x1b[41m\x1b[37m INSUFFICIENT FUNDS \x1b[0m Please fund the accounts.") - return - } + //! 
Commented out printing of balance, as all 10 anvil keys have ample ETH + // const executorBalance = await publicClient.getBalance({ address: executorAccount.address }) + // const fillerBalances = await Promise.all( + // blockFillerAccounts.map((account) => publicClient.getBalance({ address: account.address })), + // ) + + // console.log(`\nExecutor account: ${executorAccount.address}, balance: ${formatEther(executorBalance)} ETH`) + // console.log( + // `Block filler accounts: ${blockFillerAccounts.map((account) => account.address).join(", ")}, \nbalances: ${fillerBalances + // .map((balance) => formatEther(balance)) + // .join(", ")}`, + // ) + + // // Check if any account has insufficient balance + // if (executorBalance < MIN_ACCOUNT_BALANCE) { + // console.error("\n\x1b[41m\x1b[37m INSUFFICIENT FUNDS \x1b[0m Executor account needs funding.") + // return + // } + + // const insufficientFillerAccounts = fillerBalances.filter((balance) => balance < MIN_ACCOUNT_BALANCE) + // if (insufficientFillerAccounts.length > 0) { + // console.error(`\n\x1b[41m\x1b[37m INSUFFICIENT FUNDS \x1b[0m ${insufficientFillerAccounts.length} filler accounts need funding.`) + // return + // } const block = await publicClient.getBlock() const baseFee = block.baseFeePerGas || parseGwei("1") - const [executorLatestNonce, executorPendingNonce, fillerLatestNonce, fillerPendingNonce] = await Promise.all([ - publicClient.getTransactionCount({ address: executorAccount.address }), - publicClient.getTransactionCount({ address: executorAccount.address, blockTag: "pending" }), - publicClient.getTransactionCount({ address: blockFillerAccount.address }), - publicClient.getTransactionCount({ address: blockFillerAccount.address, blockTag: "pending" }), - ]) + const executorLatestNonce = await publicClient.getTransactionCount({ + address: executorAccount.address, + blockTag: "latest", + }) + const executorPendingNonce = await publicClient.getTransactionCount({ + address: executorAccount.address, + 
blockTag: "pending", + }) + + const fillerLatestNonces = await Promise.all( + blockFillerAccounts.map((account) => publicClient.getTransactionCount({ address: account.address })), + ) console.log("\nCurrent network state:") - console.log(`- Current block: ${block.number}, base fee: ${baseFee}`) + console.log(`- Current block: ${block.number}, base fee: ${Number(baseFee)}`) console.log(`- Executor nonces: latest=${executorLatestNonce}, pending=${executorPendingNonce}`) - console.log(`- Block filler nonces: latest=${fillerLatestNonce}, pending=${fillerPendingNonce}`) + console.log(`- Block filler nonces: latest=${fillerLatestNonces.join(", ")}`) const stuckPriorityFee = 0n - const fillerPriorityFee = baseFee - - console.log(`\nPreparing ${NUM_BLOCK_FILLING_TXS} block-filling and ${NUM_STUCK_TXS} stuck transactions...`) + const fillerPriorityFee = baseFee / 2n - const blockFillerRawTxs = await Promise.all( - Array.from({ length: NUM_BLOCK_FILLING_TXS }, (_, i) => - prepareBurnGasTx({ - account: blockFillerAccount, - walletClient: blockFillerWalletClient, - gasToBurn: TX_GAS_AMOUNT, - gas: TX_GAS_AMOUNT + 42000n, - maxFeePerGas: baseFee * 3n, - maxPriorityFeePerGas: fillerPriorityFee, - nonce: fillerLatestNonce + i, - }), - ), + console.log( + `\nPreparing ${NUM_FILLER_ACCOUNTS * TXNS_PER_FILLER} block-filling and ${NUM_STUCK_TXS} stuck transactions...`, ) - const stuckRawTxs = await Promise.all( - Array.from({ length: NUM_STUCK_TXS }, (_, i) => - prepareBurnGasTx({ - account: executorAccount, - walletClient: executorWalletClient, + // Create block filler transactions organized by nonce offset + // blockFillerTxs[0] = all txs with currentNonce, blockFillerTxs[1] = all txs with currentNonce+1, etc. 
+ const blockFillerTxs: TxInfo[][] = Array.from({ length: TXNS_PER_FILLER }, () => []) + + for (let txIndex = 0; txIndex < TXNS_PER_FILLER; txIndex++) { + console.log(`\nCreating transaction batch ${txIndex} (nonce offset +${txIndex}) for all accounts`) + + for (let accountIndex = 0; accountIndex < blockFillerAccounts.length; accountIndex++) { + const account = blockFillerAccounts[accountIndex] + const walletClient = blockFillerWalletClients[accountIndex] + const currentNonce = fillerLatestNonces[accountIndex] + txIndex + + // Increase gas price slightly for each batch to avoid replacement transaction underpriced errors + // when transactions are reordered during sending + const batchMultiplier = BigInt(txIndex + 1) + const batchMaxFeePerGas = baseFee + (baseFee / 2n) * batchMultiplier + + // Make sure priority fee is always less than max fee + const batchPriorityFee = fillerPriorityFee * batchMultiplier + const adjustedPriorityFee = + batchPriorityFee < batchMaxFeePerGas ? batchPriorityFee : batchMaxFeePerGas - 1n + + const tx = await prepareBurnGasTx({ + account, + walletClient, gasToBurn: TX_GAS_AMOUNT, gas: TX_GAS_AMOUNT + 42000n, - maxFeePerGas: baseFee + baseFee / 2n, - maxPriorityFeePerGas: stuckPriorityFee, - nonce: executorLatestNonce + i, - }), - ), + maxFeePerGas: batchMaxFeePerGas, + maxPriorityFeePerGas: adjustedPriorityFee, + nonce: currentNonce, + }) + blockFillerTxs[txIndex].push(tx) + } + } + + // Create stuck transactions synchronously + const stuckTxs: TxInfo[] = [] + console.log( + `Creating ${NUM_STUCK_TXS} stuck transactions for executor account ${executorAccount.address} starting at nonce ${executorLatestNonce}`, ) - const allRawTxs: Hex[] = [] - const firstBatchCount = Math.floor(NUM_BLOCK_FILLING_TXS * 0.3) + for (let i = 0; i < NUM_STUCK_TXS; i++) { + const tx = await prepareBurnGasTx({ + account: executorAccount, + walletClient: executorWalletClient, + gasToBurn: TX_GAS_AMOUNT, + gas: TX_GAS_AMOUNT + 42000n, + maxFeePerGas: baseFee + 
baseFee / 5n, // Lower fee to ensure they stay in mempool + maxPriorityFeePerGas: stuckPriorityFee, + nonce: executorLatestNonce + i, + }) + stuckTxs.push(tx) + console.log(` Created stuck tx for ${tx.address} with nonce ${tx.nonce}`) + } - // Add first batch of block fillers - allRawTxs.push(...blockFillerRawTxs.slice(0, firstBatchCount)) - // Add stuck transactions - allRawTxs.push(...stuckRawTxs) - // Add remaining block fillers - allRawTxs.push(...blockFillerRawTxs.slice(firstBatchCount)) + // Order transactions: + // 1. First 2 batches of block filler txs (nonce 0 and 1 for all accounts) + // 2. All stuck transactions + // 3. Remaining 3 batches of block filler txs (nonces 2, 3, 4 for all accounts) + const orderedTxs = [ + // ...blockFillerTxs[0], // First batch - all accounts with nonce+0 + // ...blockFillerTxs[1], // Second batch - all accounts with nonce+1 + // ...blockFillerTxs[2], // Third batch - all accounts with nonce+2 + // ...blockFillerTxs[3], // Fourth batch - all accounts with nonce+3 + // ...blockFillerTxs[4], // Fifth batch - all accounts with nonce+4 + ...blockFillerTxs.flat(), + ...stuckTxs, // All stuck transactions + ] + + console.log(`\nSending ${orderedTxs.length} transactions in batches (non-blocking)...`) - console.log(`\nSending ${allRawTxs.length} raw transactions in parallel...`) - await Promise.all( - allRawTxs.map((serializedTransaction) => publicClient.sendRawTransaction({ serializedTransaction })), - ) + const startTime = Date.now() + + // Send transactions without awaiting - fire and forget for minimal latency + orderedTxs.forEach((tx, index) => { + // Use setTimeout to ensure minimal delay between sends and avoid overwhelming the RPC + // setTimeout(() => { + const _hash = publicClient.sendRawTransaction({ serializedTransaction: tx.txHash }).catch((error) => { + console.error( + `Failed to send tx ${index} (address: ${tx.address}, nonce: ${tx.nonce}): ${error.message}`, + ) + }) + // }, index * 10) // 10ms between each 
transaction + }) + + const sendDuration = Date.now() - startTime + console.log(`Sent all ${orderedTxs.length} transactions in ${sendDuration}ms`) console.log("\n\x1b[1m\x1b[34m═════════ RUNNING RESYNC ═════════\x1b[0m") + + // Run resync and monitor nonce gap concurrently await Promise.all([resyncAccount(executorAccount, "recheck"), monitorNonceGap()]) } catch (error) { console.error(`Error in test: ${error}`) @@ -183,36 +267,45 @@ async function run(): Promise { } } +/** + * Monitors the nonce gap between latest and pending nonces for the executor account. + * Runs for up to MONITOR_TIMEOUT milliseconds, checking every second. + * Resolves when the nonce gap is zero or when timeout is reached. + */ async function monitorNonceGap(): Promise { console.log("\n\x1b[1m\x1b[34m═════════ MONITORING NONCE GAP ═════════\x1b[0m") - try { - const startTime = Date.now() - while (Date.now() - startTime < MONITOR_TIMEOUT) { - const latestNonce = await publicClient.getTransactionCount({ address: executorAccount.address }) - const pendingNonce = await publicClient.getTransactionCount({ - address: executorAccount.address, - blockTag: "pending", - }) - - const nonceGap = pendingNonce - latestNonce - - console.log("\nLatest nonce:", latestNonce) - console.log("Pending nonce:", pendingNonce) - console.log("Nonce gap:", nonceGap) - - if (nonceGap === 0) { - console.log("\n\x1b[42m\x1b[30m SUCCESS \x1b[0m Nonce gap resolved successfully!") - return - } - - await sleep(1000) - } - - console.log("\n\x1b[43m\x1b[30m TIMEOUT \x1b[0m Monitoring timed out, nonce gap may still exist.") - } catch (error) { - console.error("Error in monitorNonceGap:", error) + const monitorStartTime = Date.now() + while (Date.now() - monitorStartTime < MONITOR_TIMEOUT) { + const latestNonce = await publicClient.getTransactionCount({ + address: executorAccount.address, + blockTag: "latest", + }) + const pendingNonce = await publicClient.getTransactionCount({ + address: executorAccount.address, + blockTag: 
"pending", + }) + + const nonceGap = pendingNonce - latestNonce + + console.log("\nLatest nonce:", latestNonce) + console.log("Pending nonce:", pendingNonce) + console.log("Nonce gap:", nonceGap) + + // if (nonceGap === 0) { + // console.log("\n\x1b[42m\x1b[30m SUCCESS \x1b[0m Nonce gap resolved successfully!") + // return + // } + + await sleep(1000) } + + console.log("\n\x1b[43m\x1b[30m TIMEOUT \x1b[0m Monitoring timed out, nonce gap may still exist.") } -run().catch(() => process.exit(1)) +run() + .then(() => process.exit(0)) + .catch((error) => { + console.error(`Error in test: ${error}`) + process.exit(1) + }) diff --git a/apps/submitter/bin/utils/nonceMonitor.ts b/apps/submitter/bin/utils/nonceMonitor.ts new file mode 100644 index 0000000000..8c5dacdc0d --- /dev/null +++ b/apps/submitter/bin/utils/nonceMonitor.ts @@ -0,0 +1,159 @@ +/** + * Utility to monitor nonces of executor keys before and after stress tests + */ + +import type { Address } from "@happy.tech/common" +import { publicClient } from "#lib/utils/clients" + +interface NonceState { + address: Address + nonce: number +} + +interface NonceReport { + before: NonceState[] + after: NonceState[] + diff: { address: Address; before: number; after: number; txCount: number }[] + createdAccounts?: Address[] +} + +// All executor keys and account deployer key to monitor +const KEYS_TO_MONITOR: Address[] = [ + // Account deployer key + "0xf4822fC7CB2ec69A5f9D4b5d4a59B949eFfa8311", + // Executor keys (foundry default keys) + "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", + "0x70997970C51812dc3A010C7d01b50e0d17dc79C8", + "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC", + "0x90F79bf6EB2c4f870365E785982E1f101E93b906", + "0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65", + "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc", + "0x976EA74026E726554dB657fA54763abd0C3a0aa9", + "0x14dC79964da2C08b23698B3D3cc7Ca32193d9955", + "0x23618e81E3f5cdF7f54C3d65f7FBc0aBf5B21E8f", + "0xa0Ee7A142d267C1f36714E4a8F75612F20a79720", +] + +/** + * Get 
current nonces for all executor keys + */ +export async function getCurrentNonces(): Promise { + const noncePromises = KEYS_TO_MONITOR.map(async (address) => { + const nonce = await publicClient.getTransactionCount({ + address, + }) + + return { + address, + nonce, + } + }) + + return Promise.all(noncePromises) +} + +/** + * Store of nonce states for comparison + */ +let initialNonceState: NonceState[] = [] + +/** + * Store of accounts created during the test + */ +let createdAccounts: Address[] = [] + +/** + * Track a new account that was created during the test + */ +export function trackCreatedAccount(address: Address): void { + if (!createdAccounts.includes(address)) { + createdAccounts.push(address) + } +} + +/** + * Start monitoring nonces by capturing the initial state + */ +export async function startNonceMonitoring(): Promise { + // Reset the created accounts array + createdAccounts = [] + console.log("\n=== Starting Nonce Monitoring ===") + initialNonceState = await getCurrentNonces() + + console.log("Initial nonce state:") + initialNonceState.forEach(({ address, nonce }) => { + console.log(`${address}: ${nonce}`) + }) +} + +/** + * End monitoring and report the differences + */ +export async function endNonceMonitoring(): Promise { + console.log("\n=== Ending Nonce Monitoring ===") + const finalNonceState = await getCurrentNonces() + + console.log("Final nonce state:") + finalNonceState.forEach(({ address, nonce }) => { + console.log(`${address}: ${nonce}`) + }) + + // Calculate differences + const diff = finalNonceState.map(({ address, nonce: afterNonce }) => { + const beforeState = initialNonceState.find((state) => state.address === address) + const beforeNonce = beforeState ? 
beforeState.nonce : 0 + const txCount = afterNonce - beforeNonce + + return { + address, + before: beforeNonce, + after: afterNonce, + txCount, + } + }) + + console.log("\n=== Nonce Differences (Transactions Sent) ===") + diff.forEach(({ address, before, after, txCount }) => { + if (txCount > 0) { + console.log(`${address}: ${before} → ${after} (${txCount} transactions)`) + } + }) + + const totalTxs = diff.reduce((sum, { txCount }) => sum + txCount, 0) + console.log(`\nTotal transactions sent: ${totalTxs}`) + + // Show accounts created during the test + if (createdAccounts.length > 0) { + console.log(`\n=== ACCOUNTS CREATED DURING TEST (${createdAccounts.length}) ===`) + createdAccounts.forEach((address, i) => { + if (i < 10 || i >= createdAccounts.length - 5) { + console.log(address) + } else if (i === 10) { + console.log(`... and ${createdAccounts.length - 15} more ...`) + } + }) + } + + return { + before: initialNonceState, + after: finalNonceState, + diff, + createdAccounts, + } +} + +/** + * Check if an address matches a specific executor + */ +export function isExecutorAddress(address: Address, executorAddress: Address): boolean { + return address.toLowerCase() === executorAddress.toLowerCase() +} + +/** + * Find which executor was used for a specific account creation + * This is useful for tracking which executor was responsible for creating which accounts + */ +export async function findExecutorForAccount(_accountAddress: Address): Promise
{ + // Return the account deployer key as the default executor for account creation + return KEYS_TO_MONITOR[0] // Account deployer key is first in the array +} diff --git a/apps/submitter/lib/services/replaceTransaction.ts b/apps/submitter/lib/services/replaceTransaction.ts index f1f886d7a8..8a4732a892 100644 --- a/apps/submitter/lib/services/replaceTransaction.ts +++ b/apps/submitter/lib/services/replaceTransaction.ts @@ -5,7 +5,7 @@ import { blockService, evmNonceManager } from "#lib/services" import { traceFunction } from "#lib/telemetry/traces" import type { EvmTxInfo } from "#lib/types" import { isNonceTooLowError, publicClient, walletClient } from "#lib/utils/clients" -import { getFees, getLatestBaseFee } from "#lib/utils/gas" +import { getFees } from "#lib/utils/gas" import { logger } from "#lib/utils/logger" // TODO: use this in BoopReceiptService @@ -43,8 +43,15 @@ async function replaceInternal( const nonce = evmTxInfo.nonce const initialDelay = 500 const maxDelay = 8000 + const MAX_ATTEMPTS = 15 // Limit total attempts to avoid infinite loops let attempt = 0 - let included = 0 // current included nonce — okay to start at 0 + + // Immediately check the current nonce before proceeding, and start with the current confirmed nonce + let included = await publicClient.getTransactionCount({ address, blockTag: "latest" }) + if (included >= nonce) { + logger.info(`Nonce ${nonce} for ${address} is already confirmed`) + return + } // TODO We don't really handle re-orgs here and might end up spinning forever. // Not an urgent problem: OP stacks don't re-org unless there is a catastrophe. 
@@ -62,6 +69,7 @@ async function replaceInternal( if (receivedNonce >= included) included = receivedNonce else logger.error(`Included nonce went down from ${included} to ${receivedNonce}, possible re-org.`) } + if (included >= nonce) { logger.info(`Transaction replacement successful for ${address} at nonce ${receivedNonce}`) return true @@ -70,19 +78,55 @@ async function replaceInternal( } while (true) { + if (attempt >= MAX_ATTEMPTS) { + logger.error(`Failed to replace transaction for ${address} at nonce ${nonce}`) + return + } + + const latestNonce = await publicClient.getTransactionCount({ address, blockTag: "latest" }) + if (latestNonce > included) included = latestNonce + + if (included >= nonce) { + logger.info(`Nonce ${nonce} for ${address} is now confirmed`) + return + } + const block = blockService.getCurrentBlock() const gasUsed = block.gasUsed ?? 0n const gasLimit = block.gasLimit ?? 2n ** 50n // 1 petagas — implausibly high - const blockBaseFee = getLatestBaseFee() - const baseFeeTooHigh = blockBaseFee > env.MAX_BASEFEE const blockFull = gasUsed > gasLimit - const { fees, error } = getFees({ cancellingFor: address }, evmTxInfo) - if (baseFeeTooHigh || blockFull || error) { - // Wait for basefee to come down or blocks to stop being full then recheck. 
+ + // Use original tx gas values directly and apply more aggressive bumping based on attempt number + const feeMultiplier = Math.min(3.0, 1.2 + attempt * 0.3) + + // Use the original transaction's fees directly as a starting point + const basePriorityFee = evmTxInfo.maxPriorityFeePerGas + const baseFee = evmTxInfo.maxFeePerGas - basePriorityFee + + const newPriorityFee = BigInt(Math.floor(Number(basePriorityFee) * feeMultiplier)) + const newBaseFee = BigInt(Math.floor(Number(baseFee) * feeMultiplier)) + + let fees = { + maxPriorityFeePerGas: newPriorityFee, + maxFeePerGas: newBaseFee + newPriorityFee, + } + + // If our calculated fees exceed maximum configured fee, fall back to getFees + if (fees.maxFeePerGas > env.MAX_BASEFEE) { + const feeResult = getFees(undefined, evmTxInfo) + fees = feeResult.fees + if (feeResult.error) { + logger.warn(`Fee calculation error during replacement attempt ${attempt}:`, feeResult.error) + } + } + + logger.trace( + `Replacement attempt ${attempt} for nonce ${nonce}: using fee multiplier ${feeMultiplier.toFixed(2)}`, + ) + if (blockFull) { + // Wait for blocks to stop being full then recheck. if (await waitForNonce()) return continue - // NOTE: `error` signifies we've exceeded some max, and we know here that if it triggers the `if`, - // then the replacement bump is the cause and we can't bump enough to replace the tx. 
} try { @@ -90,23 +134,40 @@ async function replaceInternal( account, to: account.address, value: 0n, - gas: 21_000n, + gas: 22_000n, nonce, ...fees, }) evmTxInfo = { nonce, ...fees } - logger.info(`Sent replacement tx ${hash} for ${account} at nonce ${nonce}`) + logger.info(`Sent replacement tx ${hash} for ${account.address} at nonce ${nonce}`) while (true) if (await waitForNonce()) return } catch (error) { if (isNonceTooLowError(error)) { + // Immediately check the current nonce + const confirmedNonce = await publicClient.getTransactionCount({ address, blockTag: "latest" }) + included = Math.max(included, confirmedNonce) + + if (confirmedNonce >= nonce) { + logger.trace( + `Nonce ${nonce} for ${address} is already confirmed (detected from nonce too low error)`, + ) + return + } + + // Also try the nonce manager's resync const newNonce = await evmNonceManager.resyncIfTooLow(account.address) - if (newNonce && newNonce >= evmTxInfo.nonce) return + if (newNonce && newNonce >= evmTxInfo.nonce) { + logger.trace(`Nonce ${nonce} for ${address} is confirmed after resync (new nonce: ${newNonce})`) + return + } } const msg = getProp(error, "message", "string") const underpriced = msg?.includes("replacement") || msg?.includes("underpriced") if (underpriced) continue // don't wait - const delay = Math.min(maxDelay, initialDelay * 2 ** attempt++) - logger.warn(`Error during replacement, waiting ${delay}ms before retry`, address, nonce, error) + attempt++ + + const delay = Math.min(maxDelay, initialDelay * 2 ** attempt) + logger.warn(`Error during replacement (attempt ${attempt}/${MAX_ATTEMPTS}), waiting ${delay}ms`) await sleep(delay) } }