diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml
index 5be5a03..8ae816e 100644
--- a/.github/workflows/benchmarks.yml
+++ b/.github/workflows/benchmarks.yml
@@ -1,13 +1,6 @@
name: Performance Benchmarks
on:
- push:
- branches: [ "main", "dev_peer_discovery" ]
- pull_request:
- branches: [ "main", "dev_peer_discovery" ]
- schedule:
- # Run weekly on Sunday at 5 AM UTC
- - cron: '0 5 * * 0'
workflow_dispatch:
jobs:
diff --git a/.github/workflows/cmake-multi-platform.yml b/.github/workflows/cmake-multi-platform.yml
index 9946f49..00e91e4 100644
--- a/.github/workflows/cmake-multi-platform.yml
+++ b/.github/workflows/cmake-multi-platform.yml
@@ -2,16 +2,6 @@ name: Release Build CI
on:
push:
- branches:
- - develop
- - main
- paths-ignore:
- - ".github/**"
- - "Readme.md"
- pull_request:
- branches:
- - develop
- - main
paths-ignore:
- ".github/**"
- "Readme.md"
@@ -21,9 +11,113 @@ on:
description: "Release tag"
required: false
type: string
+ android:
+ description: "Build Android targets"
+ required: false
+ type: boolean
+ default: true
+ ios:
+ description: "Build iOS targets"
+ required: false
+ type: boolean
+ default: true
+ osx:
+ description: "Build OSX targets"
+ required: false
+ type: boolean
+ default: true
+ linux:
+ description: "Build Linux targets"
+ required: false
+ type: boolean
+ default: true
+ windows:
+ description: "Build Windows targets"
+ required: false
+ type: boolean
+ default: true
jobs:
+ prepare-matrix:
+ runs-on: ubuntu-latest
+ outputs:
+ matrix: ${{ steps.set-matrix.outputs.matrix }}
+ steps:
+ - name: Select build matrix
+ id: set-matrix
+ shell: bash
+ env:
+ SELECT_ANDROID: ${{ github.event_name != 'workflow_dispatch' || github.event.inputs.android == 'true' }}
+ SELECT_IOS: ${{ github.event_name != 'workflow_dispatch' || github.event.inputs.ios == 'true' }}
+ SELECT_OSX: ${{ github.event_name != 'workflow_dispatch' || github.event.inputs.osx == 'true' }}
+ SELECT_LINUX: ${{ github.event_name != 'workflow_dispatch' || github.event.inputs.linux == 'true' }}
+ SELECT_WINDOWS: ${{ github.event_name != 'workflow_dispatch' || github.event.inputs.windows == 'true' }}
+ run: |
+ python3 - <<'PY' >> "$GITHUB_OUTPUT"
+ import json
+ import os
+
+ entries = [
+ {
+ "target": "Linux",
+ "runner": ["self-hosted", "X64", "Linux"],
+ "abi": "x86_64",
+ "build-type": "Release",
+ "container": "ghcr.io/geniusventures/debian-bullseye:latest",
+ },
+ {
+ "target": "Linux",
+ "runner": "sg-arm-linux",
+ "abi": "aarch64",
+ "build-type": "Release",
+ "container": "ghcr.io/geniusventures/debian-bullseye:latest",
+ },
+ {
+ "target": "Windows",
+ "runner": ["self-hosted", "Windows"],
+ "abi": "",
+ "build-type": "Release",
+ },
+ {
+ "target": "OSX",
+ "runner": "gv-OSX-Large",
+ "abi": "",
+ "build-type": "Release",
+ },
+ {
+ "target": "Android",
+ "runner": "sg-ubuntu-linux",
+ "abi": "arm64-v8a",
+ "build-type": "Release",
+ },
+ {
+ "target": "Android",
+ "runner": ["self-hosted", "X64", "Linux"],
+ "abi": "armeabi-v7a",
+ "build-type": "Release",
+ },
+ {
+ "target": "iOS",
+ "runner": "macos-latest",
+ "abi": "",
+ "build-type": "Release",
+ },
+ ]
+
+ selected = {
+ "android": os.environ["SELECT_ANDROID"].lower() == "true",
+ "ios": os.environ["SELECT_IOS"].lower() == "true",
+ "osx": os.environ["SELECT_OSX"].lower() == "true",
+ "linux": os.environ["SELECT_LINUX"].lower() == "true",
+ "windows": os.environ["SELECT_WINDOWS"].lower() == "true",
+ }
+
+ include = [entry for entry in entries if selected[entry["target"].lower()]]
+ print(f"matrix={json.dumps({'include': include}, separators=(',', ':'))}")
+ PY
+
build:
+ needs: prepare-matrix
env:
GRPC_BUILD_ENABLE_CCACHE: "ON"
GH_TOKEN: ${{ secrets.GNUS_TOKEN_1 }}
@@ -36,40 +130,7 @@ jobs:
password: ${{ secrets.GNUS_TOKEN_1 }}
strategy:
fail-fast: false
- matrix:
- target: [ Android, iOS, OSX, Linux, Windows ]
- build-type: [ Release ]
- abi: [ "" ]
- include:
- - target: Linux
- runner: sg-ubuntu-linux
- abi: "x86_64"
- build-type: "Release"
- container: ghcr.io/geniusventures/debian-bullseye:latest
- - target: Linux
- runner: sg-arm-linux
- abi: "aarch64"
- build-type: "Release"
- container: ghcr.io/geniusventures/debian-bullseye:latest
- - target: Windows
- runner: [ self-hosted, Windows ]
- - target: OSX
- runner: gv-OSX-Large
- - target: Android
- runner: sg-ubuntu-linux
- abi: arm64-v8a
- build-type: "Release"
- - target: Android
- runner: sg-ubuntu-linux
- abi: armeabi-v7a
- build-type: "Release"
- - target: iOS
- runner: macos-latest
- exclude:
- - target: Android
- abi: ""
- - target: Linux
- abi: ""
+ matrix: ${{ fromJSON(needs.prepare-matrix.outputs.matrix) }}
steps:
- name: Configure Git Bash on Windows
if: ${{ runner.environment == 'self-hosted' && matrix.target == 'Windows' }}
@@ -362,7 +423,12 @@ jobs:
- name: Build rlp
working-directory: ${{ github.workspace }}/rlp/${{ env.BUILD_DIRECTORY }}
run: cmake --build . --config ${{ matrix.build-type }} -j
-
+
+ - name: Test rlp (non-mobile)
+ if: ${{ matrix.target != 'Android' && matrix.target != 'iOS' }}
+ working-directory: ${{ github.workspace }}/rlp/${{ env.BUILD_DIRECTORY }}
+ run: ctest . -C ${{ matrix.build-type }} --output-on-failure -V
+
- name: Install rlp
working-directory: ${{ github.workspace }}/rlp/${{ env.BUILD_DIRECTORY }}
run: cmake --install . --config ${{ matrix.build-type }}
diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml
index 0ccef7e..a3e7fa9 100644
--- a/.github/workflows/fuzz.yml
+++ b/.github/workflows/fuzz.yml
@@ -1,13 +1,6 @@
name: Fuzz Testing
on:
- push:
- branches: [ "main", "dev_peer_discovery" ]
- pull_request:
- branches: [ "main", "dev_peer_discovery" ]
- schedule:
- # Run nightly fuzzing for longer duration at 4 AM UTC
- - cron: '0 4 * * *'
workflow_dispatch:
inputs:
duration:
diff --git a/.github/workflows/sanitizers.yml b/.github/workflows/sanitizers.yml
index a28bb09..30a4aa9 100644
--- a/.github/workflows/sanitizers.yml
+++ b/.github/workflows/sanitizers.yml
@@ -1,13 +1,6 @@
name: Memory Safety - Sanitizers
on:
- push:
- branches: [ "main", "dev_peer_discovery" ]
- pull_request:
- branches: [ "main", "dev_peer_discovery" ]
- schedule:
- # Run nightly at 2 AM UTC
- - cron: '0 2 * * *'
workflow_dispatch:
jobs:
diff --git a/.github/workflows/valgrind.yml b/.github/workflows/valgrind.yml
index 310fb9a..a20eeea 100644
--- a/.github/workflows/valgrind.yml
+++ b/.github/workflows/valgrind.yml
@@ -1,13 +1,6 @@
name: Memory Safety - Valgrind
on:
- push:
- branches: [ "main", "dev_peer_discovery" ]
- pull_request:
- branches: [ "main", "dev_peer_discovery" ]
- schedule:
- # Run nightly at 3 AM UTC
- - cron: '0 3 * * *'
workflow_dispatch:
jobs:
diff --git a/.gitignore b/.gitignore
index 745e2aa..37fcd92 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,3 +15,48 @@ build/
# Test log files
/tmp/eth_watch_*.log
examples/*.log
+
+# User-specific stuff
+.idea/**/workspace.xml
+.idea/**/tasks.xml
+.idea/**/usage.statistics.xml
+.idea/**/dictionaries
+.idea/**/shelf
+
+# github copilot
+.idea/**/copilot*
+# AWS User-specific
+.idea/**/aws.xml
+
+# Generated files
+.idea/**/contentModel.xml
+
+# Sensitive or high-churn files
+.idea/**/dataSources/
+.idea/**/dataSources.ids
+.idea/**/dataSources.local.xml
+.idea/**/sqlDataSources.xml
+.idea/**/dynamic.xml
+.idea/**/uiDesigner.xml
+.idea/**/dbnavigator.xml
+
+# CMake
+cmake-build-*/
+
+# File-based project format
+*.iws
+
+# IntelliJ
+out/
+
+# Android studio 3.1+ serialized cache file
+.idea/caches/build_file_checksums.ser
+
+### CLion+iml Patch ###
+# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023
+
+*.iml
+modules.xml
+.idea/misc.xml
+*.ipr
+
diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000..13566b8
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
diff --git a/.idea/editor.xml b/.idea/editor.xml
new file mode 100644
index 0000000..17a8caf
--- /dev/null
+++ b/.idea/editor.xml
@@ -0,0 +1,11 @@
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000..c143cea
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/AgentDocs/AGENT_MISTAKES.md b/AgentDocs/AGENT_MISTAKES.md
index dfedd23..42af03e 100644
--- a/AgentDocs/AGENT_MISTAKES.md
+++ b/AgentDocs/AGENT_MISTAKES.md
@@ -142,7 +142,19 @@ Live network testing cannot distinguish between "our crypto is wrong" and "the p
- When adding a new declaration, always look at the surrounding declarations in the same header and match their Doxygen style exactly.
- Never silently omit `@param` or `@return` for non-trivial declarations.
-### M018 — Using `std::cout` / `std::cerr` for debug output instead of spdlog
+### M019 — Using C++20 `boost::asio::awaitable` in code that must build on C++17 targets
+**What happened**: Async network functions were written using C++20 native coroutines (`boost::asio::awaitable`, `co_await`, `co_return`, `boost::asio::this_coro::executor`). This fails to compile on Debian 11 Bullseye with older Clang/GCC that only support C++17 (e.g. Clang 11/12 shipped with Bullseye).
+**Root cause**: Wrote the first convenient async pattern without checking the minimum compiler/standard requirement for all build targets.
+**Rule**: The project must compile under C++17 (Debian 11 Bullseye is a supported target). **Never use C++20 coroutine keywords** (`co_await`, `co_return`, `co_yield`) or `boost::asio::awaitable` / `boost::asio::this_coro::executor`. Use the C++17-compatible **Boost stackful coroutine** API instead:
+- Replace `boost::asio::awaitable` return types with `T` (returning directly).
+- Add `boost::asio::yield_context yield` as the last parameter to every async function.
+- Replace `co_await op(asio::use_awaitable)` → `op(yield)`.
+- Replace `co_return value` → `return value`.
+- Replace `boost::asio::co_spawn(exec, coro, token)` → `boost::asio::spawn(exec, [](yield_context yield){...})`.
+- Replace `co_await boost::asio::this_coro::executor` → `yield.get_executor()`.
+- Link `Boost::coroutine` and `Boost::context` in CMake target_link_libraries.
+
+### M018 — Using `std::cout` / `std::cerr` for debug output instead of spdlog
**What happened**: When a debug print was needed, `std::cerr` was inserted directly into source code, requiring an `#include <iostream>` and a build cycle to observe behaviour.
**Root cause**: Reaching for the obvious C++ I/O stream instead of using the project's established logging system.
**Rule**: **Never use `std::cout` or `std::cerr` for debug output.** Use the project spdlog system exclusively:
@@ -154,3 +166,11 @@ Live network testing cannot distinguish between "our crypto is wrong" and "the p
3. The global spdlog level is already controlled by the `--log-level` CLI flag in `eth_watch` (and similar entry points). Setting `--log-level debug` will show all `DEBUG` output with zero code changes.
4. `std::cout` is only acceptable for **user-facing program output** (e.g., final results printed to the terminal by design). It is never acceptable for diagnostic or debug output.
+---
+
+## SUBMODULE MANAGEMENT
+
+### M015 — Incorrectly reverting the `build` submodule pointer
+**What happened**: When merging `enr_records` into `copilot/discv5-implementation`, the `build` submodule pointer was updated from `bc5302b` to `f09f0cb` (the latest `main` of cmaketemplate). This was mistakenly reverted to `bc5302b`, which is the *older* commit and lacks Android library support. `f09f0cb` is the correct commit — it adds the `android::log`/`android::android` interface targets and thirdparty directory updates.
+**Rule**: When the `build` submodule pointer changes during a merge, verify whether the new commit is from the latest `main` of cmaketemplate. If it is, keep it (or update to it). The correct workflow to update the submodule is: go into `build/`, run `git pull && git checkout main`, then `cd ..` and `git add build`.
+
diff --git a/AgentDocs/Architecture.md b/AgentDocs/Architecture.md
index 6c290b0..d5ea2be 100644
--- a/AgentDocs/Architecture.md
+++ b/AgentDocs/Architecture.md
@@ -13,12 +13,13 @@ To monitor transactions and event logs for specific smart contracts on EVM-compa
- **Protocol**: Implement discv4 (simpler) or Discv5 (used by some newer chains). Messages include:
- `PING`/`PONG`: Check peer availability.
- `FIND_NODE`/`NEIGHBORS`: Query and receive peer lists.
+- **Current repo status**: The maintained implementation uses `Boost.Asio` UDP sockets and endpoint/address abstractions. The older ENet/raw-socket sketch below is historical and should not be used as the implementation model.
- **C++ Code**:
```cpp
- #include <enet/enet.h> // ENet for UDP networking
+ #include <boost/asio.hpp>
+ #include <boost/system/error_code.hpp>
#include <string>
#include <vector>
- #include <openssl/sha.h> // For keccak256
struct Node {
std::string ip;
@@ -28,27 +29,31 @@ To monitor transactions and event logs for specific smart contracts on EVM-compa
class Discovery {
public:
- Discovery() {
- enet_initialize();
- host_ = enet_host_create(nullptr, 1, 2, 0, 0); // UDP host
+ Discovery()
+ : socket_(io_, boost::asio::ip::udp::v4())
+ {
}
- ~Discovery() { enet_host_destroy(host_); enet_deinitialize(); }
void SendPing(const Node& target) {
std::vector<uint8_t> packet = EncodePing();
- ENetAddress addr{inet_addr(target.ip.c_str()), target.port};
- ENetPeer* peer = enet_host_connect(host_, &addr, 2, 0);
- ENetPacket* enet_packet = enet_packet_create(packet.data(), packet.size(), ENET_PACKET_FLAG_RELIABLE);
- enet_peer_send(peer, 0, enet_packet);
+ boost::system::error_code ec;
+ const auto address = boost::asio::ip::make_address(target.ip, ec);
+ if (ec) {
+ return;
+ }
+
+ const boost::asio::ip::udp::endpoint endpoint(address, target.port);
+ socket_.send_to(boost::asio::buffer(packet), endpoint, 0, ec);
}
- void HandlePacket(ENetEvent& event) {
+ void HandlePacket() {
// Decode packet (PING, PONG, FIND_NODE, NEIGHBORS)
// Update peer list if NEIGHBORS received
}
private:
- ENetHost* host_;
+ boost::asio::io_context io_;
+ boost::asio::ip::udp::socket socket_;
std::vector<uint8_t> EncodePing() {
// RLP-encode PING: [version, from, to, expiration, enr_seq]
// Return serialized bytes
@@ -57,30 +62,33 @@ To monitor transactions and event logs for specific smart contracts on EVM-compa
};
```
- **Notes**:
- - Use ENet for lightweight UDP networking (C-based, minimal).
+ - Use Boost.Asio for cross-platform UDP networking.
- RLP-encode messages per DevP2P specs (see below for RLP).
- Start with chain-specific bootnodes (hardcode from chain docs, e.g., Ethereum’s enodes).
- - Maintain 10-15 peers per chain.
+ - Maintain up to `max_active` concurrent dial attempts per chain (default 25 desktop, 3–5 mobile).
+ - A `DialScheduler` per chain queues discovered peers and recycles dial slots as connections succeed or fail, mirroring go-ethereum's `dialScheduler` pattern (`maxActiveDials = defaultMaxPendingPeers`).
+ - All chain schedulers share a single `boost::asio::io_context` (one thread, cooperative coroutines — no thread-per-chain overhead).
+ - A `WatcherPool` owns a **discv4 singleton** (stays warm across chain switches) and enforces a two-level resource cap: `max_total` (global fd limit) and `max_per_chain` (per-chain dial limit). Sensible defaults: mobile `max_total=12, max_per_chain=3`; desktop `max_total=200, max_per_chain=50`.
#### 2. RLPx Connection (TCP)
- **Purpose**: Establish secure TCP connections for `eth` subprotocol gossip.
- **Protocol**: RLPx uses ECIES for handshakes (encryption/auth) and multiplexes subprotocols.
+- **Current repo status**: The maintained transport path is `src/rlpx/socket/socket_transport.cpp`, which already uses Boost.Asio TCP sockets and timeout handling. The raw POSIX sketch below is historical and should not be used as the implementation model.
- **C++ Code**:
```cpp
+ #include <boost/asio.hpp>
+ #include <boost/asio/ip/tcp.hpp>
#include <openssl/evp.h> // For ECIES
#include <string>
- #include <sys/socket.h>
- #include <arpa/inet.h>
#include <vector>
class RLPxSession {
public:
- RLPxSession(const Node& peer) : peer_(peer) {
- sock_ = socket(AF_INET, SOCK_STREAM, 0);
- sockaddr_in addr;
- addr.sin_addr.s_addr = inet_addr(peer.ip.c_str());
- addr.sin_port = htons(peer.port);
- connect(sock_, (sockaddr*)&addr, sizeof(addr));
+ RLPxSession(boost::asio::io_context& io, const Node& peer)
+ : socket_(io), peer_(peer) {
+ boost::asio::ip::tcp::resolver resolver(io);
+ auto endpoints = resolver.resolve(peer.ip, std::to_string(peer.port));
+ boost::asio::connect(socket_, endpoints);
PerformHandshake();
}
@@ -93,17 +101,17 @@ To monitor transactions and event logs for specific smart contracts on EVM-compa
void SendHello() {
std::vector<uint8_t> hello = EncodeHello();
- send(sock_, hello.data(), hello.size(), 0);
+ boost::asio::write(socket_, boost::asio::buffer(hello));
}
void ReceiveMessage() {
std::vector<uint8_t> buffer(1024);
- int len = recv(sock_, buffer.data(), buffer.size(), 0);
+ const std::size_t len = socket_.read_some(boost::asio::buffer(buffer));
// Decrypt and decode RLP message (e.g., HELLO, STATUS)
}
private:
- int sock_;
+ boost::asio::ip::tcp::socket socket_;
Node peer_;
std::vector<uint8_t> EncodeHello() {
// RLP-encode HELLO: [protocolVersion, clientId, capabilities, port, id]
@@ -297,7 +305,12 @@ To monitor transactions and event logs for specific smart contracts on EVM-compa
- Hardcode bootnodes (from chain docs).
- Set chain ID (Ethereum: 1, Polygon: 137, Base: 8453, BSC: 56).
- Use `eth/66` (or chain-specific version).
-- **Connections**: Run separate Discovery and RLPxSession instances per chain.
+- **Connections**: One `discv4_client` singleton on `WatcherPool` (shared across all chains, stays warm across chain switches). One `DialScheduler` per active chain watcher.
+ - `WatcherPool(max_total, max_per_chain)` — two-level resource cap enforced across all schedulers:
+ - Mobile defaults: `max_total=12, max_per_chain=3` → up to 4 chains simultaneously, 3 fds each
+ - Desktop defaults: `max_total=200, max_per_chain=50`
+ - `start_watcher(chain)` — creates `DialScheduler` for that chain, immediately begins consuming discovered peers
+ - `stop_watcher(chain)` — **async**, no-block: disconnects all active TCP sessions for that chain; coroutines unwind at next yield, fds freed within one io_context cycle; UI never stutters; freed slots immediately available to a new chain watcher
- **Consensus Rules**:
- Ethereum: Post-Merge PoS, verify validator signatures.
- Polygon: PoS, check Heimdall checkpoints.
@@ -305,7 +318,7 @@ To monitor transactions and event logs for specific smart contracts on EVM-compa
- BSC: PoSA, check authority signatures or PoW.
### Challenges and Mitigations
-- **Resource Use**: Limit peers to 10-15 per chain, cache headers in memory (~1MB per 1000 blocks).
+- **Resource Use**: Two-level cap via `WatcherPool(max_total, max_per_chain)`. All coroutines share one `io_context` thread — zero thread overhead per chain. On desktop raise fd limit via `setrlimit(RLIMIT_NOFILE)` at startup. On mobile the low `max_total` keeps fd usage negligible and battery impact minimal. Redundancy is collective via IPFS pubsub — each device only needs a few stable peers per chain.
- **RLP Complexity**: Implement recursive RLP decoding for lists (blocks, receipts).
- **Peer Reliability**: Handle dropped connections with reconnect logic; maintain diverse peers.
- **Chain Quirks**: Test on testnets (Sepolia, Amoy, Base Sepolia, BSC Testnet) for chain-specific behaviors.
@@ -320,7 +333,111 @@ To monitor transactions and event logs for specific smart contracts on EVM-compa
6. Verify block headers for consensus (canonical chain).
7. Log/process matched transactions/logs.
-This C++ implementation ensures decentralized monitoring of your smart contracts across EVM chains using RLPx and `eth` gossip, with minimal dependencies (ENet, OpenSSL). If you need specific message formats or chain bootnodes, let me know!
-
-
-
+This C++ implementation ensures decentralized monitoring of your smart contracts across EVM chains using Boost.Asio-based discovery/RLPx transport plus OpenSSL-backed crypto. If you need specific message formats or chain bootnodes, let me know!
+
+
+
+
+---
+
+## discv5 Module (added 2026-03-15)
+
+A parallel discovery stack that locates Ethereum-compatible peers via the discv5 protocol (specified in the ethereum/devp2p repository; ENR records per EIP-778). It is deliberately additive — the existing discv4 stack and all RLPx/ETH code remain unchanged.
+
+### High-level data flow
+
+```
+BootnodeSource (ENR or enode URIs)
+ │
+ ▼
+discv5_crawler
+ ├── queued_peers_ — next FINDNODE targets
+ ├── measured_ids_ — nodes that replied
+ ├── failed_ids_ — nodes that timed out
+ └── discovered_ids_ — dedup set
+ │
+ │ PeerDiscoveredCallback (ValidatedPeer)
+ ▼
+discovery::ValidatedPeer ← shared with discv4 via include/discovery/discovered_peer.hpp
+ │
+ ▼
+DialScheduler / RLPx session (existing, unchanged)
+```
+
+### New files
+
+```
+include/
+ discovery/
+ discovered_peer.hpp — shared NodeId / ForkId / ValidatedPeer
+ discv5/
+ discv5_constants.hpp — all domain sizes + wire POD structs
+ discv5_error.hpp — discv5Error enum
+ discv5_types.hpp — EnrRecord, discv5Config, callbacks
+ discv5_enr.hpp — EnrParser (decode, verify, to_validated_peer)
+ discv5_bootnodes.hpp — IBootnodeSource, ChainBootnodeRegistry
+ discv5_crawler.hpp — peer queue state machine
+ discv5_client.hpp — UDP socket + async loops
+
+src/discv5/
+ discv5_error.cpp
+ discv5_enr.cpp — base64url, RLP, secp256k1 signature verify
+ discv5_bootnodes.cpp — per-chain seed lists (Ethereum/Polygon/BSC/Base)
+ discv5_crawler.cpp — enqueue/dedup/emit
+ discv5_client.cpp — Boost.Asio spawn/yield_context receive + crawler loops, FINDNODE send
+ CMakeLists.txt
+
+test/discv5/
+ discv5_enr_test.cpp — go-ethereum test vectors
+ discv5_bootnodes_test.cpp — registry and source tests
+ discv5_crawler_test.cpp — deterministic state machine tests
+ CMakeLists.txt
+
+examples/discv5_crawl/
+ discv5_crawl.cpp — live C++ example / functional test harness entry point
+ CMakeLists.txt
+```
+
+### Functional testing note
+
+Discovery functional testing in this repository is done through C++ example programs under `examples/`, not shell wrappers.
+
+The working discv4 reference pattern is `examples/discovery/test_enr_survey.cpp`: a standalone C++ example that drives a bounded live run, gathers counters in memory, and prints a structured report at the end.
+
+`examples/discv5_crawl/discv5_crawl.cpp` should be treated as the corresponding discv5 functional-testing entry point. At the current checkpoint it is still a partial live harness because the packet receive path does not yet decode the full discv5 WHOAREYOU / handshake / NODES flow. Once that path is implemented, this example should provide the same kind of end-of-run C++ diagnostic summary as `test_enr_survey.cpp`.
+
+### Wire-format structs (M014)
+
+All packet-size constants are derived from `sizeof(WireStruct)`, never bare literals:
+
+| Struct | Size | Constant |
+|---|---|---|
+| `IPv4Wire` | 4 B | `kIPv4Bytes` |
+| `IPv6Wire` | 16 B | `kIPv6Bytes` |
+| `MaskingIvWire` | 16 B | `kMaskingIvBytes` |
+| `GcmNonceWire` | 12 B | `kGcmNonceBytes` |
+| `StaticHeaderWire` | 23 B | `kStaticHeaderBytes` |
+| `EnrSigWire` | 64 B | `kEnrSigBytes` |
+| `CompressedPubKeyWire` | 33 B | `kCompressedKeyBytes` |
+| `UncompressedPubKeyWire` | 65 B | `kUncompressedKeyBytes` |
+
+### Supported chains (ChainBootnodeRegistry)
+
+| Chain | ID | Source format |
+|---|---|---|
+| Ethereum mainnet | 1 | ENR (go-ethereum V5Bootnodes) |
+| Ethereum Sepolia | 11155111 | enode (go-ethereum SepoliaBootnodes) |
+| Ethereum Holesky | 17000 | enode (go-ethereum HoleskyBootnodes) |
+| Polygon mainnet | 137 | enode (docs.polygon.technology) |
+| Polygon Amoy | 80002 | enode (docs.polygon.technology) |
+| BSC mainnet | 56 | enode (bnb-chain/bsc params/config.go) |
+| BSC testnet | 97 | enode (bnb-chain/bsc params/config.go) |
+| Base mainnet | 8453 | OP Stack — inject at runtime |
+| Base Sepolia | 84532 | OP Stack — inject at runtime |
+
+### Next sprint
+
+1. Implement the minimal WHOAREYOU / HANDSHAKE session layer required for live discv5 peers to accept queries.
+2. Decode incoming NODES responses and feed decoded peers back into the crawler / callback path.
+3. Make `examples/discv5_crawl/discv5_crawl.cpp` behave like a real example-based functional test, following the same C++ pattern already used by `examples/discovery/test_enr_survey.cpp`: bounded run, in-memory counters, final structured report.
+4. Once the example proves live peer discovery end-to-end, wire `discv5_client` as an alternative to `discv4_client` inside `eth_watch`.
diff --git a/AgentDocs/CHECKPOINT.md b/AgentDocs/CHECKPOINT.md
index 46282cb..3e954ef 100644
--- a/AgentDocs/CHECKPOINT.md
+++ b/AgentDocs/CHECKPOINT.md
@@ -1,127 +1,609 @@
-# Checkpoint — 2026-03-06 (End of Day 8 extended session)
+# Checkpoint Log
-## Build Status
-- **`ninja` builds with zero errors, zero warnings** as of end of session.
-- **`ctest` 441/441 tests pass** (no regressions). The new `HandshakeVectorsTest` compiles and is registered in CTest.
+## Networking portability update — 2026-03-17
+
+### What changed
+
+- The maintained UDP/TCP networking paths are now consistently Boost.Asio-based across production code and local test harnesses.
+- The remaining non-cross-platform address conversion helpers were removed from:
+ - `include/discv4/discv4_ping.hpp`
+ - `src/discv5/discv5_enr.cpp`
+ - the legacy helper path in `include/discv4/discovery.hpp`
+- The remaining raw UDP test sockets were replaced with Boost.Asio sockets in:
+ - `test/discv4/discv4_client_test.cpp`
+ - `test/discv4/enr_client_test.cpp`
+ - `test/discv4/enr_enrichment_test.cpp`
+ - `test/discv5/discv5_client_test.cpp`
+
+### Current networking status
+
+- `src/discv4/discv4_client.cpp` already used Boost.Asio UDP sockets and remains the maintained discv4 transport path.
+- `src/discv5/discv5_client.cpp` already used Boost.Asio UDP sockets and remains the maintained discv5 transport path.
+- `src/rlpx/socket/socket_transport.cpp` already used Boost.Asio TCP sockets and remains the maintained RLPx transport path.
+- No maintained `src/` or `test/` networking path now depends on POSIX `inet_*`, `sockaddr_in`, `sendto`, or `recvfrom` helpers.
+
+### Verified build/test coverage for this update
+
+The following targets were rebuilt successfully:
+
+- `discv4_client_test`
+- `discv4_enr_client_test`
+- `discv4_enr_enrichment_test`
+- `discv4_protocol_test`
+- `discv5_client_test`
+- `discv5_enr_test`
+
+The following test executables were run successfully:
+
+- `./test_bin/discv4_client_test`
+- `./test_bin/discv4_enr_client_test`
+- `./test_bin/discv4_enr_enrichment_test`
+- `./test_bin/discv4_protocol_test`
+- `./test_bin/discv5_client_test`
+- `./test_bin/discv5_enr_test`
+
+### Immediate doc consequence
+
+- Any older architecture or testing note that still suggests ENet, raw POSIX sockets, or `include/rlp/PeerDiscovery/discovery.hpp` as the active discovery implementation path is stale and should not be used.
---
-## What Was Accomplished This Session
-
-### Days 2–7 (prior sessions, already complete)
-- Full ETH/66+ packet encode/decode (transactions, block bodies, new block, receipts)
-- ABI decoder (`eth/abi_decoder.hpp/.cpp`) with keccak256 event signature hashing
-- `EthWatchService` — subscribe/unwatch, `process_message`, `process_receipts`, `process_new_block`
-- `ChainTracker` — deduplication window, tip tracking
-- GNUS.AI contract address constants + unit tests (`gnus_contracts_test`)
-- spdlog integration (`src/base/logger.cpp`, `include/base/logger.hpp`) with `--log-level` CLI arg
-- `eth_watch` example binary wired end-to-end: discv4 discovery → RLPx connect → ETH status → watch events
-- discv4 full bond cycle: PING → wait for PONG → wait for reverse PING → send PONG → send FIND_NODE → parse NEIGHBOURS
-- Magic number cleanup across all `src/` and `include/` files; constants extracted to named `constexpr`
-- All markdown docs moved to `AgentDocs/`
-- `AGENT_MISTAKES.md` created with M001–M016
-
-### Day 8 (this session)
-- **RLPx handshake rewrite** based on direct read of `go-ethereum/p2p/rlpx/rlpx.go`:
- - `create_auth_message`: RLP-encode `[sig, pubkey, nonce, version=4]`, append 100 bytes random padding, EIP-8 prefix (uint16-BE of ciphertext length), ECIES encrypt
- - `parse_ack_message`: read 2-byte length prefix, read body, ECIES decrypt, RLP-decode `[eph_pubkey, nonce, version]`
- - `derive_frame_secrets`: exact port of go-ethereum `secrets()` — ECDH → sharedSecret → aesSecret → macSecret → MAC seeds
- - `FrameCipher` rewrite: exact port of go-ethereum `hashMAC` and `sessionState` (AES-256-CTR enc/dec, running-keccak MAC accumulator, `computeHeader`/`computeFrame`)
-- **`test/rlpx/handshake_vectors_test.cpp`** — new test validating `derive_frame_secrets()` against go-ethereum `TestHandshakeForwardCompatibility` vectors (Auth₂/Ack₂, responder perspective)
-- **`include/rlpx/auth/auth_handshake.hpp`** — `derive_frame_secrets` moved to `public static`; free function `derive_frame_secrets(keys, is_initiator)` added in `auth_handshake.cpp` as test entry point
+## ENR / discv4 filter checkpoint — 2026-03-14
+
+## Current Status
+- The **ENRRequest / ENRResponse** wire path is implemented and unit-tested.
+- The live discovery path now does `bond -> request_enr -> ParseEthForkId -> set DiscoveredPeer.eth_fork_id`.
+- `DialScheduler::filter_fn` and `make_fork_id_filter()` are implemented.
+- `examples/discovery/test_discovery.cpp` is already wired to use the ENR pre-dial filter.
+- The latest live Sepolia run with that filter enabled produced:
+ - `discovered peers: 24733`
+ - `dialed: 0`
+ - `connected (right chain): 0`
+
+### Current interpretation
+1. The ENR wire/unit work is done.
+2. The live failure has moved: it is no longer “missing ENR filter hookup”.
+3. The immediate problem is now **why no usable `eth_fork_id` reaches the filter in the live path**.
+4. The next step is to follow **go-ethereum’s real discv4 ENR flow** and debug the live request/response sequence,
+ not to add more architecture or broad refactors.
---
-## Current Failure Mode (the problem to solve Monday)
+## Files Most Relevant To The Next Step
-The binary connects to discovered Sepolia peers, completes auth (sends auth, receives ack), derives secrets — but **frame MAC verification fails immediately on the first frame**:
+| File | Why it matters now |
+|---|---|
+| `examples/discovery/test_discovery.cpp` | Live Sepolia harness; now sets `scheduler->filter_fn` |
+| `src/discv4/discv4_client.cpp` | Current bond -> ENR -> callback flow |
+| `include/discv4/discv4_client.hpp` | `DiscoveredPeer.eth_fork_id`, `request_enr()` API |
+| `include/discv4/dial_scheduler.hpp` | `FilterFn`, `filter_fn`, `make_fork_id_filter()` |
+| `go-ethereum/p2p/discover/v4_udp.go` | Reference flow for `RequestENR`, `ensureBond`, `Resolve` |
+| `go-ethereum/eth/protocols/eth/discovery.go` | Reference `NewNodeFilter` logic |
+| `test/discv4/enr_client_test.cpp` | Loopback request/reply coverage |
+| `test/discv4/enr_enrichment_test.cpp` | ENR enrichment coverage |
+| `test/discv4/dial_filter_test.cpp` | Pre-dial filtering coverage |
-```
-[debug][rlpx.auth] execute: ack parsed successfully
-[debug][rlpx.frame] decrypt_header: MAC mismatch
-Error: Invalid message
-```
+---
-### Root Cause Hypothesis
-The `FrameCipher::HashMAC` model stores all bytes written and recomputes `keccak256(all_written)` on each `sum()` call. This is correct for the **seed initialisation** phase (go-ethereum's `mac.Write(xor(MAC,nonce)); mac.Write(auth)`) but the `computeHeader` / `computeFrame` operations in go-ethereum update the *running* keccak accumulator in-place — they do NOT restart from the seed bytes.
+## What Is Done
+
+### ENR wire support
+- `include/discv4/discv4_constants.hpp`
+ - Added `kPacketTypeEnrRequest = 0x05`
+ - Added `kPacketTypeEnrResponse = 0x06`
+- `include/discv4/discv4_enr_request.hpp`
+- `src/discv4/discv4_enr_request.cpp`
+ - Minimal `ENRRequest` modeled after go-ethereum `v4wire.ENRRequest{Expiration uint64}`
+- `include/discv4/discv4_enr_response.hpp`
+- `src/discv4/discv4_enr_response.cpp`
+ - Minimal `ENRResponse` modeled after go-ethereum `v4wire.ENRResponse{ReplyTok, Record}`
+ - `ParseEthForkId()` decodes the ENR `eth` entry into a `ForkId`
+
+### discv4 client flow
+- `include/discv4/discv4_client.hpp`
+ - Added `request_enr()`
+ - Extended `PendingReply`
+  - Added `std::optional<ForkId> eth_fork_id` to `DiscoveredPeer`
+- `src/discv4/discv4_client.cpp`
+ - Dispatches packet types 5 and 6
+ - Implements `handle_enr_response()` reply matching via `ReplyTok`
+ - In `handle_neighbours()`, the callback path now enriches peers with ENR-derived `eth_fork_id`
+
+### Pre-dial filter
+- `include/discv4/dial_scheduler.hpp`
+ - Added `FilterFn`
+ - Added `DialScheduler::filter_fn`
+ - Added `make_fork_id_filter()`
+ - `enqueue()` now drops peers that fail the filter before consuming a dial slot
+- `examples/discovery/test_discovery.cpp`
+ - The scheduler is now configured with the Sepolia ENR filter before enqueueing peers
-Specifically, `computeHeader` in go-ethereum does:
-```go
-sum1 := m.hash.Sum(m.hashBuffer[:0]) // peek at current state WITHOUT resetting
-return m.compute(sum1, header) // then write aesBuffer back into hash
-```
-And `m.hash` is a `keccak.NewLegacyKeccak256()` that was seeded once and **continues accumulating** — it is NOT re-hashed from scratch on every call.
+---
-Our `HashMAC::sum()` correctly recomputes `keccak256(written)` which equals `hash.Sum()` only because keccak is deterministic. **BUT** `compute()` then calls `m.hash.Write(aesBuffer)` which appends 16 bytes to the running accumulator. Our `HashMAC::compute_header` / `compute_frame` must also append those 16 `aesBuffer` bytes to `written` after every call, otherwise `sum()` diverges from go-ethereum's `hash.Sum()` after the first frame.
+## Verified Tests
-### The Exact Fix Needed Monday
+The following tests were built and run successfully during this work:
-In `src/rlpx/framing/frame_cipher.cpp`, `HashMAC::compute()` must append the `aesBuffer` XOR result back into `written`:
+- `./test/discv4/discv4_enr_request_test`
+- `./test/discv4/discv4_enr_response_test`
+- `./test/discv4/discv4_enr_client_test`
+- `./test/discv4/discv4_enr_enrichment_test`
+- `./test/discv4/discv4_dial_filter_test`
+- `./test/discv4/discv4_client_test`
+- `./test/discv4/discv4_dial_scheduler_test`
+
+---
-```cpp
-// go-ethereum: m.hash.Write(m.aesBuffer[:])
-write(aes_buf.data(), aes_buf.size()); // keep accumulator in sync
+## Latest Live Result
+
+Command run:
+
+```bash
+cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/SuperGenius/rlp/build/OSX/Debug
+./examples/discovery/test_discovery --log-level warn --timeout 60
```
-This single line is almost certainly the MAC mismatch root cause. The `HandshakeVectorsTest` currently only validates key derivation (AES secret, MAC secret, ingress seed hash) — it does NOT yet exercise `computeHeader`/`computeFrame`. A new `FrameCipherMacTest` with go-ethereum's known frame vectors should be written to verify this fix before live testing.
+Observed result:
+
+- `24733 neighbour peer(s) discovered`
+- `dialed: 0`
+- `connect_failed: 0`
+- `wrong_chain: 0`
+- `status_timeout: 0`
+- `connected (right chain): 0`
+
+### Meaning of that result
+- Discovery itself is active.
+- The pre-dial filter is now blocking every candidate before any dial starts.
+- Therefore the next bug is **not** “hook up the filter”.
+- The next bug is one of these:
+ 1. live `request_enr()` is not successfully completing for real peers,
+ 2. live ENR responses are not being parsed into `eth_fork_id`,
+ 3. the live ENR `eth` entry is absent for most peers,
+ 4. the Sepolia fork-hash assumption used by the filter is wrong for live ENR data,
+ 5. the current sequencing differs from go-ethereum in a way that prevents usable ENR data from reaching the callback.
---
-## Key Files
-
-| File | Purpose |
-|------|---------|
-| `src/rlpx/auth/auth_handshake.cpp` | Handshake: create_auth, parse_ack, derive_frame_secrets |
-| `src/rlpx/auth/ecies_cipher.cpp` | ECIES encrypt/decrypt (OpenSSL) |
-| `src/rlpx/crypto/ecdh.cpp` | secp256k1 ECDH, key generation |
-| `src/rlpx/framing/frame_cipher.cpp` | HashMAC + AES-CTR frame enc/dec — **has the bug above** |
-| `include/rlpx/auth/auth_keys.hpp` | `AuthKeyMaterial`, `FrameSecrets` structs |
-| `include/rlpx/framing/frame_cipher.hpp` | `FrameCipher` public interface |
-| `include/rlpx/rlpx_types.hpp` | All `constexpr` size constants |
-| `test/rlpx/handshake_vectors_test.cpp` | go-ethereum vector test for key derivation |
-| `test/rlpx/frame_cipher_test.cpp` | Round-trip frame enc/dec test (does NOT use go-ethereum vectors yet) |
-| `examples/eth_watch/eth_watch.cpp` | Live CLI tool: `./eth_watch --chain sepolia --log-level debug` |
-| `AgentDocs/AGENT_MISTAKES.md` | Agent error log — **read before writing any code** |
+## Immediate Next Step
+
+Follow the **actual go-ethereum discv4 ENR flow** and debug the live path end-to-end.
+
+### Reference files
+- `go-ethereum/p2p/discover/v4_udp.go`
+ - `RequestENR`
+ - `ensureBond`
+ - `Resolve`
+- `go-ethereum/eth/protocols/eth/discovery.go`
+ - `NewNodeFilter`
+
+### What the next chat should do
+1. Compare `src/discv4/discv4_client.cpp` against go-ethereum’s `RequestENR` flow.
+2. Trace the live path to determine why `DiscoveredPeer.eth_fork_id` is not usable before filtering.
+3. Verify whether ENR requests are actually sent and matched for live peers.
+4. Verify whether live ENR responses contain an `eth` entry and what fork hash they advertise.
+5. Only after that, adjust the live filter/hash or sequencing with the smallest possible change.
+
+## New Chat Handoff Prompt
+
+Use this to start the next chat:
+
+```text
+We already completed the ENRRequest/ENRResponse implementation in the rlp project.
+
+What is already done:
+- ENRRequest / ENRResponse wire support is implemented and unit-tested.
+- discv4_client now does bond -> request_enr -> ParseEthForkId -> set DiscoveredPeer.eth_fork_id.
+- DialScheduler::filter_fn and make_fork_id_filter() are implemented.
+- examples/discovery/test_discovery.cpp is already wired to use the ENR pre-dial filter.
+
+Latest live result:
+- ./examples/discovery/test_discovery --log-level warn --timeout 60
+- discovered peers: 24733
+- dialed: 0
+- connected (right chain): 0
+
+So the current bug is no longer "missing filter hookup". The filter is rejecting everything because no usable eth_fork_id is reaching the live dial path.
+
+Please compare our current live ENR flow against go-ethereum’s actual flow in:
+- go-ethereum/p2p/discover/v4_udp.go
+- go-ethereum/eth/protocols/eth/discovery.go
+
+Focus only on the minimal next step: find why no usable eth_fork_id reaches the filter in the live path, and fix that with the smallest possible change.
+
+Relevant project files:
+- AgentDocs/CHECKPOINT.md
+- examples/discovery/test_discovery.cpp
+- src/discv4/discv4_client.cpp
+- include/discv4/discv4_client.hpp
+- include/discv4/dial_scheduler.hpp
+- test/discv4/enr_client_test.cpp
+- test/discv4/enr_enrichment_test.cpp
+- test/discv4/dial_filter_test.cpp
+```
+
---
-## How to Run
+
+## Quick Commands For The Next Chat
```bash
-cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp/build/OSX/Debug
-ninja # build
-ctest --output-on-failure # run all 441 tests
+cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/SuperGenius/rlp/build/OSX/Debug
+ninja
+
+./test/discv4/discv4_enr_request_test
+./test/discv4/discv4_enr_response_test
+./test/discv4/discv4_enr_client_test
+./test/discv4/discv4_enr_enrichment_test
+./test/discv4/discv4_dial_filter_test
+./test/discv4/discv4_client_test
+./test/discv4/discv4_dial_scheduler_test
+
+./examples/discovery/test_discovery --log-level warn --timeout 60
+./examples/discovery/test_discovery --log-level debug --timeout 60
+```
+
+
+---
+
+## discv5 Implementation — Sprint Checkpoint (2026-03-16)
+
+### Current implementation state
+
+A parallel `discv5` peer discovery module is present beside the existing `discv4` stack. The current branch reflects the post-merge state with the local build fixes applied.
+
+Most importantly, the current `discv5` code is now aligned with the project's C++17 rule and with the same Boost stackful coroutine style used by `discv4`:
+
+- `cmake/CommonBuildParameters.cmake` sets `CMAKE_CXX_STANDARD 17`
+- `src/discv5/CMakeLists.txt` uses `cxx_std_17`
+- `src/discv5/CMakeLists.txt` links `Boost::context` and `Boost::coroutine`, matching `src/discv4/CMakeLists.txt`
+- `src/discv5/discv5_client.cpp` uses `boost::asio::spawn(...)` and `boost::asio::yield_context`
+- No `co_await`, `co_return`, `boost::asio::awaitable`, or `co_spawn` remain in the current `discv5` implementation
+
+This means the earlier native-coroutine description is stale and should not be used as the current mental model for `discv5`.
+
+#### New files
+
+| Path | Purpose |
+|---|---|
+| `include/discovery/discovered_peer.hpp` | Shared `NodeId`, `ForkId`, `ValidatedPeer` handoff contract (used by both discv4 and discv5) |
+| `include/discv5/discv5_constants.hpp` | All domain constants + wire POD structs with `sizeof()`-derived sizes |
+| `include/discv5/discv5_error.hpp` | `discv5Error` enum + `to_string()` declaration |
+| `include/discv5/discv5_types.hpp` | `EnrRecord`, `Discv5Peer`, `discv5Config`, callback aliases |
+| `include/discv5/discv5_enr.hpp` | `EnrParser` – decode/verify ENR URIs |
+| `include/discv5/discv5_bootnodes.hpp` | `IBootnodeSource`, `StaticEnrBootnodeSource`, `StaticEnodeBootnodeSource`, `ChainBootnodeRegistry` |
+| `include/discv5/discv5_crawler.hpp` | `discv5_crawler` – queued/measured/failed/discovered peer sets |
+| `include/discv5/discv5_client.hpp` | `discv5_client` – UDP socket + receive loop + crawler loop |
+| `src/discv5/*.cpp` | Implementation files (error, enr, bootnodes, crawler, client) |
+| `src/discv5/CMakeLists.txt` | `discv5` static library |
+| `test/discv5/discv5_enr_test.cpp` | ENR parser tests using real go-ethereum test vectors |
+| `test/discv5/discv5_bootnodes_test.cpp` | Bootnode source and chain registry tests |
+| `test/discv5/discv5_crawler_test.cpp` | Deterministic crawler state machine tests |
+| `test/discv5/CMakeLists.txt` | Test executables |
+| `examples/discv5_crawl/discv5_crawl.cpp` | C++ live example / functional-harness entry point for discv5 |
+| `examples/discv5_crawl/CMakeLists.txt` | Example target wiring |
+
+#### Supported chains (bootnode registry)
+
+- Ethereum mainnet (ENR from go-ethereum V5Bootnodes) / Sepolia / Holesky
+- Polygon mainnet / Amoy testnet
+- BSC mainnet / testnet
+- Base mainnet / Base Sepolia (OP Stack — seed list populated at runtime)
+
+#### Architecture
+
+```
+BootnodeSource / ENR URI
+ │
+ ▼
+discv5_crawler (queued → FINDNODE → discovered)
+ │
+ │ PeerDiscoveredCallback
+ ▼
+ValidatedPeer (= discovery::ValidatedPeer)
+ │
+ ▼
+existing DialScheduler / RLPx path (unchanged)
+```
+
+### What is verified today
+
+- ENR URI parsing, base64url decoding, and signature verification are covered by `test/discv5/discv5_enr_test.cpp`
+- Per-chain bootnode registry wiring is covered by `test/discv5/discv5_bootnodes_test.cpp`
+- Crawler queue / dedup / lifecycle state is covered by `test/discv5/discv5_crawler_test.cpp`
+- `examples/CMakeLists.txt` includes `examples/discv5_crawl/`, so the live discv5 example target is part of the examples build
+- `examples/discv5_crawl/discv5_crawl.cpp` is the current C++ entry point intended for functional testing of the live discv5 path
+- `examples/discovery/test_enr_survey.cpp` is the closest existing example of the intended functional-test shape for a live discovery diagnostic binary
+
+### What is not working yet for functional testing
+
+`examples/discv5_crawl/discv5_crawl.cpp` exists and starts the client, but it is not yet a complete functional discovery test in the way `examples/discovery/test_enr_survey.cpp` is for discv4 diagnostics.
+
+The current gaps, verified from the actual source, are:
+
+1. `src/discv5/discv5_client.cpp::handle_packet()` only logs receipt of packets; it does not yet decode WHOAREYOU, handshake, or NODES messages.
+2. `src/discv5/discv5_client.cpp::send_findnode()` currently sends a minimal plaintext FINDNODE datagram, but discv5 needs the real session / handshake path before live peers will treat it as a valid query.
+3. `src/discv5/discv5_crawler.cpp::emit_peer()` exists, but the current client receive path does not yet decode incoming peer records and feed them back into the crawler emission path.
+4. Because of the above, `examples/discv5_crawl/discv5_crawl.cpp` is currently a live harness / smoke entry point, not yet a full end-to-end functional discovery test.
+
+### Design rules applied
+
+- **M012**: No bare integer literals — every value has a named `constexpr`.
+- **M014**: All wire sizes derived from `sizeof(WireStruct)` — see `StaticHeaderWire`, `IPv4Wire`, `IPv6Wire`, etc.
+- **M011**: No `if/else` string dispatch — used `switch(ChainId)` and `unordered_map`.
+- **M019**: Async flow is written with Boost stackful coroutines (`spawn` + `yield_context`) for C++17 compatibility, matching the project rule and the `discv4` pattern.
+- **M018**: `spdlog` via `logger_->info/warn/debug` — no `std::cout`.
+- **M015**: All constants inside `namespace discv5`.
+- **M017**: Every public declaration has a Doxygen `///` comment.
+
+### Next steps for C++ functional testing
+
+Functional testing for discovery in this repo should follow the same pattern already used by the C++ examples under `examples/`, not shell scripts. The closest working reference is `examples/discovery/test_enr_survey.cpp`.
+
+For `discv5`, the next work should focus on making `examples/discv5_crawl/discv5_crawl.cpp` useful as that same kind of C++ functional test harness.
+
+#### Reference pattern to follow
+
+Use `examples/discovery/test_enr_survey.cpp` as the model:
+
+- it is a standalone C++ example target under `examples/`
+- it is wired from the examples CMake tree like the other discovery example binaries
+- it drives the live protocol from inside C++
+- it collects counters and diagnostic results in memory
+- it prints a structured end-of-run report for manual inspection
+- it does not depend on shell wrappers to perform the functional test itself
-# Run new vector test only
-./test/rlpx/rlpx_handshake_vectors_tests
+#### Minimal remaining work for `examples/discv5_crawl/discv5_crawl.cpp`
-# Live Sepolia test
-./examples/eth_watch/eth_watch --chain sepolia --log-level debug
+1. Implement the minimal discv5 WHOAREYOU / handshake path needed for live peers to accept the query flow.
+2. Decode incoming NODES replies in `src/discv5/discv5_client.cpp`.
+3. Convert decoded peer records into `ValidatedPeer` values and feed them into the crawler path.
+4. Wire successful peer emission to the existing `PeerDiscoveredCallback` so the example can observe real discoveries.
+5. Keep the functional test in C++ under `examples/`.
+
+#### Recommended example-style testing shape
+
+Once the packet path above exists, `examples/discv5_crawl/discv5_crawl.cpp` should behave as a functional survey binary similar in spirit to `examples/discovery/test_enr_survey.cpp`:
+
+- start the `discv5_client`
+- seed from `ChainBootnodeRegistry`
+- run for a bounded timeout inside `boost::asio::io_context`
+- count packets received, peers decoded, peers emitted, and failures/timeouts
+- print a final summary from inside C++
+
+That gives the repo a real `discv5` functional test entry point under `examples/` without depending on shell-driven orchestration.
+
+### go-ethereum reference used
+
+```
+/tmp/go-ethereum/ (shallow clone for this session)
```
+Key files read:
+- `p2p/enr/enr.go` — ENR record structure and signature scheme
+- `p2p/enode/idscheme.go` — V4ID sign/verify, NodeAddr derivation
+- `p2p/enode/node_test.go` — TestPythonInterop and parseNodeTests (test vectors)
+- `p2p/enode/urlv4_test.go` — Valid/invalid ENR URI test vectors
+- `p2p/discover/v5wire/msg.go` — FINDNODE / NODES message types
+- `p2p/discover/v5wire/encoding.go` — StaticHeader wire layout
+- `params/bootnodes.go` — Real ENR/enode bootnode strings
+
---
-## Monday Task List (priority order)
+## discv5 Repair Checkpoint (2026-03-16, current build-blocker)
+
+### Current state
-1. **Fix `HashMAC::compute()` in `frame_cipher.cpp`** — append `aesBuffer` bytes into `written` after every `compute()` call. This is the single most likely cause of the MAC mismatch.
+- `src/discv5/discv5_client.cpp` is currently build-broken after a large in-progress edit.
+- The file contains literal patch markers (`+`) in source around `parse_handshake_auth(...)` and around `handshake_packet_count()/nodes_packet_count()`.
+- There are container type mismatches in `make_local_enr_record(...)` where `RlpEncoder::MoveBytes()` values (`rlp::Bytes`) are assigned/returned as `std::vector` without conversion.
+- The failure is localized to `src/discv5/discv5_client.cpp.o`; this must be repaired in place with tiny edits only.
-2. **Write `FrameCipherMacTest` using go-ethereum frame vectors** — go-ethereum `TestFrameRW` in `p2p/rlpx/rlpx_test.go` has known plaintexts and expected ciphertexts. Use those to verify `encrypt_frame` / `decrypt_frame` produce identical output before retrying live connection.
+### Known compiler errors (from latest failed build)
-3. **Re-run live test** — after #1 and #2 pass, `./eth_watch --chain sepolia --log-level debug` should reach `HELLO from peer: Geth/...`.
+- `no viable conversion from 'std::basic_string' to 'std::vector'` (around lines ~767 and ~821)
+- `expected expression` and `expected external declaration` caused by stray `+` markers (around lines ~997, ~1011, ~1154)
-4. **ETH STATUS handling** — after HELLO, send ETH Status message (message id 0x10, network_id=11155111, genesis hash, fork id). Currently `EthWatchService::process_message` dispatches on message ids but the STATUS exchange is not fully wired in `rlpx_session.cpp`.
+### Required repair approach (next chat)
-5. **NewBlockHashes → GetBlockBodies → GetReceipts pipeline** — once STATUS succeeds, implement the receipt-fetching loop in `EthWatchService`.
+1. Edit `src/discv5/discv5_client.cpp` in place; do not rewrite or replace the file.
+2. Remove only stray literal diff markers and duplicate fragment residue.
+3. Fix only the `rlp::Bytes`/`std::vector` boundaries with explicit conversions.
+4. Rebuild immediately after each small fix until `src/discv5/discv5_client.cpp.o` compiles.
+5. After compile recovers, run current discv5 tests:
+ - `test/discv5/discv5_enr_test`
+ - `test/discv5/discv5_bootnodes_test`
+ - `test/discv5/discv5_crawler_test`
+ - `test/discv5/discv5_client_test`
+
+### Scope guard
+
+- No refactor, rename, architecture changes, or broad cleanup.
+- Keep behavior unchanged except what is required to restore compile/test health.
---
-## go-ethereum Reference
-The local copy of go-ethereum is at:
+## discv5 Functional Checkpoint (2026-03-16, post-repair)
+
+### Current state
+
+- The previous `src/discv5/discv5_client.cpp` build breakage is resolved.
+- `discv5_client` and `discv5_crawl` now build and run in `build/OSX/Debug`.
+- `test/discv5/discv5_client_test` is green after the in-place repairs and test expectation alignment.
+- The `discv5_crawl` live harness now reaches callback peer emissions from decoded `NODES` responses.
+
+### Key technical fix that unlocked live discovery
+
+- Outbound encrypted packet construction had an AAD/header mismatch bug:
+ - code previously encoded a header to produce AAD,
+ - encrypted against that AAD,
+ - then re-encoded a new header before send.
+- This was fixed by appending ciphertext to the originally encoded header packet (same AAD/header bytes), without re-encoding.
+- The fix was applied in:
+ - session `FINDNODE` message send path,
+ - handshake send path,
+ - `NODES` response send path.
+
+### Additional parity/diagnostic updates applied
+
+- `discv5` target links `rlpx` (required for `rlpx::crypto::Ecdh::generate_ephemeral_keypair()`).
+- `WHOAREYOU` `record_seq` is now sent as `0`.
+- Handshake ENR attachment is conditional on remote `record_seq` state.
+- `discv5_crawl` now initializes local discv5 keypair (`cfg.private_key` / `cfg.public_key`) before start.
+- Detailed handshake/message diagnostics are now gated behind `--log-level trace`.
+- Per-peer discovery callback logs in `discv5_crawl` are reduced from `info` to `debug`.
+
+### Latest observed functional outcome
+
+- `discv5_crawl --chain ethereum --timeout 3 --log-level info` shows:
+ - non-zero `callback discoveries`,
+ - non-zero `nodes packets`,
+ - `run status: callback_emissions_seen`.
+
+This confirms the current discv5 harness performs real live discovery and peer emission.
+
+### Next steps
+
+1. Add/extend a Sepolia functional connect harness that uses `discv5` discovery and proves at least 3 right-chain connections.
+2. Keep `eth_watch` unchanged until the Sepolia connect milestone is stable.
+3. After that, add an opt-in discv5 discovery mode to `examples/eth_watch/eth_watch.cpp` and validate event flow with a sent transaction.
+
+---
+
+## discv5 Sepolia Connect Checkpoint (2026-03-17)
+
+### Commands run and observed outcomes
+
+1. Pure callback mode, no fork filter:
+
+```bash
+cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/SuperGenius/rlp/build/OSX/Debug
+ninja test_discv5_connect
+./examples/discovery/test_discv5_connect --timeout 20 --connections 1 --log-level debug --seeded off --require-fork off --enqueue-bootstrap-candidates off
```
-/Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp/rlp/ (go-ethereum source)
+
+Observed result:
+
+- `dialed: 73`
+- `connect failed: 69`
+- `connected (discv5): 0`
+- `filtered bad peers: 11`
+- `candidates seen: 84`
+- `discovered peers: 73`
+
+The failures were almost entirely pre-ETH and happened during RLPx auth / ack reception:
+
+- `read_exact(ack length prefix) failed`
+- `ack length 4911 exceeds EIP-8 max 2048`
+
+2. Fork-filtered mode:
+
+```bash
+cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/SuperGenius/rlp/build/OSX/Debug
+ninja test_discv5_connect
+./examples/discovery/test_discv5_connect --timeout 20 --connections 1 --log-level debug --seeded off --require-fork on --enqueue-bootstrap-candidates off
+```
+
+Observed result:
+
+- `dialed: 0`
+- `connect failed: 0`
+- `candidates seen: 0`
+- `discovered peers: 0`
+
+This confirms the `--require-fork on` failure is upstream of dialing.
+
+### Verified Sepolia fork-hash status
+
+- `examples/chains.json` contains `"sepolia": "268956b6"`.
+- `examples/chain_config.hpp` loads this value from `chains.json` and falls back only if the file/key is missing.
+- `examples/discovery/test_discv5_connect.cpp` uses fallback `{ 0x26, 0x89, 0x56, 0xb6 }`.
+- `AgentDocs/SEPOLIA_TEST_PARAMS.md` documents current Sepolia fork hash as `26 89 56 b6`.
+
+Live confirmation from the existing ENR survey harness:
+
+```bash
+cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/SuperGenius/rlp/build/OSX/Debug
+ninja test_enr_survey
+./examples/discovery/test_enr_survey --timeout 20 --log-level info
+```
+
+Observed result:
+
+- `Peers WITH eth_fork_id: 522`
+- Sepolia expected hash `26 89 56 b6` was present in live ENR data
+
+Conclusion: the current Sepolia fork hash used by the harness is correct.
+
+### Current discv5-specific failure identified
+
+The current `discv5` path is discovering peers, but the discovered callback path is not surfacing `eth_fork_id`.
+
+Verified with:
+
+```bash
+cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/SuperGenius/rlp/build/OSX/Debug
+./examples/discv5_crawl/discv5_crawl --chain sepolia --timeout 20 --log-level debug
```
-Key files to read:
-- `p2p/rlpx/rlpx.go` — frame cipher, handshake (already read this session)
-- `p2p/rlpx/rlpx_test.go` — `TestFrameRW`, `TestHandshakeForwardCompatibility` vectors
-- `eth/protocols/eth/handler.go` — ETH STATUS, NewBlockHashes dispatch
+
+Observed result:
+
+- `callback discoveries : 84`
+- `discovered : 84`
+- `wrong_chain : 0`
+- `no_eth_entry: 0`
+- every debug discovery line printed `eth_fork=no`
+
+Example lines from the run:
+
+- `Discovered peer 1 150.241.96.23:9222 eth_fork=no`
+- `Discovered peer 2 65.21.79.59:13000 eth_fork=no`
+- `Discovered peer 53 138.68.123.152:30303 eth_fork=no`
+
+This means:
+
+1. the current Sepolia fork hash is not the reason `--require-fork on` yields zero peers,
+2. the current `discv5` connect path is effectively filtering on missing fork metadata,
+3. the next bug is why the current `discv5` discovery path produces peers with no `eth_fork_id`, even though the discv4 ENR survey proves Sepolia `eth` entries do exist live.
+
+### devp2p cross-checks from failing tuples
+
+The exact failing tuples from `test_discv5_connect` were checked with workspace `go-ethereum` `devp2p rlpx ping`.
+
+Observed examples:
+
+- `65.21.79.59:13000` → `message too big`
+- `185.159.108.216:4001` → `message too big`
+- `150.241.96.23:9222` → `connection reset by peer`
+
+This confirms that at least some pure-mode failing tuples are genuinely bad RLPx targets as dialed, not uniquely rejected by the local client.
+
+### Next step for the next chat
+
+Do not chase the Sepolia fork hash any further.
+
+Focus on the actual current gap:
+
+1. trace why the `discv5` discovered peers all show `eth_fork=no`,
+2. determine whether the incoming discv5 ENRs truly lack the `eth` entry or whether `discv5` ENR decoding is not surfacing it,
+3. only after that, revisit fork-filtered dialing.
diff --git a/AgentDocs/CLAUDE.md b/AgentDocs/CLAUDE.md
index 3a24922..1a00bbf 100644
--- a/AgentDocs/CLAUDE.md
+++ b/AgentDocs/CLAUDE.md
@@ -1,7 +1,7 @@
# RLP Development Guide
## General Instructions
-You are an expert C++ software engineer working exclusively on the GNUS.AI Super Genius blockchain project.
+You are a Junior C++ software engineer working exclusively on the GNUS.AI Super Genius blockchain project. You are working with the Senior C++ engineer (user) and are learning his/her preferences and knowledge of the codebase. So, you are very careful to follow their style and guidelines, and to learn best practices from them. You are careful not to break the project, nor slow down the Senior C++ engineer.
**MANDATORY RULES – NEVER VIOLATE THESE**
@@ -50,6 +50,15 @@ Your default mode is “tiny, surgical insertion into existing code”.
- This means do **NOT** add debug strings in the code, then compile and run to see if they work.
- Instead, if there is a bug, the agent should ask the user to debug the code to find the bug's root cause
+**When dealing with a bug**
+- When I report a bug, or you find one, ask the user for options
+ - Don't start by trying to fix it. Instead, start by writing a test that reproduces the bug.
+ - Then, have subagents try to fix the bug and prove it with a passing test.
+
+**Tool preference**
+- Prefer the workspace file reader and workspace directory tools over `grep_search` for reading and exploring files.
+- Use `grep_search` only as a last resort when you need to search across many files for a pattern and the workspace tools are insufficient.
+
## Important Guidelines
- Do not commit changes without explicit user permission.
- When I report a bug, don't start by trying to fix it. Instead, start by writing a test that reproduces the bug. Then, have subagents try to fix the bug and prove it with a passing test.
@@ -61,9 +70,13 @@ Your default mode is “tiny, surgical insertion into existing code”.
- Always run the linter before committing.
- Always run the formatter before committing.
- Always run the build before committing.
-- Always run in interactive mode with the user on a step by step basis
+- Always run in interactive mode with the user on a step-by-step basis
- Always look in AgentDocs for other instructions.
- The files can include SPRINT_PLAN.md, Architecture.md, CHECKPOINT.md, AGENT_MISTAKES.md
+- Always make sure to only use C++17 features and below.
+ - For instance boost::coroutines only work in C++20, do NOT use it.
+ - Make sure not to use other C++ versions' features above C++17
+ - Do NOT use designated initializers (for example, `{.field = value}`); they require C++20 and break MSVC C++17 builds (`C7555`).
## Build Commands
@@ -91,7 +104,7 @@ ninja
- Line length: 120 characters maximum
- Classes/Methods: PascalCase
- Variables: camelCase
-- Constants: ALL_CAPS
+- Constants: prefer `constexpr` / `inline constexpr` named `kCamelCase`; avoid ALL_CAPS `#define` value constants
- Parentheses: space after opening and before closing: `if ( condition )`
- Braces: Each on their own line
- Error Handling: Use outcome::result pattern for error propagation
@@ -119,7 +132,7 @@ ninja
### Language Fundamentals
- Adapt your programming style based on the C++ sublanguage you're using (C, Object-Oriented C++, Template C++, STL)
-- Replace #define constants with const objects or enums
+- Replace `#define` value constants with `constexpr` / `inline constexpr` named `kCamelCase` (or enums when appropriate); reserve macros for header guards and unavoidable platform/compiler integration
- Replace function-like macros with inline functions
- Use const everywhere possible: objects, parameters, return types, and member functions
- Always initialize objects before use; prefer member initialization lists over assignments in constructor bodies
@@ -225,7 +238,7 @@ ninja
- Declare overriding functions override: catches interface mismatches, enables better refactoring, documents intent
- Prefer const_iterators to iterators: const-correctness, C++11 makes them practical with cbegin/cend
- Declare functions noexcept if they won't emit exceptions: enables optimizations (especially for move operations), required for some STL containers
-- Use constexpr whenever possible: computed at compile-time, usable in constant expressions, broader scope than const
+- Use `constexpr` whenever possible; in headers prefer C++17 `inline constexpr` for shared named constants following the project's `kCamelCase` convention
- Make const member functions thread-safe: use mutex for mutable data, consider std::atomic for simple cases
- Understand special member function generation: default constructor, destructor, copy ops, move ops; generation rules depend on what you declare
diff --git a/AgentDocs/COMMANDS_REFERENCE.md b/AgentDocs/COMMANDS_REFERENCE.md
index 2ac1da0..15a8e14 100644
--- a/AgentDocs/COMMANDS_REFERENCE.md
+++ b/AgentDocs/COMMANDS_REFERENCE.md
@@ -1,17 +1,5 @@
# Quick Commands Reference
-## One-Liner Tests
-
-### Sepolia Testnet (Recommended for Testing)
-```bash
-cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp && ./test_eth_watch.sh sepolia
-```
-
-### Ethereum Mainnet
-```bash
-cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp && ./test_eth_watch.sh mainnet
-```
-
## Manual Steps
### Step 1: Get a Live Peer
@@ -37,7 +25,7 @@ echo "Host: $HOST, Port: $PORT, Pubkey: $PUBKEY"
### Step 3: Connect
```bash
-cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp/build/OSX/Debug
+cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/SuperGenius/rlp/build/OSX/Debug
./eth_watch "$HOST" "$PORT" "$PUBKEY"
```
@@ -45,26 +33,26 @@ cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp/buil
### Clean build
```bash
-cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp/build/OSX/Debug
+cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/SuperGenius/rlp/build/OSX/Debug
ninja clean && ninja eth_watch
```
### Run all tests
```bash
-cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp/build/OSX/Debug
+cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/SuperGenius/rlp/build/OSX/Debug
ninja test
```
### Run specific test
```bash
-cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp/build/OSX/Debug
+cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/SuperGenius/rlp/build/OSX/Debug
./rlp_decoder_tests
```
### Using bootstrap nodes (for reference)
```bash
# These won't send block data, but will connect
-cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp/build/OSX/Debug
+cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/SuperGenius/rlp/build/OSX/Debug
./eth_watch --chain sepolia # Uses bootstrap node (no messages)
./eth_watch --chain mainnet # Uses bootstrap node (no messages)
./eth_watch --chain polygon # Uses bootstrap node (no messages)
@@ -87,7 +75,7 @@ cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp/buil
## File Locations
```
-Project Root: /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp/
+Project Root: /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/SuperGenius/rlp/
Key Files:
- ./test_eth_watch.sh (Automated test)
diff --git a/AgentDocs/QUICK_TEST_GUIDE.md b/AgentDocs/QUICK_TEST_GUIDE.md
index 7d83308..14a645f 100644
--- a/AgentDocs/QUICK_TEST_GUIDE.md
+++ b/AgentDocs/QUICK_TEST_GUIDE.md
@@ -10,11 +10,11 @@ I've created two new resources to help you test with real Ethereum peers:
- Provides examples and scripts
### 2. **test_eth_watch.sh** (Automated!)
-Located in: `/Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp/`
+Located in: `/Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/SuperGenius/rlp/`
Usage:
```bash
-cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp
+cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/SuperGenius/rlp
./test_eth_watch.sh sepolia # Test Sepolia testnet
./test_eth_watch.sh mainnet # Test Ethereum mainnet
```
@@ -42,7 +42,7 @@ HOST=$(echo "$PEER" | sed 's/.*@\([^:]*\):.*/\1/')
PORT=$(echo "$PEER" | sed 's/.*:\([0-9]*\)$/\1/')
# 3. Connect
-cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp/build/OSX/Debug
+cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/SuperGenius/rlp/build/OSX/Debug
./eth_watch "$HOST" "$PORT" "$PUBKEY"
```
@@ -76,7 +76,7 @@ NewBlockHashes: 1 hash
## Files Created/Updated
```
-/Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp/
+ /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/SuperGenius/rlp/
├── PUBLIC_NODES_FOR_TESTING.md (NEW - Complete reference guide)
├── test_eth_watch.sh (NEW - Automated test script)
├── WHY_NO_MESSAGES.md (Created earlier - explains bootstrap vs peers)
@@ -94,7 +94,7 @@ NewBlockHashes: 1 hash
## Next Steps
1. **For Quick Testing**: Run `./test_eth_watch.sh sepolia`
-2. **For Production**: Implement full discv4 discovery in `discovery.hpp`
+2. **For Discovery Debugging**: Use the maintained C++ discovery harnesses under `examples/discovery/` (for example `test_discovery.cpp` and `test_enr_survey.cpp`)
3. **For Development**: Use a local Geth node with `--http --http.api admin,web3,eth,net`
---
diff --git a/AgentDocs/SEPOLIA_TEST_PARAMS.md b/AgentDocs/SEPOLIA_TEST_PARAMS.md
index b7d4b3e..0406bf9 100644
--- a/AgentDocs/SEPOLIA_TEST_PARAMS.md
+++ b/AgentDocs/SEPOLIA_TEST_PARAMS.md
@@ -1,13 +1,33 @@
# Sepolia Test Parameters for eth_watch
-## Quick Answer
+## Current Sepolia Fork Hash (as of March 2026)
+
+The Sepolia chain is post-BPO2. Forks applied (all timestamps):
+- MergeNetsplit block 1735371
+- Shanghai 1677557088
+- Cancun 1706655072
+- Prague 1741159776 (passed ~March 5, 2025)
+- Osaka 1760427360 (passed ~October 14, 2025)
+- BPO1 1761017184 (passed ~October 21, 2025)
+- BPO2 1761607008 (passed ~October 28, 2025)
+
+**Current ENR/Status ForkId:** `{ 0x26, 0x89, 0x56, 0xb6 }`, Next=0
+
+Verified via `go-ethereum/core/forkid/forkid_test.go` SepoliaChainConfig test vectors
+and confirmed by live `test_enr_survey` run (March 14, 2026 — only hash `26 89 56 b6`
+matched current Sepolia peers in the ENR survey).
+
+> **Do NOT use `0xed, 0x88, 0xb5, 0xfd`** — that was the Prague hash with Next=1760427360,
+> valid only before Osaka launched (~Oct 2025). It will match zero live peers today.
+
+
To test `eth_watch` with a public Sepolia node, you can use one of the **bootstrap nodes** (though they won't send block data, they will at least connect):
### Option 1: Use Bootstrap Node (Will Connect, No Block Data)
```bash
-cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp/build/OSX/Debug
+cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/SuperGenius/rlp/build/OSX/Debug
# Using the first Sepolia bootstrap node
./eth_watch 138.197.51.181 30303 4e5e92199ee224a01932a377160aa432f31d0b351f84ab413a8e0a42f4f36476f8fb1cbe914af0d9aef0d51665c214cf653c651c4bbd9d5550a934f241f1682b
@@ -22,7 +42,7 @@ cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp/buil
### Option 2: Use --chain Flag (Easiest)
```bash
-cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp/build/OSX/Debug
+cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/SuperGenius/rlp/build/OSX/Debug
./eth_watch --chain sepolia
```
@@ -113,19 +133,20 @@ Some public infrastructure providers run full nodes that accept p2p connections:
However, most public RPC endpoints **don't expose p2p ports** for security reasons.
-### Option C: Complete discv4 Implementation
+### Option C: Use the maintained discovery harnesses
+
+Use the current C++ discovery flow under `discv4_client` / `DialScheduler` via:
+1. `examples/discovery/test_discovery.cpp`
+2. `examples/discovery/test_enr_survey.cpp`
+3. the existing bootnode registry and ENR filter wiring
-Implement the full discv4 protocol in `/include/rlp/PeerDiscovery/discovery.hpp` to:
-1. Send PING to bootstrap nodes
-2. Receive PONG + NEIGHBOURS responses
-3. Extract real peer enodes from NEIGHBOURS
-4. Connect to those peers with eth_watch
+Those paths exercise the maintained discovery implementation instead of the old `discovery.hpp` sketch.
## Summary
**For quick testing right now:**
```bash
-cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp/build/OSX/Debug
+cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/SuperGenius/rlp/build/OSX/Debug
# Easiest - use --chain flag
./eth_watch --chain sepolia
@@ -136,5 +157,5 @@ cd /Users/Shared/SSDevelopment/Development/GeniusVentures/GeniusNetwork/rlp/buil
**Expected result:** Connection succeeds, HELLO exchange works, but no block messages (because it's a bootstrap node).
-**To get block messages:** You need to implement discv4 discovery or run your own Geth node.
+**To get block messages:** You need to use the maintained discovery harnesses to find real peers, or run your own Geth node.
diff --git a/AgentDocs/WHY_NO_MESSAGES.md b/AgentDocs/WHY_NO_MESSAGES.md
index 52b165e..6e0f03f 100644
--- a/AgentDocs/WHY_NO_MESSAGES.md
+++ b/AgentDocs/WHY_NO_MESSAGES.md
@@ -96,19 +96,20 @@ geth --http --http.api admin,web3,eth,net --http.addr 127.0.0.1
Then query for its discovered peers and connect to them.
-### Option 3: Implement discv4 Properly
+### Option 3: Use the maintained discovery harnesses
-Complete the discv4 protocol implementation in:
+The maintained discovery implementation lives under the current `discv4_client` / `DialScheduler` path, with live harnesses under:
```
-/include/rlp/PeerDiscovery/discovery.hpp
+examples/discovery/test_discovery.cpp
+examples/discovery/test_enr_survey.cpp
```
-This would enable automatic peer discovery from bootstrap nodes.
+Use those binaries to exercise automatic peer discovery from bootstrap nodes instead of the old `discovery.hpp` sketch.
## Next Steps
1. **Short-term**: Use real peer node enodes for testing
-2. **Medium-term**: Complete discv4 implementation for automatic discovery
+2. **Medium-term**: Continue improving the existing `discv4_client` + scheduler discovery flow
3. **Long-term**: Add peer caching, K-Bucket routing, persistence
## References
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..78bc9fc
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,84 @@
+cmake_minimum_required(VERSION 3.16)
+
+set(
+ CMAKE_TOOLCHAIN_FILE
+ "${CMAKE_SOURCE_DIR}/cmake/toolchain/cxx17.cmake"
+ CACHE
+ FILEPATH
+ "Default toolchain"
+)
+add_definitions(-D_WIN32_WINNT=0x0601)
+add_definitions(-DBOOST_BIND_GLOBAL_PLACEHOLDERS)
+
+# Project definition
+project(rlp
+ VERSION 1.0.0
+ DESCRIPTION "rlp/rlpx/discv4/discv5/eth library for C++"
+ LANGUAGES C CXX
+)
+
+include(GNUInstallDirs)
+
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS OFF)
+
+find_package(Protobuf CONFIG REQUIRED)
+
+if(NOT TARGET protobuf::protoc)
+ add_executable(protobuf::protoc IMPORTED)
+endif()
+
+if(EXISTS "${Protobuf_PROTOC_EXECUTABLE}")
+ set_target_properties(protobuf::protoc PROPERTIES IMPORTED_LOCATION ${Protobuf_PROTOC_EXECUTABLE})
+endif()
+
+find_package(OpenSSL REQUIRED)
+include_directories(${OPENSSL_INCLUDE_DIR})
+include_directories(${GSL_INCLUDE_DIR})
+find_package(libsecp256k1 CONFIG REQUIRED)
+find_package(fmt CONFIG REQUIRED)
+find_package(spdlog CONFIG REQUIRED)
+find_package(Boost REQUIRED COMPONENTS date_time filesystem random regex system thread log log_setup program_options json context coroutine)
+find_package(Snappy CONFIG REQUIRED)
+include_directories(${Boost_INCLUDE_DIRS})
+
+if(BUILD_TESTING)
+ find_package(GTest CONFIG REQUIRED)
+endif()
+
+add_subdirectory(src)
+
+install(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include" DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/rlp" FILES_MATCHING PATTERN "*.h*")
+install(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/generated" DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/rlp" FILES_MATCHING PATTERN "*.h*")
+
+set(rlp_CONFIG_DESTINATION_DIR "lib/cmake/rlp")
+install(EXPORT rlp
+ FILE rlpTargets.cmake
+ NAMESPACE rlp::
+ DESTINATION ${rlp_CONFIG_DESTINATION_DIR}
+)
+
+include(CMakePackageConfigHelpers)
+
+# generate the config file that includes the exports
+configure_package_config_file(${CMAKE_CURRENT_SOURCE_DIR}/cmake/config.cmake.in
+ "${CMAKE_CURRENT_BINARY_DIR}/rlpConfig.cmake"
+ INSTALL_DESTINATION ${rlp_CONFIG_DESTINATION_DIR}
+ NO_SET_AND_CHECK_MACRO
+ NO_CHECK_REQUIRED_COMPONENTS_MACRO
+)
+
+# generate the version file for the config file
+write_basic_package_version_file(
+ "${CMAKE_CURRENT_BINARY_DIR}/rlpConfigVersion.cmake"
+ VERSION "${PROJECT_VERSION}"
+ COMPATIBILITY AnyNewerVersion
+)
+
+# install the configuration file
+install(FILES
+ ${CMAKE_CURRENT_BINARY_DIR}/rlpConfig.cmake
+ ${CMAKE_CURRENT_BINARY_DIR}/rlpConfigVersion.cmake
+ DESTINATION ${rlp_CONFIG_DESTINATION_DIR}
+)
diff --git a/README.md b/README.md
index ff1a1e8..1659f9c 100644
--- a/README.md
+++ b/README.md
@@ -231,45 +231,6 @@ want to test, from your own wallet. GNUS contract addresses:
In one terminal, start the watcher before sending transactions so events are caught
live:
-```bash
-# All 4 mainnets
-./test_eth_watch.sh
-
-# All 4 testnets
-./test_eth_watch.sh gnus-all-testnets
-
-# Single chain
-./test_eth_watch.sh polygon
-```
-
-#### 4. Send test transactions
-
-In a second terminal, send a GNUS Transfer from the test wallet:
-
-```bash
-# Testnets
-source .env && ./send_test_transactions.sh testnets
-
-# Mainnets
-source .env && ./send_test_transactions.sh
-
-# Specific chains
-source .env && ./send_test_transactions.sh sepolia polygon-amoy
-```
-
-Optional env var overrides:
-
-```bash
-# Send to a different address
-TO_ADDRESS=0x... source .env && ./send_test_transactions.sh testnets
-
-# Use your own RPC endpoint
-RPC_SEPOLIA=https://my-node.example.com source .env && ./send_test_transactions.sh sepolia
-
-# Extend the watch window (default 60s)
-WATCH_TIMEOUT=120 ./test_eth_watch.sh gnus-all-testnets
-```
-
#### What a successful run looks like
```
diff --git a/build b/build
index bc5302b..4100dd4 160000
--- a/build
+++ b/build
@@ -1 +1 @@
-Subproject commit bc5302bcf1361331082989a231b3019a50b5fdc9
+Subproject commit 4100dd47f0c46ff212af49f5f6b9c16cb2d35f63
diff --git a/cmake/CommonBuildParameters.cmake b/cmake/CommonBuildParameters.cmake
index 0409a5f..8dbfd16 100644
--- a/cmake/CommonBuildParameters.cmake
+++ b/cmake/CommonBuildParameters.cmake
@@ -7,7 +7,7 @@ set(BOOST_PATCH_VERSION "0" CACHE STRING "Boost Patch Version")
set(BOOST_VERSION "${BOOST_MAJOR_VERSION}.${BOOST_MINOR_VERSION}.${BOOST_PATCH_VERSION}")
set(BOOST_VERSION_2U "${BOOST_MAJOR_VERSION}_${BOOST_MINOR_VERSION}")
-set(CMAKE_CXX_STANDARD 20)
+set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
@@ -32,6 +32,10 @@ set(OPENSSL_ROOT_DIR "${OPENSSL_DIR}" CACHE PATH "Path to OpenSSL install root f
set(OPENSSL_INCLUDE_DIR "${OPENSSL_DIR}/include" CACHE PATH "Path to OpenSSL include folder")
find_package(OpenSSL REQUIRED)
+# --------------------------------------------------------
+# Crypto3 include dir
+set(crypto3_INCLUDE_DIR "${ZKLLVM_BUILD_DIR}/zkLLVM/include")
+
# --------------------------------------------------------
# Set config of Microsoft GSL (header-only library)
set(GSL_INCLUDE_DIR "${_THIRDPARTY_BUILD_DIR}/Microsoft.GSL/include")
@@ -69,6 +73,8 @@ set(boost_random_DIR "${Boost_LIB_DIR}/cmake/boost_random-${BOOST_VERSION}")
set(boost_regex_DIR "${Boost_LIB_DIR}/cmake/boost_regex-${BOOST_VERSION}")
set(boost_system_DIR "${Boost_LIB_DIR}/cmake/boost_system-${BOOST_VERSION}")
set(boost_thread_DIR "${Boost_LIB_DIR}/cmake/boost_thread-${BOOST_VERSION}")
+set(boost_context_DIR "${Boost_LIB_DIR}/cmake/boost_context-${BOOST_VERSION}")
+set(boost_coroutine_DIR "${Boost_LIB_DIR}/cmake/boost_coroutine-${BOOST_VERSION}")
set(boost_unit_test_framework_DIR "${Boost_LIB_DIR}/cmake/boost_unit_test_framework-${BOOST_VERSION}")
set(Boost_USE_MULTITHREADED ON)
set(Boost_USE_STATIC_LIBS ON)
@@ -80,7 +86,7 @@ if(POLICY CMP0167)
endif()
# header only libraries must not be added here
-find_package(Boost REQUIRED COMPONENTS date_time filesystem random regex system thread log log_setup program_options json)
+find_package(Boost REQUIRED COMPONENTS date_time filesystem random regex system thread log log_setup program_options json context coroutine)
include_directories(${Boost_INCLUDE_DIRS})
# fmt
@@ -138,7 +144,8 @@ install(TARGETS ${PROJECT_NAME} EXPORT RLPTargets
)
install(
- EXPORT RLPTargets
+ EXPORT rlp
+ FILE rlpTargets.cmake
DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/rlp
NAMESPACE rlp::
)
@@ -147,7 +154,7 @@ include(CMakePackageConfigHelpers)
# generate the config file that is includes the exports
configure_package_config_file(${PROJECT_ROOT}/cmake/config.cmake.in
- "${CMAKE_CURRENT_BINARY_DIR}/RLPConfig.cmake"
+ "${CMAKE_CURRENT_BINARY_DIR}/rlpConfig.cmake"
INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/rlp
NO_SET_AND_CHECK_MACRO
NO_CHECK_REQUIRED_COMPONENTS_MACRO
@@ -155,18 +162,18 @@ configure_package_config_file(${PROJECT_ROOT}/cmake/config.cmake.in
# generate the version file for the config file
write_basic_package_version_file(
- "${CMAKE_CURRENT_BINARY_DIR}/RLPConfigVersion.cmake"
+ "${CMAKE_CURRENT_BINARY_DIR}/rlpConfigVersion.cmake"
VERSION "${CPACK_PACKAGE_VERSION_MAJOR}.${CPACK_PACKAGE_VERSION_MINOR}.${CPACK_PACKAGE_VERSION_PATCH}"
COMPATIBILITY AnyNewerVersion
)
# install the configuration file
install(FILES
- ${CMAKE_CURRENT_BINARY_DIR}/RLPConfigVersion.cmake
+ ${CMAKE_CURRENT_BINARY_DIR}/rlpConfigVersion.cmake
DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/rlp
)
install(FILES
- ${CMAKE_CURRENT_BINARY_DIR}/RLPConfig.cmake
+ ${CMAKE_CURRENT_BINARY_DIR}/rlpConfig.cmake
DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/rlp
)
diff --git a/cmake/CompilationFlags.cmake b/cmake/CompilationFlags.cmake
index 5f25d12..e4857c0 100644
--- a/cmake/CompilationFlags.cmake
+++ b/cmake/CompilationFlags.cmake
@@ -1,30 +1,37 @@
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "^(AppleClang|Clang|GNU)$")
# enable those flags
- #add_flag(-Wall)
- #add_flag(-Wextra)
- #add_flag(-Woverloaded-virtual) # warn if you overload (not override) a virtual function
- #add_flag(-Wformat=2) # warn on security issues around functions that format output (ie printf)
- #add_flag(-Wmisleading-indentation) # (only in GCC >= 6.0) warn if indentation implies blocks where blocks do not exist
- #add_flag(-Wduplicated-cond) # (only in GCC >= 6.0) warn if if / else chain has duplicated conditions
- #add_flag(-Wduplicated-branches) # (only in GCC >= 7.0) warn if if / else branches have duplicated code
- #add_flag(-Wnull-dereference) # (only in GCC >= 6.0) warn if a null dereference is detected
- #add_flag(-Wno-sign-compare)
- #add_flag(-Wtype-limits) # size_t - size_t >= 0 -> always true
- #add_flag(-Wnon-virtual-dtor) # warn the user if a class with virtual functions has a non-virtual destructor. This helps catch hard to track down memory errors
- #add_flag(-Wno-in-instantiation)
- # disable those flags
- add_flag(-Wno-unknown-attributes) # disable warning for zkLLVM attributes
- #add_flag(-Wno-unused-command-line-argument) # clang: warning: argument unused during compilation: '--coverage' [-Wunused-command-line-argument]
- #dd_flag(-Wno-unused-variable) # prints too many useless warnings
- #add_flag(-Wno-double-promotion) # (GCC >= 4.6, Clang >= 3.8) warn if float is implicit promoted to double
- #add_flag(-Wno-unused-parameter) # prints too many useless warnings
- #add_flag(-Wno-unused-function) # prints too many useless warnings
- #add_flag(-Wno-format-nonliteral) # prints way too many warnings from spdlog
- #add_flag(-Wno-gnu-zero-variadic-macro-arguments) # https://stackoverflow.com/questions/21266380/is-the-gnu-zero-variadic-macro-arguments-safe-to-ignore
+ add_flag(-Wall)
+ add_flag(-Wextra)
+ add_flag(-Woverloaded-virtual) # warn if you overload (not override) a virtual function
+ add_flag(-Wformat=2) # warn on security issues around functions that format output (ie printf)
+ add_flag(-Wmisleading-indentation) # (only in GCC >= 6.0) warn if indentation implies blocks where blocks do not exist
+ add_flag(-Wduplicated-cond) # (only in GCC >= 6.0) warn if if / else chain has duplicated conditions
+ add_flag(-Wduplicated-branches) # (only in GCC >= 7.0) warn if if / else branches have duplicated code
+ add_flag(-Wnull-dereference) # (only in GCC >= 6.0) warn if a null dereference is detected
+ add_flag(-Wsign-compare)
+ add_flag(-Wtype-limits) # size_t - size_t >= 0 -> always true
+ add_flag(-Wnon-virtual-dtor) # warn the user if a class with virtual functions has a non-virtual destructor. This helps catch hard to track down memory errors
+ # disable those flags
+ add_flag(-Wno-unused-command-line-argument) # clang: warning: argument unused during compilation: '--coverage' [-Wunused-command-line-argument]
+ add_flag(-Wno-unused-variable) # prints too many useless warnings
+ add_flag(-Wno-double-promotion) # (GCC >= 4.6, Clang >= 3.8) warn if float is implicit promoted to double
+ add_flag(-Wno-unused-parameter) # prints too many useless warnings
+ add_flag(-Wno-unused-function) # prints too many useless warnings
+ add_flag(-Wno-format-nonliteral) # prints way too many warnings from spdlog
+ add_flag(-Wno-gnu-zero-variadic-macro-arguments) # https://stackoverflow.com/questions/21266380/is-the-gnu-zero-variadic-macro-arguments-safe-to-ignore
+ add_flag(-Wno-unused-result) #Every logger call generates this
+ add_flag(-Wno-pessimizing-move) #Warning was irrelevant to situation
+ add_flag(-Wno-unused-but-set-variable)
+ add_flag(-Wno-macro-redefined)
+ add_flag(-Wno-deprecated-copy-with-user-provided-copy)
+ if(APPLE)
+ add_link_options(-Wl,-no_warn_duplicate_libraries)
+ endif()
# promote to errors
- #add_flag(-Werror=unused-lambda-capture) # error if lambda capture is unused
+ add_flag(-Werror=unused-lambda-capture) # error if lambda capture is unused
#add_flag(-Werror=return-type) # warning: control reaches end of non-void function [-Wreturn-type]
- #add_flag(-Werror=sign-compare) # warn the user if they compare a signed and unsigned numbers
+ add_flag(-Werror=sign-compare) # warn the user if they compare a signed and unsigned numbers
+ add_flag(-Werror=type-limits) # catch always-true / always-false limit checks as build breaks
#add_flag(-Werror=reorder) # field '$1' will be initialized after field '$2'
endif()
diff --git a/cmake/config.cmake.in b/cmake/config.cmake.in
index 08ca210..2597be6 100644
--- a/cmake/config.cmake.in
+++ b/cmake/config.cmake.in
@@ -1,3 +1,3 @@
@PACKAGE_INIT@
-include ( "${CMAKE_CURRENT_LIST_DIR}/ProofSystemTargets.cmake" )
+include("${CMAKE_CURRENT_LIST_DIR}/rlpTargets.cmake")
diff --git a/cmake/functions.cmake b/cmake/functions.cmake
deleted file mode 100644
index 0a8070f..0000000
--- a/cmake/functions.cmake
+++ /dev/null
@@ -1,69 +0,0 @@
-function(disable_clang_tidy target)
- set_target_properties(${target} PROPERTIES
- C_CLANG_TIDY ""
- CXX_CLANG_TIDY ""
- )
-endfunction()
-
-function(addtest test_name)
- add_executable(${test_name} ${ARGN})
- addtest_part(${test_name} ${ARGN})
- target_link_libraries(${test_name}
- GTest::gtest_main
- GTest::gmock_main
- )
- file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/xunit)
- set(xml_output "--gtest_output=xml:${CMAKE_BINARY_DIR}/xunit/xunit-${test_name}.xml")
- add_test(
- NAME ${test_name}
- COMMAND $<TARGET_FILE:${test_name}> ${xml_output}
- )
- set_target_properties(${test_name} PROPERTIES
- RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/test_bin
- ARCHIVE_OUTPUT_PATH ${CMAKE_BINARY_DIR}/test_lib
- LIBRARY_OUTPUT_PATH ${CMAKE_BINARY_DIR}/test_lib
- )
- disable_clang_tidy(${test_name})
-
- if(FORCE_MULTILE)
- set_target_properties(${test_name} PROPERTIES LINK_FLAGS "${MULTIPLE_OPTION}")
- endif()
-endfunction()
-
-function(addtest_part test_name)
- if (POLICY CMP0076)
- cmake_policy(SET CMP0076 NEW)
- endif ()
- target_sources(${test_name} PUBLIC
- ${ARGN}
- )
- target_link_libraries(${test_name}
- GTest::gtest
- )
-endfunction()
-
-# conditionally applies flag.
-function(add_flag flag)
- check_cxx_compiler_flag(${flag} FLAG_${flag})
- if (FLAG_${flag} EQUAL 1)
- add_compile_options(${flag})
- endif ()
-endfunction()
-
-function(print)
- message(STATUS "[${CMAKE_PROJECT_NAME}] ${ARGV}")
-endfunction()
-
-### sgnus_install should be called right after add_library(target)
-function(sgnus_install target)
- install(TARGETS ${target} EXPORT ProofSystemTargets
- LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
- ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
- RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
- INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
- PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
- FRAMEWORK DESTINATION ${CMAKE_INSTALL_PREFIX}
- BUNDLE DESTINATION ${CMAKE_INSTALL_BINDIR}
- )
-endfunction()
-
diff --git a/cmake/toolchain/cxx17.cmake b/cmake/toolchain/cxx17.cmake
new file mode 100644
index 0000000..50c933e
--- /dev/null
+++ b/cmake/toolchain/cxx17.cmake
@@ -0,0 +1,3 @@
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS OFF)
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index 270bcfb..9ce38b9 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -3,4 +3,6 @@ cmake_minimum_required(VERSION 3.15)
# Add subdirectories for each example
add_subdirectory(eth_watch)
+add_subdirectory(discovery)
+add_subdirectory(discv5_crawl)
diff --git a/examples/chain_config.hpp b/examples/chain_config.hpp
new file mode 100644
index 0000000..b348a6b
--- /dev/null
+++ b/examples/chain_config.hpp
@@ -0,0 +1,100 @@
+// Copyright 2025 GeniusVentures
+// SPDX-License-Identifier: Apache-2.0
+#pragma once
+
+#include <array>
+#include <cstdint>
+#include <filesystem>
+#include <fstream>
+#include <optional>
+#include <string>
+
+#include <boost/json.hpp>
+#include <boost/system/error_code.hpp>
+
+/// @brief Search for chains.json next to the binary, then in CWD.
+/// Parse it and return the 4-byte fork hash for @p chain.
+///
+/// chains.json format (simple name → 8 hex-char string):
+/// @code
+/// { "sepolia": "268956b6", "mainnet": "07c9462e" }
+/// @endcode
+///
+/// @param chain Chain name key, e.g. "sepolia".
+/// @param argv0 Value of argv[0] used to locate the binary directory.
+/// @return Parsed 4-byte fork hash, or nullopt if file/key not found.
+[[nodiscard]] inline std::optional<std::array<uint8_t, 4>>
+load_fork_hash( const std::string& chain, const std::string& argv0 ) noexcept
+{
+ const std::filesystem::path bin_dir =
+ std::filesystem::path( argv0 ).parent_path();
+
+ const std::filesystem::path candidates[] = {
+ bin_dir / "chains.json",
+ std::filesystem::path( "chains.json" )
+ };
+
+ for ( const auto& candidate : candidates )
+ {
+ std::ifstream file( candidate );
+ if ( !file.is_open() )
+ {
+ continue;
+ }
+
+ boost::system::error_code ec;
+ const boost::json::value jval = boost::json::parse( file, ec );
+ if ( ec )
+ {
+ continue;
+ }
+
+ const boost::json::object* obj = jval.if_object();
+ if ( !obj )
+ {
+ continue;
+ }
+
+ const boost::json::value* entry = obj->if_contains( chain );
+ if ( !entry )
+ {
+ continue;
+ }
+
+ const boost::json::string* hex = entry->if_string();
+ if ( !hex || hex->size() != 8U )
+ {
+ continue;
+ }
+
+ auto nibble = []( char c ) -> std::optional<uint8_t>
+ {
+ if ( c >= '0' && c <= '9' ) { return static_cast<uint8_t>( c - '0' ); }
+ if ( c >= 'a' && c <= 'f' ) { return static_cast<uint8_t>( 10 + c - 'a' ); }
+ if ( c >= 'A' && c <= 'F' ) { return static_cast<uint8_t>( 10 + c - 'A' ); }
+ return std::nullopt;
+ };
+
+ std::array<uint8_t, 4> hash{};
+ bool ok = true;
+ for ( size_t i = 0; i < 4U && ok; ++i )
+ {
+ const auto hi = nibble( ( *hex )[i * 2U] );
+ const auto lo = nibble( ( *hex )[i * 2U + 1U] );
+ if ( !hi || !lo )
+ {
+ ok = false;
+ break;
+ }
+ hash[i] = static_cast<uint8_t>( ( *hi << 4U ) | *lo );
+ }
+
+ if ( ok )
+ {
+ return hash;
+ }
+ }
+
+ return std::nullopt;
+}
+
diff --git a/examples/chains.json b/examples/chains.json
new file mode 100644
index 0000000..fe4e303
--- /dev/null
+++ b/examples/chains.json
@@ -0,0 +1,7 @@
+{
+ "sepolia": "268956b6",
+ "mainnet": "07c9462e",
+ "holesky": "9bc6cb31",
+ "hoodi": "23aa1351"
+}
+
diff --git a/examples/discovery/CMakeLists.txt b/examples/discovery/CMakeLists.txt
new file mode 100644
index 0000000..fd0add1
--- /dev/null
+++ b/examples/discovery/CMakeLists.txt
@@ -0,0 +1,67 @@
+cmake_minimum_required(VERSION 3.15)
+
+add_executable(test_discovery
+ test_discovery.cpp
+)
+
+include_directories(
+ ${PROJECT_SOURCE_DIR}/include
+)
+
+target_link_libraries(test_discovery
+ PRIVATE
+ discv4
+ Boost::json
+)
+
+# Disable ASAN: live-network integration test uses Boost.Asio coroutines which
+# trigger false positives on macOS ARM64 (same reason as eth_watch).
+if(CMAKE_CXX_COMPILER_ID MATCHES "Clang|GNU")
+ target_compile_options(test_discovery PRIVATE -fno-sanitize=address)
+endif()
+
+# ── test_enr_survey ───────────────────────────────────────────────────────────
+# Diagnostic live test: discovery-only (no dialing), no fork-id filter.
+# Tallies eth_fork_id presence and hash frequency to diagnose live ENR failures.
+add_executable(test_enr_survey
+ test_enr_survey.cpp
+)
+
+add_executable(test_discv5_connect
+ test_discv5_connect.cpp
+)
+
+target_link_libraries(test_enr_survey
+ PRIVATE
+ discv4
+ Boost::boost
+ Boost::json
+)
+
+target_link_libraries(test_discv5_connect
+ PUBLIC
+ discv5
+ Boost::json
+)
+
+if(CMAKE_CXX_COMPILER_ID MATCHES "Clang|GNU")
+ target_compile_options(test_enr_survey PRIVATE -fno-sanitize=address)
+ target_compile_options(test_discv5_connect PRIVATE -fno-sanitize=address)
+endif()
+
+# Copy chains.json from examples/ alongside both discovery binaries at build time.
+# Update examples/chains.json (no recompile needed) when fork hashes change.
+add_custom_command(TARGET test_discovery POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different
+ ${CMAKE_CURRENT_SOURCE_DIR}/../chains.json
+ $<TARGET_FILE_DIR:test_discovery>/chains.json
+ COMMENT "Copying chains.json to discovery binary directory"
+)
+
+add_custom_command(TARGET test_discv5_connect POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different
+ ${CMAKE_CURRENT_SOURCE_DIR}/../chains.json
+ $<TARGET_FILE_DIR:test_discv5_connect>/chains.json
+ COMMENT "Copying chains.json to discv5 connect binary directory"
+)
+
diff --git a/examples/discovery/test_discovery.cpp b/examples/discovery/test_discovery.cpp
new file mode 100644
index 0000000..946331f
--- /dev/null
+++ b/examples/discovery/test_discovery.cpp
@@ -0,0 +1,517 @@
+// Copyright 2025 GeniusVentures
+// SPDX-License-Identifier: Apache-2.0
+//
+// examples/discovery/test_discovery.cpp
+//
+// Functional test for discv4 peer discovery + RLPx ETH Status handshake
+// against live Sepolia bootnodes. Uses DialScheduler to maintain concurrent
+// outbound dials and verifies that at least MIN_CONNECTIONS peers complete the
+// ETH/68+69 Status handshake on the correct chain (network_id=11155111).
+//
+// Checks (GTest-style output):
+// 1. At least one bootnode bond completes (PING→PONG)
+// 2. At least MIN_PEERS neighbour peers discovered
+// 3. At least MIN_CONNECTIONS peers complete the Sepolia ETH Status handshake
+//
+// Exit code 0 = all checks pass, 1 = any check failed.
+//
+// Usage:
+// ./test_discovery [--log-level debug] [--timeout 60] [--connections 3]
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "../chain_config.hpp"
+
+// ── Sepolia chain constants ───────────────────────────────────────────────────
+
+static constexpr uint64_t kSepoliaNetworkId = 11155111;
+static constexpr uint8_t kEthOffset = 0x10;
+
+static eth::Hash256 sepolia_genesis()
+{
+ // 25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9
+ eth::Hash256 h{};
+ const char* hex = "25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9";
+ for (size_t i = 0; i < 32; ++i)
+ {
+ auto nibble = [](char c) -> uint8_t {
+ if (c >= '0' && c <= '9') return static_cast<uint8_t>(c - '0');
+ if (c >= 'a' && c <= 'f') return static_cast<uint8_t>(10 + c - 'a');
+ return 0;
+ };
+ h[i] = static_cast<uint8_t>((nibble(hex[i*2]) << 4) | nibble(hex[i*2+1]));
+ }
+ return h;
+}
+
+// Sepolia post-BPO2 fallback hash — used only when chains.json is not found.
+// Update chains.json instead of this constant when the fork advances.
+static const std::array<uint8_t, 4> kSepoliaForkHashFallback{ 0x26, 0x89, 0x56, 0xb6 };
+
+// ── Test framework ────────────────────────────────────────────────────────────
+
+namespace {
+
+struct TestSuite
+{
+ int run = 0, passed = 0, failed = 0;
+ std::string current;
+
+ void start(const std::string& name)
+ {
+ current = name;
+ ++run;
+ std::cout << "[ RUN ] " << name << "\n";
+ }
+ void pass(const std::string& detail = "")
+ {
+ ++passed;
+ std::cout << "[ OK ] " << current << "\n";
+ if (!detail.empty()) std::cout << " " << detail << "\n";
+ }
+ void fail(const std::string& detail = "")
+ {
+ ++failed;
+ std::cout << "[ FAILED ] " << current << "\n";
+ if (!detail.empty()) std::cout << " " << detail << "\n";
+ }
+ void header(int n)
+ {
+ std::cout << "\n[==========] DiscoveryTest (" << n << " checks)\n\n";
+ }
+ void footer()
+ {
+ std::cout << "\n[==========] " << run << " check(s)\n";
+ std::cout << "[ PASSED ] " << passed << "\n";
+ if (failed) std::cout << "[ FAILED ] " << failed << "\n";
+ std::cout << "\n";
+ }
+};
+
+} // namespace
+
+// ── Dial-attempt statistics ───────────────────────────────────────────────────
+
+struct DialStats
+{
+ std::atomic<int> dialed{0}; ///< total dial attempts started
+ std::atomic<int> connect_failed{0}; ///< TCP / auth / pre-HELLO Disconnect
+ std::atomic<int> wrong_chain{0}; ///< Status received but wrong network_id
+ std::atomic<int> status_timeout{0}; ///< no Status within timeout (not TooManyPeers)
+ std::atomic<int> too_many_peers{0}; ///< TooManyPeers before chain confirmed
+ std::atomic<int> too_many_peers_right_chain{0}; ///< TooManyPeers after chain confirmed
+ std::atomic<int> connected{0}; ///< right chain, Status validated
+};
+
+// Does not set up EthWatchService — just validates the chain and returns.
+
+static void dial_connect_only(
+ discv4::ValidatedPeer vp,
+ std::function<void()> on_done,
+ std::function<void(std::shared_ptr<rlpx::RlpxSession>)> on_connected,
+ boost::asio::yield_context yield,
+ std::shared_ptr<DialStats> stats,
+ eth::ForkId fork_id)
+{
+ static auto log = rlp::base::createLogger("test_discovery");
+ ++stats->dialed;
+
+ auto keypair_result = rlpx::crypto::Ecdh::generate_ephemeral_keypair();
+ if (!keypair_result)
+ {
+ ++stats->connect_failed;
+ on_done();
+ return;
+ }
+ const auto& keypair = keypair_result.value();
+
+ const rlpx::SessionConnectParams params{
+ vp.peer.ip,
+ vp.peer.tcp_port,
+ keypair.public_key,
+ keypair.private_key,
+ vp.pubkey,
+ "rlp-test-discovery",
+ 0
+ };
+
+ auto session_result = rlpx::RlpxSession::connect(params, yield);
+ if (!session_result)
+ {
+ ++stats->connect_failed;
+ on_done();
+ return;
+ }
+ auto session = std::move(session_result.value());
+
+ // Send ETH Status (69)
+ {
+ const eth::Hash256 genesis = sepolia_genesis();
+ eth::StatusMessage69 status69{
+ 69,
+ kSepoliaNetworkId,
+ genesis,
+ fork_id,
+ 0,
+ 0,
+ genesis,
+ };
+ eth::StatusMessage status = status69;
+ auto encoded = eth::protocol::encode_status(status);
+ if (encoded)
+ {
+ (void)session->post_message(rlpx::framing::Message{
+ static_cast<uint8_t>(kEthOffset + eth::protocol::kStatusMessageId),
+ std::move(encoded.value())
+ });
+ }
+ }
+
+ auto executor = yield.get_executor();
+ auto status_received = std::make_shared<std::atomic<bool>>(false);
+ auto status_timeout = std::make_shared<boost::asio::steady_timer>(executor);
+ auto lifetime = std::make_shared<boost::asio::steady_timer>(executor);
+ auto disconnect_reason = std::make_shared<std::atomic<uint8_t>>(
+ static_cast<uint8_t>(rlpx::DisconnectReason::kRequested));
+ status_timeout->expires_after(eth::protocol::kStatusHandshakeTimeout);
+ lifetime->expires_after(std::chrono::seconds(10)); // stay connected briefly after handshake
+
+ session->set_disconnect_handler(
+ [lifetime, status_timeout, disconnect_reason]
+ (const rlpx::protocol::DisconnectMessage& msg)
+ {
+ disconnect_reason->store(static_cast<uint8_t>(msg.reason));
+ lifetime->cancel();
+ status_timeout->cancel();
+ });
+
+ session->set_ping_handler([session](const rlpx::protocol::PingMessage&) {
+ const rlpx::protocol::PongMessage pong;
+ auto encoded = pong.encode();
+ if (!encoded) { return; }
+ (void)session->post_message(rlpx::framing::Message{
+ rlpx::kPongMessageId,
+ std::move(encoded.value())
+ });
+ });
+
+ const eth::Hash256 genesis = sepolia_genesis();
+ session->set_generic_handler([session, status_received, status_timeout,
+ on_connected, genesis, stats](const rlpx::protocol::Message& msg)
+ {
+ static auto gh_log = rlp::base::createLogger("test_discovery");
+ if (msg.id < kEthOffset) { return; }
+ const auto eth_id = static_cast<uint8_t>(msg.id - kEthOffset);
+ if (eth_id != eth::protocol::kStatusMessageId) { return; }
+
+ const rlp::ByteView payload(msg.payload.data(), msg.payload.size());
+ auto decoded = eth::protocol::decode_status(payload);
+ if (!decoded)
+ {
+ status_timeout->cancel();
+ (void)session->disconnect(rlpx::DisconnectReason::kSubprotocolError);
+ return;
+ }
+ auto valid = eth::protocol::validate_status(decoded.value(), kSepoliaNetworkId, genesis);
+ if (!valid)
+ {
+ SPDLOG_LOGGER_DEBUG(gh_log, "ETH Status validation failed: {}",
+ static_cast<int>(valid.error()));
+ ++stats->wrong_chain;
+ status_timeout->cancel();
+ (void)session->disconnect(rlpx::DisconnectReason::kSubprotocolError);
+ return;
+ }
+ ++stats->connected;
+ status_received->store(true);
+ status_timeout->cancel();
+ on_connected(session);
+ });
+
+ boost::system::error_code hs_ec;
+ status_timeout->async_wait(boost::asio::redirect_error(yield, hs_ec));
+
+ if (!status_received->load())
+ {
+ if (hs_ec) // timer was cancelled — peer disconnected us before Status
+ {
+ const auto reason = static_cast<rlpx::DisconnectReason>(disconnect_reason->load());
+ if (reason == rlpx::DisconnectReason::kTooManyPeers)
+ {
+ ++stats->too_many_peers;
+ }
+ else
+ {
+ ++stats->connect_failed;
+ }
+ }
+ else // timer fired naturally — no Status received within timeout
+ {
+ ++stats->status_timeout;
+ }
+ (void)session->disconnect(rlpx::DisconnectReason::kTimeout);
+ on_done();
+ return;
+ }
+
+ // Stay briefly connected so on_connected can be counted
+ boost::system::error_code lt_ec;
+ lifetime->async_wait(boost::asio::redirect_error(yield, lt_ec));
+ on_done();
+}
+
+// ── main ──────────────────────────────────────────────────────────────────────
+
+int main(int argc, char** argv)
+{
+ int timeout_secs = 180;
+ int min_connections = 3;
+ int min_peers = 3;
+ int max_dials = 16; // target dialed peers (go-ethereum: MaxPeers/dialRatio = 50/3 ≈ 16)
+ // active concurrent attempts = min(target*2, 50) per go-ethereum's freeDialSlots()
+
+ for (int i = 1; i < argc; ++i)
+ {
+ std::string_view arg(argv[i]);
+ if (arg == "--log-level" && i + 1 < argc)
+ {
+ std::string_view lvl(argv[++i]);
+ if (lvl == "debug") spdlog::set_level(spdlog::level::debug);
+ else if (lvl == "info") spdlog::set_level(spdlog::level::info);
+ else if (lvl == "warn") spdlog::set_level(spdlog::level::warn);
+ else if (lvl == "off") spdlog::set_level(spdlog::level::off);
+ }
+ else if (arg == "--timeout" && i + 1 < argc) { timeout_secs = std::atoi(argv[++i]); }
+ else if (arg == "--connections" && i + 1 < argc){ min_connections = std::atoi(argv[++i]); }
+ else if (arg == "--peers" && i + 1 < argc) { min_peers = std::atoi(argv[++i]); }
+ else if (arg == "--dials" && i + 1 < argc) { max_dials = std::atoi(argv[++i]); }
+ }
+
+ // ── Fork hash — loaded from chains.json, fallback to compiled-in value ──────
+ const auto loaded_hash = load_fork_hash( "sepolia", argv[0] );
+ if ( !loaded_hash )
+ {
+ std::cout << "[ WARN ] chains.json not found or missing 'sepolia' key — "
+ "using compiled-in fallback hash.\n";
+ }
+ const eth::ForkId sepolia_fork_id{
+ loaded_hash.value_or( kSepoliaForkHashFallback ),
+ 0
+ };
+
+ TestSuite suite;
+ suite.header(3);
+
+ boost::asio::io_context io;
+
+ // Shared result counters (written only from the single io_context thread)
+ std::atomic<int> peers_count{0};
+ auto stats = std::make_shared();
+
+ // ── discv4 setup ─────────────────────────────────────────────────────────
+ auto keypair_result = rlpx::crypto::Ecdh::generate_ephemeral_keypair();
+ if (!keypair_result)
+ {
+ std::cout << "Failed to generate keypair\n";
+ return 1;
+ }
+ const auto& keypair = keypair_result.value();
+
+ discv4::discv4Config dv4_cfg;
+ dv4_cfg.bind_port = 0;
+ std::copy(keypair.private_key.begin(), keypair.private_key.end(), dv4_cfg.private_key.begin());
+ std::copy(keypair.public_key.begin(), keypair.public_key.end(), dv4_cfg.public_key.begin());
+
+ auto dv4 = std::make_shared(io, dv4_cfg);
+
+ // ── Overall test timeout ─────────────────────────────────────────────────
+ boost::asio::steady_timer deadline(io, std::chrono::seconds(timeout_secs));
+
+ // ── DialScheduler ────────────────────────────────────────────────────────
+ const int kMaxActiveDials = 50;
+ auto pool = std::make_shared(kMaxActiveDials, max_dials * 2);
+
+ auto sched_ref = std::make_shared(nullptr);
+
+ auto scheduler = std::make_shared(io, pool,
+ [&io, &deadline, min_connections, sched_ref, stats, sepolia_fork_id]
+ (discv4::ValidatedPeer vp,
+ std::function<void()> on_done,
+ std::function<void(std::shared_ptr<rlpx::RlpxSession>)> on_connected,
+ boost::asio::yield_context yc) mutable
+ {
+ dial_connect_only(vp, std::move(on_done),
+ [on_connected, &io, &deadline, min_connections, sched_ref]
+ (std::shared_ptr<rlpx::RlpxSession> s) mutable
+ {
+ on_connected(s); // increments total_validated
+ if (*sched_ref && (*sched_ref)->total_validated >= min_connections)
+ {
+ deadline.cancel();
+ io.stop();
+ }
+ },
+ yc, stats, sepolia_fork_id);
+ });
+ *sched_ref = scheduler.get();
+
+ // Pre-dial ENR chain filter: only enqueue peers whose ENR `eth` entry carries
+ // the correct Sepolia fork hash. Mirrors go-ethereum NewNodeFilter.
+ // Peers with no eth_fork_id (ENR absent or no `eth` entry) are also dropped.
+ scheduler->filter_fn = discv4::make_fork_id_filter( sepolia_fork_id.fork_hash );
+
+ dv4->set_peer_discovered_callback(
+ [scheduler, &peers_count](const discv4::DiscoveredPeer& peer)
+ {
+ discv4::ValidatedPeer vp;
+ vp.peer = peer;
+ std::copy(peer.node_id.begin(), peer.node_id.end(), vp.pubkey.begin());
+ if (!rlpx::crypto::Ecdh::verify_public_key(vp.pubkey)) { return; }
+ ++peers_count;
+ scheduler->enqueue(std::move(vp));
+ });
+
+ dv4->set_error_callback([](const std::string&) {});
+
+ deadline.async_wait([&](boost::system::error_code) {
+ scheduler->stop();
+ dv4->stop();
+ io.stop();
+ });
+
+ // ── Signal handler ───────────────────────────────────────────────────────
+ boost::asio::signal_set signals(io, SIGINT, SIGTERM);
+ signals.async_wait([&](boost::system::error_code, int) {
+ deadline.cancel();
+ scheduler->stop();
+ dv4->stop();
+ io.stop();
+ });
+
+ // ── Seed discovery with Sepolia bootnodes ─────────────────────────────────
+ auto parse_enode = [](const std::string& enode)
+ -> std::optional<std::tuple<std::string, uint16_t, std::string>>
+ {
+ // enode://<node-id-hex>@<host>:<port>
+ const std::string prefix = "enode://";
+ if (enode.substr(0, prefix.size()) != prefix) { return std::nullopt; }
+ const auto at = enode.find('@', prefix.size());
+ if (at == std::string::npos) { return std::nullopt; }
+ const auto colon = enode.rfind(':');
+ if (colon == std::string::npos || colon < at) { return std::nullopt; }
+ std::string pubkey = enode.substr(prefix.size(), at - prefix.size());
+ std::string host = enode.substr(at + 1, colon - at - 1);
+ uint16_t port = static_cast<uint16_t>(std::stoi(enode.substr(colon + 1)));
+ return std::make_tuple(host, port, pubkey);
+ };
+
+ auto hex_to_nibble = [](char c) -> std::optional<uint8_t> {
+ if (c >= '0' && c <= '9') return static_cast<uint8_t>(c - '0');
+ if (c >= 'a' && c <= 'f') return static_cast<uint8_t>(10 + c - 'a');
+ if (c >= 'A' && c <= 'F') return static_cast<uint8_t>(10 + c - 'A');
+ return std::nullopt;
+ };
+
+ const auto start_result = dv4->start();
+ if (!start_result)
+ {
+ std::cout << "Failed to start discv4\n";
+ return 1;
+ }
+
+ for (const auto& enode : ETHEREUM_SEPOLIA_BOOTNODES)
+ {
+ auto parsed = parse_enode(enode);
+ if (!parsed) { continue; }
+ const auto& [host, port, pubkey_hex] = *parsed;
+ if (pubkey_hex.size() != 128) { continue; }
+ discv4::NodeId bn_id{};
+ bool ok = true;
+ for (size_t i = 0; i < 64 && ok; ++i)
+ {
+ auto hi = hex_to_nibble(pubkey_hex[i*2]);
+ auto lo = hex_to_nibble(pubkey_hex[i*2+1]);
+ if (!hi || !lo) { ok = false; break; }
+ bn_id[i] = static_cast<uint8_t>((*hi << 4) | *lo);
+ }
+ if (!ok) { continue; }
+ std::string host_copy = host;
+ uint16_t port_copy = port;
+ boost::asio::spawn(io,
+ [dv4, host_copy, port_copy, bn_id](boost::asio::yield_context yc)
+ {
+ (void)dv4->find_node(host_copy, port_copy, bn_id, yc);
+ });
+ }
+
+ io.run();
+
+ // ── Dial breakdown ────────────────────────────────────────────────────────
+ std::cout << "\n[ STATS ] Dial breakdown:\n"
+ << " dialed: " << stats->dialed.load() << "\n"
+ << " connect failed: " << stats->connect_failed.load() << "\n"
+ << " wrong chain: " << stats->wrong_chain.load() << "\n"
+ << " too many peers: " << stats->too_many_peers.load() << "\n"
+ << " too many peers (right chain): " << stats->too_many_peers_right_chain.load() << "\n"
+ << " status timeout: " << stats->status_timeout.load() << "\n"
+ << " connected (right chain): " << stats->connected.load() << "\n";
+
+ // ── Results ───────────────────────────────────────────────────────────────
+ const int connections = scheduler->total_validated;
+
+ suite.start("DiscoveryTest.BootnodeBondComplete");
+ // bonds_count: we infer from the fact that peers were discovered (discv4 bonds internally)
+ if (peers_count.load() > 0)
+ suite.pass(std::to_string(peers_count.load()) + " neighbour peer(s) discovered");
+ else
+ suite.fail("No peers discovered — PING→PONG bond may have failed (firewall / UDP 30303?)");
+
+ suite.start("DiscoveryTest.RecursiveDiscovery");
+ if (peers_count.load() >= min_peers)
+ suite.pass(std::to_string(peers_count.load()) + " peer(s) discovered (min=" + std::to_string(min_peers) + ")");
+ else
+ suite.fail("Only " + std::to_string(peers_count.load()) + "/" + std::to_string(min_peers) + " peers discovered");
+
+ suite.start("DiscoveryTest.ActiveSepoliaConnections");
+ if (connections >= min_connections)
+ suite.pass(std::to_string(connections) + " active Sepolia ETH Status connection(s) confirmed");
+ else
+ suite.fail("Only " + std::to_string(connections) + "/" + std::to_string(min_connections)
+ + " Sepolia connection(s) — run with --log-level debug for details");
+
+ suite.footer();
+ // std::exit bypasses stack-variable destructors (including io_context), which avoids
+ // boost::coroutines::detail::forced_unwind being thrown during io cleanup when
+ // active coroutines are present at shutdown (TCP connect, etc.).
+ std::cout.flush();
+ std::exit(suite.failed > 0 ? 1 : 0);
+}
diff --git a/examples/discovery/test_discv5_connect.cpp b/examples/discovery/test_discv5_connect.cpp
new file mode 100644
index 0000000..2850a21
--- /dev/null
+++ b/examples/discovery/test_discv5_connect.cpp
@@ -0,0 +1,845 @@
+// Copyright 2025 GeniusVentures
+// SPDX-License-Identifier: Apache-2.0
+//
+// examples/discovery/test_discv5_connect.cpp
+//
+// Live functional harness:
+// discv5 discovery (Sepolia) -> RLPx ETH Status validation.
+//
+// Pass criterion:
+// connected (right chain) >= --connections (default: 3)
+//
+// Usage:
+// ./test_discv5_connect [--log-level debug] [--timeout 180] [--connections 3] [--dials 16]
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "../chain_config.hpp"
+
+static constexpr uint64_t kSepoliaNetworkId = 11155111;
+static constexpr uint8_t kEthOffset = 0x10;
+static constexpr uint16_t kSepoliaRlpxPort = 30303;
+
+static std::string pubkey_to_hex(const rlpx::PublicKey& pubkey)
+{
+ static constexpr char kHex[] = "0123456789abcdef";
+ std::string out;
+ out.reserve(pubkey.size() * 2U);
+ for (const uint8_t byte : pubkey)
+ {
+ out.push_back(kHex[(byte >> 4U) & 0x0FU]);
+ out.push_back(kHex[byte & 0x0FU]);
+ }
+ return out;
+}
+
+static eth::Hash256 sepolia_genesis()
+{
+ eth::Hash256 h{};
+ const char* hex = "25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9";
+ for (size_t i = 0; i < 32; ++i)
+ {
+ auto nibble = [](char c) -> uint8_t {
+ if (c >= '0' && c <= '9') { return static_cast<uint8_t>(c - '0'); }
+ if (c >= 'a' && c <= 'f') { return static_cast<uint8_t>(10 + c - 'a'); }
+ return 0U;
+ };
+ h[i] = static_cast<uint8_t>((nibble(hex[i * 2U]) << 4U) | nibble(hex[i * 2U + 1U]));
+ }
+ return h;
+}
+
+// Sepolia post-BPO2 fallback hash — used only when chains.json is not found.
+static const std::array<uint8_t, 4> kSepoliaForkHashFallback{ 0x26, 0x89, 0x56, 0xb6 };
+
+struct DialStats
+{
+ std::atomic<int> dialed{0};
+ std::atomic<int> connect_failed{0};
+ std::atomic<int> wrong_chain{0};
+ std::atomic<int> status_timeout{0};
+ std::atomic<int> too_many_peers{0};
+ std::atomic<int> connected{0};
+ std::atomic<int> connected_seeded{0};
+ std::atomic<int> connected_discv5{0};
+ std::atomic<int> filtered_bad_peers{0};
+};
+
+struct QualityFilterState
+{
+ std::unordered_map<std::string, int> fail_counts{};
+ std::unordered_map<std::string, int> ip_fail_counts{};
+ std::unordered_map<uint16_t, int> port_fail_counts{};
+ std::unordered_map<std::string, int> subnet_enqueue_counts{};
+ std::unordered_set<std::string> blocked_pubkeys{};
+ std::unordered_set<std::string> blocked_ips{};
+ std::unordered_set<uint16_t> blocked_ports{};
+ int block_threshold{2};
+ int ip_block_threshold{3};
+ int port_block_threshold{4};
+ int subnet_enqueue_limit{2};
+};
+
+static bool is_publicly_routable_ip(const std::string& ip)
+{
+ boost::system::error_code ec;
+ const auto addr = boost::asio::ip::make_address(ip, ec);
+ if (ec)
+ {
+ return false;
+ }
+
+ if (addr.is_v4())
+ {
+ const auto b = addr.to_v4().to_bytes();
+ if (b[0] == 0U || b[0] == 10U || b[0] == 127U)
+ {
+ return false;
+ }
+ if (b[0] == 169U && b[1] == 254U)
+ {
+ return false;
+ }
+ if (b[0] == 172U && b[1] >= 16U && b[1] <= 31U)
+ {
+ return false;
+ }
+ if (b[0] == 192U && b[1] == 168U)
+ {
+ return false;
+ }
+ if (b[0] >= 224U)
+ {
+ return false;
+ }
+ return true;
+ }
+
+ const auto v6 = addr.to_v6();
+ return !v6.is_loopback() && !v6.is_link_local() && !v6.is_multicast() && !v6.is_unspecified();
+}
+
+static std::optional<std::string> subnet_key_v4_24(const std::string& ip)
+{
+ boost::system::error_code ec;
+ const auto addr = boost::asio::ip::make_address(ip, ec);
+ if (ec || !addr.is_v4())
+ {
+ return std::nullopt;
+ }
+
+ const auto b = addr.to_v4().to_bytes();
+ return std::to_string(b[0]) + "." + std::to_string(b[1]) + "." + std::to_string(b[2]);
+}
+
+static bool is_candidate_blocked(
+ const discv4::ValidatedPeer& vp,
+ const std::shared_ptr<QualityFilterState>& quality)
+{
+ const std::string pubkey = pubkey_to_hex(vp.pubkey);
+ if (quality->blocked_pubkeys.find(pubkey) != quality->blocked_pubkeys.end())
+ {
+ return true;
+ }
+
+ if (quality->blocked_ips.find(vp.peer.ip) != quality->blocked_ips.end())
+ {
+ return true;
+ }
+
+ if (quality->blocked_ports.find(vp.peer.tcp_port) != quality->blocked_ports.end())
+ {
+ return true;
+ }
+
+ return false;
+}
+
+static void dial_connect_only(
+ discv4::ValidatedPeer vp,
+ std::function<void()> on_done,
+ std::function<void(std::shared_ptr<rlpx::RlpxSession>)> on_connected,
+ boost::asio::yield_context yield,
+ std::shared_ptr<DialStats> stats,
+ std::shared_ptr<QualityFilterState> quality,
+ eth::ForkId fork_id)
+{
+ ++stats->dialed;
+
+ auto keypair_result = rlpx::crypto::Ecdh::generate_ephemeral_keypair();
+ if (!keypair_result)
+ {
+ ++stats->connect_failed;
+ on_done();
+ return;
+ }
+ const auto& keypair = keypair_result.value();
+
+ const rlpx::SessionConnectParams params{
+ vp.peer.ip,
+ vp.peer.tcp_port,
+ keypair.public_key,
+ keypair.private_key,
+ vp.pubkey,
+ "rlp-test-discv5-connect",
+ 0
+ };
+
+ auto session_result = rlpx::RlpxSession::connect(params, yield);
+ if (!session_result)
+ {
+ const std::string key = pubkey_to_hex(vp.pubkey);
+ const int fails = ++quality->fail_counts[key];
+ if (fails >= quality->block_threshold)
+ {
+ quality->blocked_pubkeys.insert(key);
+ }
+
+ const int ip_fails = ++quality->ip_fail_counts[vp.peer.ip];
+ if (ip_fails >= quality->ip_block_threshold)
+ {
+ quality->blocked_ips.insert(vp.peer.ip);
+ }
+
+ const int port_fails = ++quality->port_fail_counts[vp.peer.tcp_port];
+ if (port_fails >= quality->port_block_threshold)
+ {
+ quality->blocked_ports.insert(vp.peer.tcp_port);
+ }
+
+ ++stats->connect_failed;
+ on_done();
+ return;
+ }
+ auto session = std::move(session_result.value());
+
+ {
+ const eth::Hash256 genesis = sepolia_genesis();
+ eth::StatusMessage69 status69{
+ 69,
+ kSepoliaNetworkId,
+ genesis,
+ fork_id,
+ 0,
+ 0,
+ genesis,
+ };
+ eth::StatusMessage status = status69;
+ auto encoded = eth::protocol::encode_status(status);
+ if (encoded)
+ {
+ (void)session->post_message(rlpx::framing::Message{
+ static_cast<uint8_t>(kEthOffset + eth::protocol::kStatusMessageId),
+ std::move(encoded.value())
+ });
+ }
+ }
+
+ auto executor = yield.get_executor();
+ auto status_received = std::make_shared<std::atomic<bool>>(false);
+ auto status_timeout = std::make_shared<boost::asio::steady_timer>(executor);
+ auto lifetime = std::make_shared<boost::asio::steady_timer>(executor);
+ auto disconnect_reason = std::make_shared<std::atomic<uint8_t>>(
+ static_cast<uint8_t>(rlpx::DisconnectReason::kRequested));
+ status_timeout->expires_after(eth::protocol::kStatusHandshakeTimeout);
+ lifetime->expires_after(std::chrono::seconds(10));
+
+ session->set_disconnect_handler(
+ [lifetime, status_timeout, disconnect_reason](const rlpx::protocol::DisconnectMessage& msg)
+ {
+ disconnect_reason->store(static_cast<uint8_t>(msg.reason));
+ lifetime->cancel();
+ status_timeout->cancel();
+ });
+
+ session->set_ping_handler([session](const rlpx::protocol::PingMessage&)
+ {
+ const rlpx::protocol::PongMessage pong;
+ auto encoded = pong.encode();
+ if (!encoded)
+ {
+ return;
+ }
+
+ (void)session->post_message(rlpx::framing::Message{
+ rlpx::kPongMessageId,
+ std::move(encoded.value())
+ });
+ });
+
+ const eth::Hash256 genesis = sepolia_genesis();
+ session->set_generic_handler([session, status_received, status_timeout,
+ on_connected, genesis, stats](const rlpx::protocol::Message& msg)
+ {
+ if (msg.id < kEthOffset)
+ {
+ return;
+ }
+
+ const auto eth_id = static_cast<uint8_t>(msg.id - kEthOffset);
+ if (eth_id != eth::protocol::kStatusMessageId)
+ {
+ return;
+ }
+
+ const rlp::ByteView payload(msg.payload.data(), msg.payload.size());
+ auto decoded = eth::protocol::decode_status(payload);
+ if (!decoded)
+ {
+ status_timeout->cancel();
+ (void)session->disconnect(rlpx::DisconnectReason::kSubprotocolError);
+ return;
+ }
+
+ auto valid = eth::protocol::validate_status(decoded.value(), kSepoliaNetworkId, genesis);
+ if (!valid)
+ {
+ ++stats->wrong_chain;
+ status_timeout->cancel();
+ (void)session->disconnect(rlpx::DisconnectReason::kSubprotocolError);
+ return;
+ }
+
+ ++stats->connected;
+ status_received->store(true);
+ status_timeout->cancel();
+ on_connected(session);
+ });
+
+ boost::system::error_code hs_ec;
+ status_timeout->async_wait(boost::asio::redirect_error(yield, hs_ec));
+
+ if (!status_received->load())
+ {
+ if (hs_ec)
+ {
+ const auto reason = static_cast<rlpx::DisconnectReason>(disconnect_reason->load());
+ if (reason == rlpx::DisconnectReason::kTooManyPeers)
+ {
+ ++stats->too_many_peers;
+ }
+ else
+ {
+ ++stats->connect_failed;
+ }
+ }
+ else
+ {
+ ++stats->status_timeout;
+ }
+
+ (void)session->disconnect(rlpx::DisconnectReason::kTimeout);
+ on_done();
+ return;
+ }
+
+ boost::system::error_code lt_ec;
+ lifetime->async_wait(boost::asio::redirect_error(yield, lt_ec));
+ on_done();
+}
+
+int main(int argc, char** argv)
+{
+ int timeout_secs = 180;
+ int min_connections = 3;
+ int max_dials = 16;
+ bool enable_seeded = false;
+ bool require_fork = true;
+ bool enqueue_bootstrap_candidates = false;
+
+ for (int i = 1; i < argc; ++i)
+ {
+ std::string_view arg(argv[i]);
+ if (arg == "--log-level" && i + 1 < argc)
+ {
+ std::string_view lvl(argv[++i]);
+ if (lvl == "trace") { spdlog::set_level(spdlog::level::trace); }
+ else if (lvl == "debug") { spdlog::set_level(spdlog::level::debug); }
+ else if (lvl == "info") { spdlog::set_level(spdlog::level::info); }
+ else if (lvl == "warn") { spdlog::set_level(spdlog::level::warn); }
+ else if (lvl == "off") { spdlog::set_level(spdlog::level::off); }
+ }
+ else if (arg == "--timeout" && i + 1 < argc)
+ {
+ timeout_secs = std::atoi(argv[++i]);
+ }
+ else if (arg == "--connections" && i + 1 < argc)
+ {
+ min_connections = std::atoi(argv[++i]);
+ }
+ else if (arg == "--dials" && i + 1 < argc)
+ {
+ max_dials = std::atoi(argv[++i]);
+ }
+ else if (arg == "--seeded" && i + 1 < argc)
+ {
+ const std::string_view mode(argv[++i]);
+ if (mode == "on")
+ {
+ enable_seeded = true;
+ }
+ else if (mode == "off")
+ {
+ enable_seeded = false;
+ }
+ else
+ {
+ std::cout << "Invalid --seeded value (use on|off)\n";
+ return 1;
+ }
+ }
+ else if (arg == "--require-fork" && i + 1 < argc)
+ {
+ const std::string_view mode(argv[++i]);
+ if (mode == "on")
+ {
+ require_fork = true;
+ }
+ else if (mode == "off")
+ {
+ require_fork = false;
+ }
+ else
+ {
+ std::cout << "Invalid --require-fork value (use on|off)\n";
+ return 1;
+ }
+ }
+ else if (arg == "--enqueue-bootstrap-candidates" && i + 1 < argc)
+ {
+ const std::string_view mode(argv[++i]);
+ if (mode == "on")
+ {
+ enqueue_bootstrap_candidates = true;
+ }
+ else if (mode == "off")
+ {
+ enqueue_bootstrap_candidates = false;
+ }
+ else
+ {
+ std::cout << "Invalid --enqueue-bootstrap-candidates value (use on|off)\n";
+ return 1;
+ }
+ }
+ }
+
+ const auto loaded_hash = load_fork_hash("sepolia", argv[0]);
+ if (!loaded_hash)
+ {
+ std::cout << "[ WARN ] chains.json not found or missing 'sepolia' key — using compiled-in fallback hash.\n";
+ }
+
+ const std::array<uint8_t, 4> sepolia_hash = loaded_hash.value_or(kSepoliaForkHashFallback);
+ const eth::ForkId sepolia_fork_id{ sepolia_hash, 0U };
+ const discovery::ForkId sepolia_discovery_fork_id{ sepolia_hash, 0U };
+
+ boost::asio::io_context io;
+
+ std::atomic<int> discovered_peers{0};
+ std::atomic<int> discovered_candidates{0};
+ auto stats = std::make_shared();
+
+ auto keypair_result = rlpx::crypto::Ecdh::generate_ephemeral_keypair();
+ if (!keypair_result)
+ {
+ std::cout << "Failed to generate keypair\n";
+ return 1;
+ }
+
+ discv5::discv5Config dv5_cfg;
+ dv5_cfg.bind_port = 0;
+ dv5_cfg.query_interval_sec = 10U;
+ if (require_fork)
+ {
+ dv5_cfg.required_fork_id = sepolia_discovery_fork_id;
+ }
+ std::copy(
+ keypair_result.value().private_key.begin(),
+ keypair_result.value().private_key.end(),
+ dv5_cfg.private_key.begin());
+ std::copy(
+ keypair_result.value().public_key.begin(),
+ keypair_result.value().public_key.end(),
+ dv5_cfg.public_key.begin());
+
+ auto source = discv5::ChainBootnodeRegistry::for_chain(discv5::ChainId::kEthereumSepolia);
+ if (!source)
+ {
+ std::cout << "Failed to load Sepolia discv5 bootnode source\n";
+ return 1;
+ }
+
+ dv5_cfg.bootstrap_enrs = source->fetch();
+ if (dv5_cfg.bootstrap_enrs.empty())
+ {
+ std::cout << "No discv5 Sepolia bootnodes configured\n";
+ return 1;
+ }
+
+ auto dv5 = std::make_shared(io, dv5_cfg);
+
+ const int kMaxActiveDials = 50;
+ auto pool = std::make_shared(kMaxActiveDials, max_dials * 2);
+ auto sched_ref = std::make_shared(nullptr);
+ auto seeded_pubkeys = std::make_shared<std::unordered_set<std::string>>();
+ auto quality = std::make_shared<QualityFilterState>();
+
+ boost::asio::steady_timer deadline(io, std::chrono::seconds(timeout_secs));
+
+ auto scheduler = std::make_shared(
+ io,
+ pool,
+ [&io, &deadline, min_connections, sched_ref, stats, seeded_pubkeys, quality, sepolia_fork_id]
+ (discv4::ValidatedPeer vp,
+ std::function<void()> on_done,
+ std::function<void(std::shared_ptr<rlpx::RlpxSession>)> on_connected,
+ boost::asio::yield_context yc) mutable
+ {
+ dial_connect_only(
+ vp,
+ std::move(on_done),
+ [on_connected, &io, &deadline, min_connections, sched_ref, stats, seeded_pubkeys]
+ (std::shared_ptr<rlpx::RlpxSession> s) mutable
+ {
+ const std::string remote_pubkey_hex = pubkey_to_hex(s->peer_info().public_key);
+ if (seeded_pubkeys->find(remote_pubkey_hex) != seeded_pubkeys->end())
+ {
+ ++stats->connected_seeded;
+ }
+ else
+ {
+ ++stats->connected_discv5;
+ }
+ on_connected(s);
+ if (*sched_ref && (*sched_ref)->total_validated >= min_connections)
+ {
+ deadline.cancel();
+ io.stop();
+ }
+ },
+ yc,
+ stats,
+ quality,
+ sepolia_fork_id);
+ });
+ *sched_ref = scheduler.get();
+
+ auto parse_enode = [](const std::string& enode)
+ -> std::optional>
+ {
+ const std::string prefix = "enode://";
+ if (enode.substr(0, prefix.size()) != prefix)
+ {
+ return std::nullopt;
+ }
+ const auto at = enode.find('@', prefix.size());
+ if (at == std::string::npos)
+ {
+ return std::nullopt;
+ }
+ const auto colon = enode.rfind(':');
+ if (colon == std::string::npos || colon < at)
+ {
+ return std::nullopt;
+ }
+
+ std::string pubkey = enode.substr(prefix.size(), at - prefix.size());
+ std::string host = enode.substr(at + 1, colon - at - 1);
+ uint16_t port = static_cast(std::stoi(enode.substr(colon + 1)));
+ return std::make_tuple(host, port, pubkey);
+ };
+
+ auto hex_to_nibble = [](char c) -> std::optional
+ {
+ if (c >= '0' && c <= '9') { return static_cast(c - '0'); }
+ if (c >= 'a' && c <= 'f') { return static_cast(10 + c - 'a'); }
+ if (c >= 'A' && c <= 'F') { return static_cast(10 + c - 'A'); }
+ return std::nullopt;
+ };
+
+ if (enable_seeded)
+ {
+ for (const auto& enode : ETHEREUM_SEPOLIA_BOOTNODES)
+ {
+ auto parsed = parse_enode(enode);
+ if (!parsed)
+ {
+ continue;
+ }
+
+ const auto& [host, port, pubkey_hex] = *parsed;
+ if (port != kSepoliaRlpxPort || pubkey_hex.size() != 128U)
+ {
+ continue;
+ }
+
+ discv4::ValidatedPeer vp;
+ vp.peer.ip = host;
+ vp.peer.udp_port = port;
+ vp.peer.tcp_port = port;
+ vp.peer.last_seen = std::chrono::steady_clock::now();
+
+ bool ok = true;
+ for (size_t i = 0; i < vp.pubkey.size() && ok; ++i)
+ {
+ auto hi = hex_to_nibble(pubkey_hex[i * 2U]);
+ auto lo = hex_to_nibble(pubkey_hex[i * 2U + 1U]);
+ if (!hi || !lo)
+ {
+ ok = false;
+ break;
+ }
+ vp.pubkey[i] = static_cast((*hi << 4U) | *lo);
+ vp.peer.node_id[i] = vp.pubkey[i];
+ }
+
+ if (!ok || !rlpx::crypto::Ecdh::verify_public_key(vp.pubkey))
+ {
+ continue;
+ }
+
+ if (is_candidate_blocked(vp, quality))
+ {
+ ++stats->filtered_bad_peers;
+ continue;
+ }
+
+ seeded_pubkeys->insert(pubkey_to_hex(vp.pubkey));
+ scheduler->enqueue(std::move(vp));
+ }
+ }
+
+ dv5->set_peer_discovered_callback(
+ [scheduler, stats, quality, &discovered_peers, &discovered_candidates, sepolia_hash, require_fork]
+ (const discovery::ValidatedPeer& peer)
+ {
+ ++discovered_candidates;
+
+ if (peer.tcp_port == 0)
+ {
+ return;
+ }
+
+ if (require_fork && (!peer.eth_fork_id.has_value() || peer.eth_fork_id.value().hash != sepolia_hash))
+ {
+ return;
+ }
+
+ discv4::ValidatedPeer vp;
+ vp.peer.node_id = peer.node_id;
+ vp.peer.ip = peer.ip;
+ vp.peer.udp_port = peer.udp_port;
+ vp.peer.tcp_port = peer.tcp_port;
+ vp.peer.last_seen = peer.last_seen;
+ if (peer.eth_fork_id.has_value())
+ {
+ vp.peer.eth_fork_id = discv4::ForkId{
+ peer.eth_fork_id.value().hash,
+ peer.eth_fork_id.value().next
+ };
+ }
+
+ vp.pubkey = peer.node_id;
+ if (!rlpx::crypto::Ecdh::verify_public_key(vp.pubkey))
+ {
+ return;
+ }
+
+ if (!is_publicly_routable_ip(vp.peer.ip))
+ {
+ ++stats->filtered_bad_peers;
+ return;
+ }
+
+ if (is_candidate_blocked(vp, quality))
+ {
+ ++stats->filtered_bad_peers;
+ return;
+ }
+
+ const auto subnet_key = subnet_key_v4_24(vp.peer.ip);
+ if (subnet_key.has_value())
+ {
+ const int queued = quality->subnet_enqueue_counts[subnet_key.value()];
+ if (queued >= quality->subnet_enqueue_limit)
+ {
+ ++stats->filtered_bad_peers;
+ return;
+ }
+ ++quality->subnet_enqueue_counts[subnet_key.value()];
+ }
+
+ ++discovered_peers;
+ scheduler->enqueue(std::move(vp));
+ });
+
+ dv5->set_error_callback([](const std::string& msg)
+ {
+ std::cout << "discv5 error: " << msg << "\n";
+ });
+
+ if (enqueue_bootstrap_candidates)
+ {
+ for (const auto& enr_uri : dv5_cfg.bootstrap_enrs)
+ {
+ auto record_result = discv5::EnrParser::parse(enr_uri);
+ if (!record_result)
+ {
+ continue;
+ }
+
+ auto peer_result = discv5::EnrParser::to_validated_peer(record_result.value());
+ if (!peer_result)
+ {
+ continue;
+ }
+
+ if (peer_result.value().tcp_port == 0)
+ {
+ continue;
+ }
+
+ discv4::ValidatedPeer vp;
+ vp.peer.node_id = peer_result.value().node_id;
+ vp.peer.ip = peer_result.value().ip;
+ vp.peer.udp_port = peer_result.value().udp_port;
+ vp.peer.tcp_port = peer_result.value().tcp_port;
+ vp.peer.last_seen = peer_result.value().last_seen;
+ if (peer_result.value().eth_fork_id.has_value())
+ {
+ vp.peer.eth_fork_id = discv4::ForkId{
+ peer_result.value().eth_fork_id.value().hash,
+ peer_result.value().eth_fork_id.value().next
+ };
+ }
+
+ vp.pubkey = peer_result.value().node_id;
+ if (!rlpx::crypto::Ecdh::verify_public_key(vp.pubkey))
+ {
+ continue;
+ }
+
+ if (!is_publicly_routable_ip(vp.peer.ip))
+ {
+ ++stats->filtered_bad_peers;
+ continue;
+ }
+
+ if (is_candidate_blocked(vp, quality))
+ {
+ ++stats->filtered_bad_peers;
+ continue;
+ }
+
+ const auto subnet_key = subnet_key_v4_24(vp.peer.ip);
+ if (subnet_key.has_value())
+ {
+ const int queued = quality->subnet_enqueue_counts[subnet_key.value()];
+ if (queued >= quality->subnet_enqueue_limit)
+ {
+ ++stats->filtered_bad_peers;
+ continue;
+ }
+ ++quality->subnet_enqueue_counts[subnet_key.value()];
+ }
+
+ scheduler->enqueue(std::move(vp));
+ }
+ }
+
+ deadline.async_wait([&](boost::system::error_code)
+ {
+ scheduler->stop();
+ dv5->stop();
+ io.stop();
+ });
+
+ boost::asio::signal_set signals(io, SIGINT, SIGTERM);
+ signals.async_wait([&](boost::system::error_code, int)
+ {
+ deadline.cancel();
+ scheduler->stop();
+ dv5->stop();
+ io.stop();
+ });
+
+ const auto start_result = dv5->start();
+ if (!start_result)
+ {
+ std::cout << "Failed to start discv5 client\n";
+ return 1;
+ }
+
+ std::cout << "Running discv5 Sepolia discovery + connect harness (seeded="
+ << (enable_seeded ? "on" : "off")
+ << ", require-fork=" << (require_fork ? "on" : "off")
+ << ", enqueue-bootstrap-candidates=" << (enqueue_bootstrap_candidates ? "on" : "off")
+ << ")...\n";
+ io.run();
+
+ std::cout << "\n[ STATS ] Dial breakdown:\n"
+ << " dialed: " << stats->dialed.load() << "\n"
+ << " connect failed: " << stats->connect_failed.load() << "\n"
+ << " wrong chain: " << stats->wrong_chain.load() << "\n"
+ << " too many peers: " << stats->too_many_peers.load() << "\n"
+ << " status timeout: " << stats->status_timeout.load() << "\n"
+ << " connected (right chain): " << stats->connected.load() << "\n"
+ << " connected (seeded): " << stats->connected_seeded.load() << "\n"
+ << " connected (discv5): " << stats->connected_discv5.load() << "\n"
+ << " filtered bad peers: " << stats->filtered_bad_peers.load() << "\n"
+ << " candidates seen: " << discovered_candidates.load() << "\n"
+ << " discovered peers: " << discovered_peers.load() << "\n";
+
+ const int connections = scheduler->total_validated;
+ if (connections >= min_connections)
+ {
+ std::cout << "\n[ OK ] Discv5ConnectHarness.ActiveSepoliaConnections\n"
+ << " " << connections << " active Sepolia ETH Status connection(s) confirmed\n\n";
+ std::cout.flush();
+ std::exit(0);
+ }
+
+ std::cout << "\n[ FAILED ] Discv5ConnectHarness.ActiveSepoliaConnections\n"
+ << " Only " << connections << "/" << min_connections
+ << " Sepolia connection(s) — run with --log-level debug for details\n\n";
+ std::cout.flush();
+ std::exit(1);
+}
+
diff --git a/examples/discovery/test_enr_survey.cpp b/examples/discovery/test_enr_survey.cpp
new file mode 100644
index 0000000..5aaf597
--- /dev/null
+++ b/examples/discovery/test_enr_survey.cpp
@@ -0,0 +1,260 @@
+// Copyright 2025 GeniusVentures
+// SPDX-License-Identifier: Apache-2.0
+//
+// examples/discovery/test_enr_survey.cpp
+//
+// Diagnostic live test: run discv4 discovery with **no pre-dial filter**, collect
+// every DiscoveredPeer produced by the ENR-enrichment path, and at the end print
+// a frequency table of the eth fork-hashes actually seen in live ENR responses.
+//
+// This intentionally does zero dialing / RLPx — its only purpose is to determine:
+// 1. Whether request_enr() is successfully completing for live Sepolia peers.
+// 2. Which fork-hash bytes actually appear in live ENR `eth` entries.
+// 3. Whether the Sepolia fork-hash used by make_fork_id_filter() is correct.
+//
+// Usage:
+// ./test_enr_survey [--log-level debug] [--timeout 60]
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include